diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java index 11526a768f..0696345af3 100644 --- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java +++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java @@ -1158,7 +1158,6 @@ private int executeFile(String fileName) { return ERRNO_OTHER; } finally { IOUtils.closeStream(fileStream); - output(""); // dummy new line } } diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java index bbfbc36685..f08b970356 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java +++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; import org.apache.hadoop.hive.metastore.MetaStoreEventListener; +import org.apache.hadoop.hive.metastore.MetaStoreEventListenerConstants; import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.RawStoreProxy; import org.apache.hadoop.hive.metastore.ReplChangeManager; diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java index 5282a5a4fb..74d0efea52 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java @@ -92,6 +92,11 @@ public boolean commitTransaction() { return objectStore.commitTransaction(); } + @Override + public boolean isActiveTransaction() { + return false; + } + @Override public Configuration getConf() { return objectStore.getConf(); diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java index 50d8878e1c..976c3c5a02 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreEventListener; +import org.apache.hadoop.hive.metastore.MetaStoreEventListenerConstants; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.FireEventRequest; diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 1b0b53793a..4b1df8e17d 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -608,6 +608,55 @@ public void testListPartitionsWihtLimitEnabled() throws Throwable { assertEquals(" should have returned 50 partitions", maxParts, partitions.size()); } + 
public void testAlterTableCascade() throws Throwable { + // create a table with multiple partitions + String dbName = "compdb"; + String tblName = "comptbl"; + String typeName = "Person"; + + cleanUp(dbName, tblName, typeName); + + List<List<String>> values = new ArrayList<List<String>>(); + values.add(makeVals("2008-07-01 14:13:12", "14")); + values.add(makeVals("2008-07-01 14:13:12", "15")); + values.add(makeVals("2008-07-02 14:13:12", "15")); + values.add(makeVals("2008-07-03 14:13:12", "151")); + + createMultiPartitionTableSchema(dbName, tblName, typeName, values); + Table tbl = client.getTable(dbName, tblName); + List<FieldSchema> cols = tbl.getSd().getCols(); + cols.add(new FieldSchema("new_col", serdeConstants.STRING_TYPE_NAME, "")); + tbl.getSd().setCols(cols); + //add new column with cascade option + client.alter_table(dbName, tblName, tbl, true); + // + Table tbl2 = client.getTable(dbName, tblName); + Assert.assertEquals("Unexpected number of cols", 3, tbl2.getSd().getCols().size()); + Assert.assertEquals("Unexpected column name", "new_col", tbl2.getSd().getCols().get(2).getName()); + //get a partition + List<String> pvalues = new ArrayList<>(2); + pvalues.add("2008-07-01 14:13:12"); + pvalues.add("14"); + Partition partition = client.getPartition(dbName, tblName, pvalues); + Assert.assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size()); + Assert.assertEquals("Unexpected column name", "new_col", partition.getSd().getCols().get(2).getName()); + + //add another column + cols = tbl.getSd().getCols(); + cols.add(new FieldSchema("new_col2", serdeConstants.STRING_TYPE_NAME, "")); + tbl.getSd().setCols(cols); + //add new column with no cascade option + client.alter_table(dbName, tblName, tbl, false); + tbl2 = client.getTable(dbName, tblName); + Assert.assertEquals("Unexpected number of cols", 4, tbl2.getSd().getCols().size()); + Assert.assertEquals("Unexpected column name", "new_col2", tbl2.getSd().getCols().get(3).getName()); + //get partition, this partition should not have the newly added column since cascade option + //was false + partition = client.getPartition(dbName, tblName, pvalues); + Assert.assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size()); + } + + public void testListPartitionNames() throws Throwable { // create a table with multiple partitions String dbName = "compdb"; diff --git a/metastore/scripts/upgrade/postgres/033-HIVE-13076.postgres.sql b/metastore/scripts/upgrade/postgres/033-HIVE-13076.postgres.sql index 9ee7c1115b..59e702bd7b 100644 --- a/metastore/scripts/upgrade/postgres/033-HIVE-13076.postgres.sql +++ b/metastore/scripts/upgrade/postgres/033-HIVE-13076.postgres.sql @@ -1,4 +1,4 @@ -CREATE TABLE IF NOT EXISTS "KEY_CONSTRAINTS" +CREATE TABLE "KEY_CONSTRAINTS" ( "CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" BIGINT, diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index c4e45a19af..616314cb36 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -904,7 +904,7 @@ private void create_database_core(RawStore ms, final Database db) EventType.CREATE_DATABASE, new CreateDatabaseEvent(db, success, this), null, - transactionalListenersResponses); + transactionalListenersResponses, ms); } } } @@ -1136,7 +1136,7 @@ private void drop_database_core(RawStore ms, EventType.DROP_DATABASE, new DropDatabaseEvent(db, success, this), null, - 
transactionalListenerResponses); + transactionalListenerResponses, ms); } } } @@ -1477,7 +1477,7 @@ private void create_table_core(final RawStore ms, final Table tbl, EventType.CREATE_TABLE, new CreateTableEvent(tbl, success, this), envContext, - transactionalListenerResponses); + transactionalListenerResponses, ms); } } } @@ -1722,7 +1722,7 @@ private boolean drop_table_core(final RawStore ms, final String dbname, final St EventType.DROP_TABLE, new DropTableEvent(tbl, deleteData, success, this), envContext, - transactionalListenerResponses); + transactionalListenerResponses, ms); } } return success; @@ -2266,7 +2266,7 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab EventType.ADD_PARTITION, new AddPartitionEvent(tbl, part, success, this), envContext, - transactionalListenerResponses); + transactionalListenerResponses, ms); } } return part; @@ -2521,7 +2521,8 @@ public Object run() throws Exception { if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, parts, false, this)); + new AddPartitionEvent(tbl, parts, false, this), + null, null, ms); } } else { if (!listeners.isEmpty()) { @@ -2529,13 +2530,14 @@ public Object run() throws Exception { EventType.ADD_PARTITION, new AddPartitionEvent(tbl, newParts, true, this), null, - transactionalListenerResponses); + transactionalListenerResponses, ms); if (!existingParts.isEmpty()) { // The request has succeeded but we failed to add these partitions. MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, - new AddPartitionEvent(tbl, existingParts, false, this)); + new AddPartitionEvent(tbl, existingParts, false, this), + null, null, ms); } } } @@ -2723,7 +2725,7 @@ public Object run() throws Exception { EventType.ADD_PARTITION, new AddPartitionEvent(tbl, partitionSpecProxy, true, this), null, - transactionalListenerResponses); + transactionalListenerResponses, ms); } } } @@ -2877,7 +2879,7 @@ private Partition add_partition_core(final RawStore ms, EventType.ADD_PARTITION, new AddPartitionEvent(tbl, Arrays.asList(part), success, this), envContext, - transactionalListenerResponses); + transactionalListenerResponses, ms); } } @@ -3031,7 +3033,7 @@ public Partition exchange_partition(Map partitionSpecs, EventType.ADD_PARTITION, addPartitionEvent, null, - transactionalListenerResponsesForAddPartition); + transactionalListenerResponsesForAddPartition, ms); i = 0; for (Partition partition : partitionsToExchange) { @@ -3046,7 +3048,7 @@ public Partition exchange_partition(Map partitionSpecs, EventType.DROP_PARTITION, dropPartitionEvent, null, - parameters); + parameters, ms); i++; } } @@ -3137,7 +3139,7 @@ private boolean drop_partition_common(RawStore ms, String db_name, String tbl_na EventType.DROP_PARTITION, new DropPartitionEvent(tbl, part, success, deleteData, this), envContext, - transactionalListenerResponses); + transactionalListenerResponses, ms); } } return true; @@ -3156,8 +3158,10 @@ private static boolean isMustPurge(EnvironmentContext envContext, Table tbl) { } private void deleteParentRecursive(Path parent, int depth, boolean mustPurge) throws IOException, MetaException { - if (depth > 0 && parent != null && wh.isWritable(parent) && wh.isEmpty(parent)) { - wh.deleteDir(parent, true, mustPurge); + if (depth > 0 && parent != null && wh.isWritable(parent)) { + if (wh.isDir(parent) && wh.isEmpty(parent)) { + wh.deleteDir(parent, true, mustPurge); + } deleteParentRecursive(parent.getParent(), depth - 1, mustPurge); } 
} @@ -3334,7 +3338,7 @@ public DropPartitionsResult drop_partitions_req( EventType.DROP_PARTITION, new DropPartitionEvent(tbl, part, success, deleteData, this), envContext, - parameters); + parameters, ms); i++; } @@ -3926,7 +3930,7 @@ public void alter_index(final String dbname, final String base_table_name, EventType.ALTER_INDEX, new AlterIndexEvent(oldIndex, newIndex, success, this), null, - transactionalListenerResponses); + transactionalListenerResponses, ms); } } } @@ -4629,7 +4633,7 @@ private Index add_index_core(final RawStore ms, final Index index, final Table i EventType.CREATE_INDEX, new AddIndexEvent(index, success, this), null, - transactionalListenerResponses); + transactionalListenerResponses, ms); } } } @@ -4722,7 +4726,7 @@ private boolean drop_index_by_name_core(final RawStore ms, EventType.DROP_INDEX, new DropIndexEvent(index, success, this), null, - transactionalListenerResponses); + transactionalListenerResponses, ms); } } return success; @@ -6193,7 +6197,7 @@ public void create_function(Function func) throws AlreadyExistsException, EventType.CREATE_FUNCTION, new CreateFunctionEvent(func, success, this), null, - transactionalListenerResponses); + transactionalListenerResponses, ms); } } } @@ -6232,7 +6236,7 @@ public void drop_function(String dbName, String funcName) EventType.DROP_FUNCTION, new DropFunctionEvent(func, success, this), null, - transactionalListenerResponses); + transactionalListenerResponses, ms); } } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 700262047f..b1a9782247 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -50,6 +50,7 @@ import javax.security.auth.login.LoginException; import org.apache.hadoop.hive.common.ObjectPair; +import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.auth.HiveAuthUtils; import org.apache.hadoop.hive.common.classification.InterfaceAudience; @@ -128,6 +129,10 @@ public HiveMetaStoreClient(HiveConf conf) throws MetaException { this(conf, null, true); } + public HiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader) throws MetaException { + this(conf, hookLoader, true); + } + public HiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded) throws MetaException { @@ -354,6 +359,16 @@ public void alter_table(String dbname, String tbl_name, Table new_tbl) alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null); } + @Override + public void alter_table(String defaultDatabaseName, String tblName, Table table, + boolean cascade) throws InvalidOperationException, MetaException, TException { + EnvironmentContext environmentContext = new EnvironmentContext(); + if (cascade) { + environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); + } + alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext); + } + @Override public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl, EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { @@ -1488,12 +1503,24 @@ public int getNumPartitionsByFilter(String db_name, String tbl_name, return client.get_num_partitions_by_filter(db_name, tbl_name, filter); } + @Override + public 
void alter_partition(String dbName, String tblName, Partition newPart) + throws InvalidOperationException, MetaException, TException { + client.alter_partition_with_environment_context(dbName, tblName, newPart, null); + } + @Override public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException { client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext); } + @Override + public void alter_partitions(String dbName, String tblName, List<Partition> newParts) + throws InvalidOperationException, MetaException, TException { + client.alter_partitions_with_environment_context(dbName, tblName, newParts, null); + } + @Override public void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index e9df1e149a..e7ead6b16b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -26,6 +26,7 @@ import java.util.Map.Entry; import org.apache.hadoop.hive.common.ObjectPair; +import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public; @@ -707,6 +708,14 @@ void createTable(Table tbl) throws AlreadyExistsException, void alter_table(String defaultDatabaseName, String tblName, Table table) throws InvalidOperationException, MetaException, TException; + /** + * Use alter_table_with_environmentContext instead of alter_table with cascade option + * passed in EnvironmentContext using {@code StatsSetupConst.CASCADE} + */ + @Deprecated + void alter_table(String defaultDatabaseName, String tblName, Table table, + boolean cascade) throws InvalidOperationException, MetaException, TException; + //wrapper of alter_table_with_cascade void alter_table_with_environmentContext(String defaultDatabaseName, String tblName, Table table, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, @@ -780,6 +789,26 @@ boolean dropPartition(String db_name, String tbl_name, List<String> part_vals, boolean dropPartition(String db_name, String tbl_name, String name, boolean deleteData) throws NoSuchObjectException, MetaException, TException; + + /** + * updates a partition to new partition + * + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newPart + * new partition + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + void alter_partition(String dbName, String tblName, Partition newPart) + throws InvalidOperationException, MetaException, TException; + /** * updates a partition to new partition * @@ -815,7 +844,28 @@ void alter_partition(String dbName, String tblName, Partition newPart, Environme * @throws TException * if error in communicating with metastore server */ - void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext) + 
void alter_partitions(String dbName, String tblName, List<Partition> newParts) + throws InvalidOperationException, MetaException, TException; + + /** + * updates a list of partitions + * + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newParts + * list of partitions + * @param environmentContext + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + void alter_partitions(String dbName, String tblName, List<Partition> newParts, + EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException; /** diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/MetaStoreEventListenerConstants.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListenerConstants.java similarity index 76% rename from hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/MetaStoreEventListenerConstants.java rename to metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListenerConstants.java index a4f2d592ce..79de79d11f 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/MetaStoreEventListenerConstants.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListenerConstants.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hive.hcatalog.listener; +package org.apache.hadoop.hive.metastore; /** * Keeps a list of reserved keys used by Hive listeners when updating the ListenerEvent @@ -30,4 +30,12 @@ * across other MetaStoreEventListener implementations. */ public static final String DB_NOTIFICATION_EVENT_ID_KEY_NAME = "DB_NOTIFICATION_EVENT_ID_KEY_NAME"; + + /* + * HiveMetaStore keys reserved for updating ListenerEvent parameters. + * + * HIVE_METASTORE_TRANSACTION_ACTIVE This key is used to check if a listener event is run inside a current + * transaction. A boolean value is used for active (true) or not active (false). + */ + public static final String HIVE_METASTORE_TRANSACTION_ACTIVE = "HIVE_METASTORE_TRANSACTION_ACTIVE"; } \ No newline at end of file diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java index 20011ccec8..37327f8bd2 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java @@ -43,6 +43,7 @@ import java.util.List; import java.util.Map; +import static org.apache.hadoop.hive.metastore.MetaStoreEventListenerConstants.HIVE_METASTORE_TRANSACTION_ACTIVE; import static org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType; /** @@ -201,11 +202,17 @@ public void notify(MetaStoreEventListener listener, ListenerEvent event) throws * the (ListenerEvent) event by setting a parameter key/value pair. These updated parameters will * be returned to the caller. * + * Sometimes these events are run inside a DB transaction and might cause issues with the listeners, + * for instance, Sentry blocks the HMS until an event is seen committed on the DB. 
To notify the listener about this, + a new parameter to verify if a transaction is active is added to the ListenerEvent, and it is up to the listener + to skip this notification if so. + * + * @param listeners List of MetaStoreEventListener listeners. * @param eventType Type of the notification event. * @param event The ListenerEvent with information about the event. * @param environmentContext An EnvironmentContext object with parameters sent by the HMS client. * @param parameters A list of key/value pairs with the new parameters to add. + * @param ms The RawStore object from where to check if a transaction is active. * @return A list of key/value pair parameters that the listeners set. The returned object will return an empty * map if no parameters were updated or if no listeners were notified. * @throws MetaException If an error occurred while calling the listeners. @@ -214,11 +221,17 @@ public void notify(MetaStoreEventListener listener, ListenerEvent event) throws EventType eventType, ListenerEvent event, EnvironmentContext environmentContext, - Map<String, String> parameters) throws MetaException { + Map<String, String> parameters, + final RawStore ms) throws MetaException { Preconditions.checkNotNull(event, "The event must not be null."); event.putParameters(parameters); + + if (ms != null) { + event.putParameter(HIVE_METASTORE_TRANSACTION_ACTIVE, Boolean.toString(ms.isActiveTransaction())); + } + return notifyEvent(listeners, eventType, event, environmentContext); } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index a63519a744..e6a918b7b7 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -234,26 +234,22 @@ private Pattern partitionValidationPattern; /** - * A class to pass the Query object to the caller to let the caller release - * resources by calling QueryWrapper.query.closeAll() after consuming all the query results. 
+ * A Autocloseable wrapper around Query class to pass the Query object to the caller and let the caller release + * the resources when the QueryWrapper goes out of scope */ - public static class QueryWrapper { + public static class QueryWrapper implements AutoCloseable { public Query query; /** * Explicitly closes the query object to release the resources */ + @Override public void close() { if (query != null) { query.closeAll(); query = null; } } - - @Override - protected void finalize() { - this.close(); - } } public ObjectStore() { @@ -700,12 +696,7 @@ private MDatabase getMDatabase(String name) throws NoSuchObjectException { pm.retrieve(mdb); commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } if (mdb == null) { throw new NoSuchObjectException("There is no database named " + name); @@ -824,10 +815,7 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc } success = commitTransaction(); } finally { - if (!success) { - rollbackTransaction(); - } - queryWrapper.close(); + rollbackAndCleanup(success, queryWrapper); } return success; } @@ -858,12 +846,7 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc } commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return databases; } @@ -883,12 +866,7 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc databases = new ArrayList((Collection) query.execute()); commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } Collections.sort(databases); return databases; @@ -956,12 +934,7 @@ public Type getType(String typeName) { } commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return type; } @@ -985,12 +958,7 @@ public boolean dropType(String typeName) { success = commitTransaction(); LOG.debug("type not found " + typeName, e); } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } return success; } @@ -1231,12 +1199,7 @@ public Table getTable(String dbName, String tableName) throws MetaException { } commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return tbls; } @@ -1268,12 +1231,7 @@ private int getObjectCount(String fieldName, String objName) { result = (Long) query.execute(); commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return result.intValue(); } @@ -1311,12 +1269,7 @@ private int getObjectCount(String fieldName, String objName) { } commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return metas; } @@ -1402,12 +1355,7 @@ private AttachedMTableInfo getMTable(String db, String table, boolean retrieveCD } commited = commitTransaction(); } finally { - if (!commited) { - 
rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } nmtbl.mtbl = mtbl; return nmtbl; @@ -1450,15 +1398,10 @@ private MTable getMTable(String db, String table) { } committed = commitTransaction(); } finally { - if (!committed) { - rollbackTransaction(); - } + rollbackAndCleanup(committed, query); if (dbExistsQuery != null) { dbExistsQuery.closeAll(); } - if (query != null) { - query.closeAll(); - } } return tables; } @@ -1976,12 +1919,7 @@ private MPartition getMPartition(String dbName, String tableName, List p } } } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return ret; } @@ -2213,10 +2151,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio success = commitTransaction(); return parts; } finally { - if (!success) { - rollbackTransaction(); - } - queryWrapper.close(); + rollbackAndCleanup(success, queryWrapper); } } @@ -2318,6 +2253,7 @@ public Partition getPartitionWithAuth(String dbName, String tblName, for (Iterator i = names.iterator(); i.hasNext();) { pns.add((String) i.next()); } + if (query != null) { query.closeAll(); } @@ -2412,10 +2348,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, } success = commitTransaction(); } finally { - if (!success) { - rollbackTransaction(); - } - queryWrapper.close(); + rollbackAndCleanup(success, queryWrapper); } return partitions; } @@ -2437,10 +2370,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, } success = commitTransaction(); } finally { - if (!success) { - rollbackTransaction(); - } - queryWrapper.close(); + rollbackAndCleanup(success, queryWrapper); } return partitionNames; } @@ -3206,12 +3136,7 @@ private String makeParameterDeclarationStringObj(Map params) { success = commitTransaction(); LOG.debug("Done retrieving all objects for listTableNamesByFilter"); } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } return tableNames; } @@ -3257,12 +3182,7 @@ private String makeParameterDeclarationStringObj(Map params) { success = commitTransaction(); LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter"); } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } return partNames; } @@ -3481,10 +3401,7 @@ private void removeUnusedColumnDescriptor(MColumnDescriptor oldCD) { success = commitTransaction(); LOG.debug("successfully deleted a CD in removeUnusedColumnDescriptor"); } finally { - if (!success) { - rollbackTransaction(); - } - queryWrapper.close(); + rollbackAndCleanup(success, queryWrapper); } } @@ -3568,12 +3485,7 @@ private boolean constraintNameAlreadyExists(String name) { constraintNameIfExists = (String) constraintExistsQuery.execute(name); commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (constraintExistsQuery != null) { - constraintExistsQuery.closeAll(); - } + rollbackAndCleanup(commited, constraintExistsQuery); } return constraintNameIfExists != null && !constraintNameIfExists.isEmpty(); } @@ -3821,12 +3733,7 @@ private MIndex getMIndex(String dbName, String originalTblName, String indexName pm.retrieve(midx); commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - 
query.closeAll(); - } + rollbackAndCleanup(commited, query); } return midx; } @@ -3889,12 +3796,7 @@ private Index convertToIndex(MIndex mIndex) throws MetaException { return indexes; } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } } @@ -3921,12 +3823,7 @@ private Index convertToIndex(MIndex mIndex) throws MetaException { } success = commitTransaction(); } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } return pns; } @@ -4049,12 +3946,7 @@ private MRoleMap getMSecurityUserRoleMap(String userName, PrincipalType principa pm.retrieve(mRoleMember); commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return mRoleMember; } @@ -4123,11 +4015,7 @@ public boolean removeRole(String roleName) throws MetaException, } success = commitTransaction(); } finally { - if (!success) { - rollbackTransaction(); - } - - queryWrapper.close(); + rollbackAndCleanup(success, queryWrapper); } return success; } @@ -4197,12 +4085,7 @@ private void getAllRoleAncestors(Set processedRoleNames, List LOG.debug("Done retrieving all objects for listRoles"); } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } if (principalType == PrincipalType.USER) { @@ -4268,7 +4151,6 @@ private void getAllRoleAncestors(Set processedRoleNames, List mRoleMemebership = (List) query.execute(roleName, principalType.toString()); pm.retrieveAll(mRoleMemebership); success = commitTransaction(); - LOG.debug("Done retrieving all objects for listMSecurityPrincipalMembershipRole"); } finally { if (!success) { @@ -4302,12 +4184,7 @@ private MRole getMRole(String roleName) { pm.retrieve(mrole); commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return mrole; } @@ -4329,12 +4206,7 @@ private MRole getMRole(String roleName) { success = commitTransaction(); return roleNames; } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } } @@ -5160,12 +5032,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) LOG.debug("Done retrieving all objects for listRoleMembers"); } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } return mRoleMemeberList; } @@ -5216,12 +5083,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) userNameDbPriv.addAll(mPrivs); } } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return userNameDbPriv; } @@ -5261,12 +5123,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) commited = commitTransaction(); return convertGlobal(userNameDbPriv); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } } @@ -5309,12 +5166,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) mSecurityDBList.addAll(mPrivs); LOG.debug("Done retrieving 
all objects for listPrincipalDBGrants"); } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } return mSecurityDBList; } @@ -5437,12 +5289,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) LOG.debug("Done retrieving all objects for listAllTableGrants"); } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } return mSecurityTabList; } @@ -5469,12 +5316,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) LOG.debug("Done retrieving all objects for listTableAllPartitionGrants"); } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } return mSecurityTabPartList; } @@ -5502,12 +5344,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) LOG.debug("Done retrieving all objects for listTableAllColumnGrants"); } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } return mTblColPrivilegeList; } @@ -5536,12 +5373,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) LOG.debug("Done retrieving all objects for listTableAllPartitionColumnGrants"); } finally { - if (!success) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(success, query); } return mSecurityColList; } @@ -5584,7 +5416,6 @@ public void dropPartitionAllColumnGrantsNoTxn( private List listDatabaseGrants(String dbName, QueryWrapper queryWrapper) { dbName = HiveStringUtils.normalizeIdentifier(dbName); boolean success = false; - try { LOG.debug("Executing listDatabaseGrants"); @@ -5692,12 +5523,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List partName) throws In public Collection executeJDOQLSelect(String queryStr, QueryWrapper queryWrapper) { boolean committed = false; Collection result = null; - try { openTransaction(); Query query = queryWrapper.query = pm.newQuery(queryStr); @@ -6504,12 +6269,7 @@ public long executeJDOQLUpdate(String queryStr) { return -1; } } finally { - if (!committed) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(committed, query); } } @@ -6539,12 +6299,7 @@ public long executeJDOQLUpdate(String queryStr) { return null; } } finally { - if (!committed) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(committed, query); } } @@ -6655,12 +6410,7 @@ public UpdateMDatabaseURIRetVal updateMDatabaseURI(URI oldLoc, URI newLoc, boole } return retVal; } finally { - if (!committed) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(committed, query); } } @@ -6748,12 +6498,7 @@ public UpdatePropURIRetVal updateTblPropURI(URI oldLoc, URI newLoc, String tblPr } return retVal; } finally { - if (!committed) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(committed, query); } } @@ -6787,12 +6532,7 @@ public UpdatePropURIRetVal updateMStorageDescriptorTblPropURI(URI oldLoc, URI ne } return retVal; } finally { - if (!committed) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(committed, query); } } @@ -6883,12 +6623,7 @@ public 
UpdateMStorageDescriptorTblURIRetVal updateMStorageDescriptorTblURI(URI o } return retVal; } finally { - if (!committed) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(committed, query); } } @@ -6965,12 +6700,7 @@ public UpdateSerdeURIRetVal updateSerdeURI(URI oldLoc, URI newLoc, String serdeP } return retVal; } finally { - if (!committed) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(committed, query); } } @@ -7181,7 +6911,6 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List()); try { openTransaction(); long lastEvent = rqst.getLastEvent(); @@ -8269,11 +7951,9 @@ public NotificationEventResponse getNextNotification(NotificationEventRequest rq Collection events = (Collection) query.execute(lastEvent); commited = commitTransaction(); if (events == null) { - return null; + return result; } Iterator i = events.iterator(); - NotificationEventResponse result = new NotificationEventResponse(); - result.setEvents(new ArrayList()); int maxEvents = rqst.getMaxEvents() > 0 ? rqst.getMaxEvents() : Integer.MAX_VALUE; int numEvents = 0; while (i.hasNext() && numEvents++ < maxEvents) { @@ -8281,11 +7961,8 @@ public NotificationEventResponse getNextNotification(NotificationEventRequest rq } return result; } finally { - if (query != null) { - query.closeAll(); - } if (!commited) { - rollbackTransaction(); + rollbackAndCleanup(commited, query); return null; } } @@ -8315,12 +7992,7 @@ public void addNotificationEvent(NotificationEvent entry) { pm.makePersistent(translateThriftToDb(entry)); commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } } @@ -8340,12 +8012,7 @@ public void cleanNotificationEvents(int olderThan) { } commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } } @@ -8364,12 +8031,7 @@ public CurrentNotificationEventId getCurrentNotificationEventId() { commited = commitTransaction(); return new CurrentNotificationEventId(id); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } } @@ -8586,12 +8248,7 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN } commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return primaryKeys; } @@ -8616,12 +8273,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro } commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return ret; } @@ -8740,12 +8392,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro } commited = commitTransaction(); } finally { - if (!commited) { - rollbackTransaction(); - } - if (query != null) { - query.closeAll(); - } + rollbackAndCleanup(commited, query); } return foreignKeys; } @@ -8773,4 +8420,43 @@ public void dropConstraint(String dbName, String tableName, } } + /** + * This is a cleanup method which is used to rollback a active transaction + * if the success flag is false and close the associated Query object. 
This method is used + * internally and visible for testing purposes only + * @param success Rollback the current active transaction if false + * @param query Query object which needs to be closed + */ + @VisibleForTesting + void rollbackAndCleanup(boolean success, Query query) { + try { + if(!success) { + rollbackTransaction(); + } + } finally { + if (query != null) { + query.closeAll(); + } + } + } + + /** + * This is a cleanup method which is used to rollback a active transaction + * if the success flag is false and close the associated QueryWrapper object. This method is used + * internally and visible for testing purposes only + * @param success Rollback the current active transaction if false + * @param queryWrapper QueryWrapper object which needs to be closed + */ + @VisibleForTesting + void rollbackAndCleanup(boolean success, QueryWrapper queryWrapper) { + try { + if(!success) { + rollbackTransaction(); + } + } finally { + if (queryWrapper != null) { + queryWrapper.close(); + } + } + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index 6f4f031710..5b40835768 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -93,6 +93,8 @@ @CanNotRetry public abstract boolean commitTransaction(); + public boolean isActiveTransaction(); + /** * Rolls back the current transaction if it is active */ diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java index 6593fa63b8..ecddb8a2fe 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -138,6 +138,11 @@ public boolean commitTransaction() { return true; } + @Override + public boolean isActiveTransaction() { + return txnNestLevel != 0; + } + @Override public void rollbackTransaction() { txnNestLevel = 0; diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index f64b08d8fe..275797ecfc 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -95,6 +95,11 @@ public boolean commitTransaction() { } } + @Override + public boolean isActiveTransaction() { + return false; + } + // All remaining functions simply delegate to objectStore @Override diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 26828865bc..7f1784efb7 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -108,6 +108,11 @@ public boolean commitTransaction() { return false; } + @Override + public boolean isActiveTransaction() { + return false; + } + @Override public void rollbackTransaction() { diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java index 9b8eaf2ab7..69e8826f53 100644 --- 
a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -59,9 +59,12 @@ import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.jdo.Query; + public class TestObjectStore { private ObjectStore objectStore = null; @@ -153,11 +156,16 @@ public void testNotificationOps() throws InterruptedException { Assert.assertEquals(2, eventResponse.getEventsSize()); Assert.assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId()); + // Verify that getNextNotification(last) returns events after a specified event eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID)); Assert.assertEquals(1, eventResponse.getEventsSize()); Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); + // Verify that getNextNotification(last) returns zero events if there are no more notifications available + eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID)); + Assert.assertEquals(0, eventResponse.getEventsSize()); + // Verify that cleanNotificationEvents() cleans up all old notifications Thread.sleep(1); objectStore.cleanNotificationEvents(1); @@ -405,4 +413,15 @@ public static void dropAllStoreObjects(RawStore store) throws MetaException, Inv } catch (NoSuchObjectException e) { } } + + @Test + public void testQueryCloseOnError() throws Exception { + ObjectStore spy = Mockito.spy(objectStore); + spy.getAllDatabases(); + spy.getAllFunctions(); + spy.getAllTables(DB1); + spy.getPartitionCount(); + Mockito.verify(spy, Mockito.times(3)) + .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.anyObject()); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java index c2a48061ed..9b46ae7098 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLockManager.java @@ -285,8 +285,10 @@ private ZooKeeperHiveLock lock (HiveLockObject key, HiveLockMode mode, int tryNum = 0; ZooKeeperHiveLock ret = null; Set conflictingLocks = new HashSet(); + Exception lastException = null; do { + lastException = null; tryNum++; try { if (tryNum > 1) { @@ -298,26 +300,22 @@ private ZooKeeperHiveLock lock (HiveLockObject key, HiveLockMode mode, break; } } catch (Exception e1) { + lastException = e1; if (e1 instanceof KeeperException) { KeeperException e = (KeeperException) e1; switch (e.code()) { case CONNECTIONLOSS: case OPERATIONTIMEOUT: + case NONODE: + case NODEEXISTS: LOG.debug("Possibly transient ZooKeeper exception: ", e); - continue; + break; default: LOG.error("Serious Zookeeper exception: ", e); break; } - } - if (tryNum >= numRetriesForLock) { - console.printError("Unable to acquire " + key.getData().getLockMode() - + ", " + mode + " lock " + key.getDisplayName() + " after " - + tryNum + " attempts."); - LOG.error("Exceeds maximum retries with errors: ", e1); - printConflictingLocks(key,mode,conflictingLocks); - conflictingLocks.clear(); - throw new LockException(e1); + } else { + LOG.error("Other unexpected exception: ", e1); } } } while (tryNum < numRetriesForLock); @@ -327,8 +325,11 @@ private 
ZooKeeperHiveLock lock (HiveLockObject key, HiveLockMode mode, + ", " + mode + " lock " + key.getDisplayName() + " after " + tryNum + " attempts."); printConflictingLocks(key,mode,conflictingLocks); + if (lastException != null) { + LOG.error("Exceeds maximum retries with errors: ", lastException); + throw new LockException(lastException); + } } - conflictingLocks.clear(); return ret; } @@ -350,6 +351,19 @@ private void printConflictingLocks(HiveLockObject key, HiveLockMode mode, } } + /** + * Creates a primitive lock object on ZooKeeper. + * @param key The lock data + * @param mode The lock mode (HiveLockMode - EXCLUSIVE/SHARED/SEMI_SHARED) + * @param keepAlive If true creating PERSISTENT ZooKeeper locks, otherwise EPHEMERAL ZooKeeper + * locks + * @param parentCreated If we expect, that the parent is already created then true, otherwise + * we will try to create the parents as well + * @param conflictingLocks The set where we should collect the conflicting locks when + * the logging level is set to DEBUG + * @return The created ZooKeeperHiveLock object, null if there was a conflicting lock + * @throws Exception If there was an unexpected Exception + */ private ZooKeeperHiveLock lockPrimitive(HiveLockObject key, HiveLockMode mode, boolean keepAlive, boolean parentCreated, Set conflictingLocks) @@ -390,7 +404,7 @@ private ZooKeeperHiveLock lockPrimitive(HiveLockObject key, int seqNo = getSequenceNumber(res, getLockName(lastName, mode)); if (seqNo == -1) { curatorFramework.delete().forPath(res); - return null; + throw new LockException("The created node does not contain a sequence number: " + res); } List children = curatorFramework.getChildren().forPath(lastName); @@ -584,7 +598,6 @@ public static void releaseAllLocks(HiveConf conf) throws Exception { /** * @param conf Hive configuration - * @param zkpClient The ZooKeeper client * @param key The object to be compared against - if key is null, then get all locks **/ private static List getLocks(HiveConf conf, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 8eb011ebe2..109bc3a50e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -280,6 +280,19 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc return super.getSchema(dbName, tableName); } + @Deprecated + @Override + public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl, + boolean cascade) throws InvalidOperationException, MetaException, TException { + org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbname, tbl_name); + if (old_tbl != null) { + //actually temp table does not support partitions, cascade is not applicable here + alterTempTable(dbname, tbl_name, old_tbl, new_tbl, null); + return; + } + super.alter_table(dbname, tbl_name, new_tbl, cascade); + } + @Override public void alter_table(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table new_tbl) throws InvalidOperationException, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index d49708c029..2a6206286a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ 
-3770,7 +3770,7 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) if (outputColNames) { for (int i = 0; i < ccount; ++i) { String colAlias = unescapeIdentifier(((ASTNode) collist.getChild(i)) - .getText()); + .getText()).toLowerCase(); failIfColAliasExists(colAliasNamesDuplicateCheck, colAlias); String intName = getColumnInternalName(i); ColumnInfo colInfo = new ColumnInfo(intName, @@ -3783,7 +3783,7 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) ASTNode child = (ASTNode) collist.getChild(i); assert child.getType() == HiveParser.TOK_TABCOL; String colAlias = unescapeIdentifier(((ASTNode) child.getChild(0)) - .getText()); + .getText()).toLowerCase(); failIfColAliasExists(colAliasNamesDuplicateCheck, colAlias); String intName = getColumnInternalName(i); ColumnInfo colInfo = new ColumnInfo(intName, TypeInfoUtils diff --git a/ql/src/test/queries/clientpositive/date_withtimestamp.q b/ql/src/test/queries/clientpositive/date_withtimestamp.q new file mode 100644 index 0000000000..b6d04f55ac --- /dev/null +++ b/ql/src/test/queries/clientpositive/date_withtimestamp.q @@ -0,0 +1,3 @@ +select "2016-12-29 23:59:59" < cast("2016-12-30" as date); +select "2016-12-30 00:00:00" = cast("2016-12-30" as date); +select "2016-12-31 00:00:01" > cast("2016-12-30" as date); diff --git a/ql/src/test/queries/clientpositive/drop_deleted_partitions.q b/ql/src/test/queries/clientpositive/drop_deleted_partitions.q new file mode 100644 index 0000000000..a758b1b89e --- /dev/null +++ b/ql/src/test/queries/clientpositive/drop_deleted_partitions.q @@ -0,0 +1,18 @@ +create database dmp; + +create table dmp.mp (a string) partitioned by (b string, c string) location '/tmp/dmp_mp'; + +alter table dmp.mp add partition (b='1', c='1'); + +show partitions dmp.mp; + +dfs -rm -R /tmp/dmp_mp/b=1; + +explain extended alter table dmp.mp drop partition (b='1'); +alter table dmp.mp drop partition (b='1'); + +show partitions dmp.mp; + +drop table dmp.mp; + +drop database dmp; diff --git a/ql/src/test/queries/clientpositive/transform3.q b/ql/src/test/queries/clientpositive/transform3.q new file mode 100644 index 0000000000..4a2a36800b --- /dev/null +++ b/ql/src/test/queries/clientpositive/transform3.q @@ -0,0 +1,6 @@ +CREATE TABLE transform3_t1 (col string); +INSERT OVERWRITE TABLE transform3_t1 VALUES('aaaa'); + +SELECT t.newCol FROM ( + SELECT TRANSFORM(col) USING 'cat' AS (NewCol string) FROM transform3_t1 +) t; diff --git a/ql/src/test/results/clientpositive/annotate_stats_select.q.out b/ql/src/test/results/clientpositive/annotate_stats_select.q.out index 873f1abb25..489f4d2418 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_select.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_select.q.out @@ -470,9 +470,9 @@ STAGE PLANS: alias: alltypes_orc Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: null (type: date) + expressions: 1970-12-31 (type: date) outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: explain select cast("58.174" as DECIMAL) from alltypes_orc diff --git a/ql/src/test/results/clientpositive/constantfolding.q.out b/ql/src/test/results/clientpositive/constantfolding.q.out index 10e185f00b..f9a9d2490e 100644 --- a/ql/src/test/results/clientpositive/constantfolding.q.out +++ 
b/ql/src/test/results/clientpositive/constantfolding.q.out @@ -205,9 +205,9 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: null (type: date) + expressions: 1970-12-31 (type: date) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE diff --git a/ql/src/test/results/clientpositive/date_withtimestamp.q.out b/ql/src/test/results/clientpositive/date_withtimestamp.q.out new file mode 100644 index 0000000000..3661888d8b --- /dev/null +++ b/ql/src/test/results/clientpositive/date_withtimestamp.q.out @@ -0,0 +1,27 @@ +PREHOOK: query: select "2016-12-29 23:59:59" < cast("2016-12-30" as date) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select "2016-12-29 23:59:59" < cast("2016-12-30" as date) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +true +PREHOOK: query: select "2016-12-30 00:00:00" = cast("2016-12-30" as date) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select "2016-12-30 00:00:00" = cast("2016-12-30" as date) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +true +PREHOOK: query: select "2016-12-31 00:00:01" > cast("2016-12-30" as date) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select "2016-12-31 00:00:01" > cast("2016-12-30" as date) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +true diff --git a/ql/src/test/results/clientpositive/drop_deleted_partitions.q.out b/ql/src/test/results/clientpositive/drop_deleted_partitions.q.out new file mode 100644 index 0000000000..e543158ffe --- /dev/null +++ b/ql/src/test/results/clientpositive/drop_deleted_partitions.q.out @@ -0,0 +1,74 @@ +PREHOOK: query: create database dmp +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:dmp +POSTHOOK: query: create database dmp +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:dmp +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:dmp +PREHOOK: Output: dmp@mp +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:dmp +POSTHOOK: Output: dmp@mp +PREHOOK: query: alter table dmp.mp add partition (b='1', c='1') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: dmp@mp +POSTHOOK: query: alter table dmp.mp add partition (b='1', c='1') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: dmp@mp +POSTHOOK: Output: dmp@mp@b=1/c=1 +PREHOOK: query: show partitions dmp.mp +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: dmp@mp +POSTHOOK: query: show partitions dmp.mp +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: dmp@mp +b=1/c=1 +#### A masked pattern was here #### +PREHOOK: query: explain extended alter table dmp.mp drop partition (b='1') +PREHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: query: explain extended alter table dmp.mp drop partition (b='1') +POSTHOOK: type: ALTERTABLE_DROPPARTS +STAGE DEPENDENCIES: + Stage-0 is a 
diff --git a/ql/src/test/results/clientpositive/drop_deleted_partitions.q.out b/ql/src/test/results/clientpositive/drop_deleted_partitions.q.out
new file mode 100644
index 0000000000..e543158ffe
--- /dev/null
+++ b/ql/src/test/results/clientpositive/drop_deleted_partitions.q.out
@@ -0,0 +1,74 @@
+PREHOOK: query: create database dmp
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:dmp
+POSTHOOK: query: create database dmp
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:dmp
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:dmp
+PREHOOK: Output: dmp@mp
+#### A masked pattern was here ####
+POSTHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+POSTHOOK: Output: database:dmp
+POSTHOOK: Output: dmp@mp
+PREHOOK: query: alter table dmp.mp add partition (b='1', c='1')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: dmp@mp
+POSTHOOK: query: alter table dmp.mp add partition (b='1', c='1')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: dmp@mp
+POSTHOOK: Output: dmp@mp@b=1/c=1
+PREHOOK: query: show partitions dmp.mp
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: dmp@mp
+POSTHOOK: query: show partitions dmp.mp
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: dmp@mp
+b=1/c=1
+#### A masked pattern was here ####
+PREHOOK: query: explain extended alter table dmp.mp drop partition (b='1')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: query: explain extended alter table dmp.mp drop partition (b='1')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Drop Table Operator:
+      Drop Table
+        table: dmp.mp
+
+PREHOOK: query: alter table dmp.mp drop partition (b='1')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: dmp@mp
+PREHOOK: Output: dmp@mp@b=1/c=1
+POSTHOOK: query: alter table dmp.mp drop partition (b='1')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: dmp@mp
+POSTHOOK: Output: dmp@mp@b=1/c=1
+PREHOOK: query: show partitions dmp.mp
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: dmp@mp
+POSTHOOK: query: show partitions dmp.mp
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: dmp@mp
+PREHOOK: query: drop table dmp.mp
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: dmp@mp
+PREHOOK: Output: dmp@mp
+POSTHOOK: query: drop table dmp.mp
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: dmp@mp
+POSTHOOK: Output: dmp@mp
+PREHOOK: query: drop database dmp
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:dmp
+PREHOOK: Output: database:dmp
+POSTHOOK: query: drop database dmp
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:dmp
+POSTHOOK: Output: database:dmp
diff --git a/ql/src/test/results/clientpositive/transform3.q.out b/ql/src/test/results/clientpositive/transform3.q.out
new file mode 100644
index 0000000000..5f93ed8127
--- /dev/null
+++ b/ql/src/test/results/clientpositive/transform3.q.out
@@ -0,0 +1,28 @@
+PREHOOK: query: CREATE TABLE transform3_t1 (col string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@transform3_t1
+POSTHOOK: query: CREATE TABLE transform3_t1 (col string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@transform3_t1
+PREHOOK: query: INSERT OVERWRITE TABLE transform3_t1 VALUES('aaaa')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@transform3_t1
+POSTHOOK: query: INSERT OVERWRITE TABLE transform3_t1 VALUES('aaaa')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@transform3_t1
+POSTHOOK: Lineage: transform3_t1.col SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: SELECT t.newCol FROM (
+  SELECT TRANSFORM(col) USING 'cat' AS (NewCol string) FROM transform3_t1
+) t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@transform3_t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT t.newCol FROM (
+  SELECT TRANSFORM(col) USING 'cat' AS (NewCol string) FROM transform3_t1
+) t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@transform3_t1
+#### A masked pattern was here ####
+aaaa
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java
index 9642a7e234..27af6ec2c4 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java
@@ -1072,16 +1072,26 @@ public static Date getDate(Object o, PrimitiveObjectInspector oi) {
       try {
         result = Date.valueOf(s);
       } catch (IllegalArgumentException e) {
-        result = null;
+        Timestamp ts = getTimestampFromString(s);
+        if (ts != null) {
+          result = new Date(ts.getTime());
+        } else {
+          result = null;
+        }
       }
       break;
     case CHAR:
     case VARCHAR: {
+      String val = getString(o, oi).trim();
       try {
-        String val = getString(o, oi).trim();
         result = Date.valueOf(val);
       } catch (IllegalArgumentException e) {
-        result = null;
+        Timestamp ts = getTimestampFromString(val);
+        if (ts != null) {
+          result = new Date(ts.getTime());
+        } else {
+          result = null;
+        }
       }
       break;
     }
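Note: the getDate() change above is the core of the date handling fix: a string that fails the strict date format is retried as a timestamp and truncated to its date part instead of becoming null, which is presumably what lets the comparisons in date_withtimestamp.q come out true. A self-contained sketch of the same fallback, using plain java.sql parsing as a stand-in for Hive's getTimestampFromString helper:

    import java.sql.Date;
    import java.sql.Timestamp;

    public class DateFallbackSketch {
      // Mirrors the patched logic: try the strict date format first, then fall back
      // to parsing the string as a timestamp and keeping only its date part.
      static Date toDate(String s) {
        try {
          return Date.valueOf(s);                    // accepts "yyyy-[m]m-[d]d"
        } catch (IllegalArgumentException e) {
          try {
            Timestamp ts = Timestamp.valueOf(s);     // accepts "yyyy-mm-dd hh:mm:ss[.f...]"
            return new Date(ts.getTime());
          } catch (IllegalArgumentException e2) {
            return null;                             // still not parseable
          }
        }
      }

      public static void main(String[] args) {
        System.out.println(toDate("2016-12-30"));          // 2016-12-30
        System.out.println(toDate("2016-12-30 00:00:00")); // 2016-12-30 (previously null)
        System.out.println(toDate("not a date"));          // null
      }
    }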
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index 0483e91c4b..985a5bd169 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -39,6 +39,7 @@ import javax.security.auth.Subject;

 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.Options;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
@@ -1200,6 +1201,14 @@ public boolean arePathsOnSameEncryptionZone(Path path1, Path path2,
           ((HdfsEncryptionShim)encryptionShim2).hdfsAdmin.getEncryptionZoneForPath(path2));
     }

+    /**
+     * Compares two encryption key strengths.
+     *
+     * @param path1 First path to compare
+     * @param path2 Second path to compare
+     * @return 1 if path1 is stronger; 0 if paths are equal; -1 if path1 is weaker.
+     * @throws IOException If an error occurred attempting to get key metadata
+     */
     @Override
     public int comparePathKeyStrength(Path path1, Path path2) throws IOException {
       EncryptionZone zone1, zone2;
@@ -1215,7 +1224,7 @@ public int comparePathKeyStrength(Path path1, Path path2) throws IOException {
         return 1;
       }

-      return compareKeyStrength(zone1.getKeyName(), zone2.getKeyName());
+      return compareKeyStrength(zone1, zone2);
     }

     @Override
@@ -1267,28 +1276,28 @@ private void checkKeyProvider() throws IOException {
     /**
      * Compares two encryption key strengths.
      *
-     * @param keyname1 Keyname to compare
-     * @param keyname2 Keyname to compare
-     * @return 1 if path1 is stronger; 0 if paths are equals; -1 if path1 is weaker.
+     * @param zone1 First EncryptionZone to compare
+     * @param zone2 Second EncryptionZone to compare
+     * @return 1 if zone1 is stronger; 0 if zones are equal; -1 if zone1 is weaker.
      * @throws IOException If an error occurred attempting to get key metadata
      */
-    private int compareKeyStrength(String keyname1, String keyname2) throws IOException {
-      KeyProvider.Metadata meta1, meta2;
+    private int compareKeyStrength(EncryptionZone zone1, EncryptionZone zone2) throws IOException {

-      if (keyProvider == null) {
-        throw new IOException("HDFS security key provider is not configured on your server.");
-      }
+      // zone1, zone2 should already have been checked for nulls.
+      assert zone1 != null && zone2 != null : "Neither EncryptionZone under comparison can be null.";

-      meta1 = keyProvider.getMetadata(keyname1);
-      meta2 = keyProvider.getMetadata(keyname2);
+      CipherSuite suite1 = zone1.getSuite();
+      CipherSuite suite2 = zone2.getSuite();

-      if (meta1.getBitLength() < meta2.getBitLength()) {
-        return -1;
-      } else if (meta1.getBitLength() == meta2.getBitLength()) {
+      if (suite1 == null && suite2 == null) {
         return 0;
-      } else {
+      } else if (suite1 == null) {
+        return -1;
+      } else if (suite2 == null) {
         return 1;
       }
+
+      return Integer.compare(suite1.getAlgorithmBlockSize(), suite2.getAlgorithmBlockSize());
     }
   }
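Note on comparePathKeyStrength: instead of looking up key bit lengths through the KeyProvider (and failing when no provider is configured), the shim now ranks the zones by their cipher suite, treating a missing suite as weaker and both missing as equal, with the suite's algorithm block size used as the strength measure. A runnable sketch of that ordering with a stand-in Suite type (the real code uses Hadoop's EncryptionZone and CipherSuite):

    public class KeyStrengthSketch {
      // Stand-in for org.apache.hadoop.crypto.CipherSuite, so the sketch compiles on its own.
      static final class Suite {
        final int algorithmBlockSize;
        Suite(int algorithmBlockSize) { this.algorithmBlockSize = algorithmBlockSize; }
      }

      // Same ordering rules as the patched method: both-null compares equal,
      // a null suite is weaker, otherwise compare the algorithm block size.
      static int compareStrength(Suite suite1, Suite suite2) {
        if (suite1 == null && suite2 == null) {
          return 0;
        } else if (suite1 == null) {
          return -1;
        } else if (suite2 == null) {
          return 1;
        }
        return Integer.compare(suite1.algorithmBlockSize, suite2.algorithmBlockSize);
      }

      public static void main(String[] args) {
        System.out.println(compareStrength(new Suite(16), new Suite(16))); // 0
        System.out.println(compareStrength(null, new Suite(16)));          // -1
        System.out.println(compareStrength(new Suite(32), new Suite(16))); // 1
      }
    }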
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
index d4b63f0e69..5c42bcc632 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
@@ -54,6 +54,7 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;

+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -405,22 +406,24 @@ public void run() {
          String principal = SecurityUtil.getServerPrincipal(hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL),
              "0.0.0.0");
          String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB);
-          if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
-            List<String> kinitArgv = Lists.newLinkedList();
-            kinitArgv.add("kinit");
-            kinitArgv.add(principal);
-            kinitArgv.add("-k");
-            kinitArgv.add("-t");
-            kinitArgv.add(keyTabFile + ";");
-            kinitArgv.addAll(argv);
-            argv = kinitArgv;
-          } else {
-            // if doAs is not enabled, we pass the principal/keypad to spark-submit in order to
-            // support the possible delegation token renewal in Spark
-            argv.add("--principal");
-            argv.add(principal);
-            argv.add("--keytab");
-            argv.add(keyTabFile);
+          if (StringUtils.isNotBlank(principal) && StringUtils.isNotBlank(keyTabFile)) {
+            if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
+              List<String> kinitArgv = Lists.newLinkedList();
+              kinitArgv.add("kinit");
+              kinitArgv.add(principal);
+              kinitArgv.add("-k");
+              kinitArgv.add("-t");
+              kinitArgv.add(keyTabFile + ";");
+              kinitArgv.addAll(argv);
+              argv = kinitArgv;
+            } else {
+              // if doAs is not enabled, we pass the principal/keytab to spark-submit in order to
+              // support the possible delegation token renewal in Spark
+              argv.add("--principal");
+              argv.add(principal);
+              argv.add("--keytab");
+              argv.add(keyTabFile);
+            }
          }
        }
        if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {