diff --git conf/hive-default.xml.template conf/hive-default.xml.template
index ba5b8a9..653f5cc 100644
--- conf/hive-default.xml.template
+++ conf/hive-default.xml.template
@@ -1,5 +1,7 @@
-
+-->
+
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index bace609..8230584 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -87,7 +87,6 @@
 import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
 import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest;
 import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleResponse;
-import org.apache.hadoop.hive.metastore.api.GrantRevokeType;
 import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
 import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
 import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
@@ -3289,6 +3288,9 @@ private Index add_index_core(final RawStore ms, final Index index, final Table i
       boolean success = false, indexTableCreated = false;
 
+      String[] qualified =
+          MetaStoreUtils.getQualifiedName(index.getDbName(), index.getIndexTableName());
+
       try {
         ms.openTransaction();
         Index old_index = null;
@@ -3311,7 +3313,7 @@ private Index add_index_core(final RawStore ms, final Index index, final Table i
         Table indexTbl = indexTable;
         if (indexTbl != null) {
           try {
-            indexTbl = ms.getTable(index.getDbName(), index.getIndexTableName());
+            indexTbl = ms.getTable(qualified[0], qualified[1]);
           } catch (Exception e) {
           }
           if (indexTbl != null) {
@@ -3332,7 +3334,7 @@ private Index add_index_core(final RawStore ms, final Index index, final Table i
         if (!success) {
           if (indexTableCreated) {
             try {
-              this.drop_table(index.getDbName(), index.getIndexTableName(), false);
+              drop_table(qualified[0], qualified[1], false);
             } catch (Exception e) {
             }
           }
@@ -3386,8 +3388,8 @@ private boolean drop_index_by_name_core(final RawStore ms,
       String idxTblName = index.getIndexTableName();
       if (idxTblName != null) {
-        Table tbl = null;
-        tbl = this.get_table(dbName, idxTblName);
+        String[] qualified = MetaStoreUtils.getQualifiedName(index.getDbName(), idxTblName);
+        Table tbl = get_table(qualified[0], qualified[1]);
         if (tbl.getSd() == null) {
           throw new MetaException("Table metadata is corrupted");
         }
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 5a56ced..51c3f2c 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -1538,4 +1538,12 @@ public static int getArchivingLevel(Partition part) throws MetaException {
       return part.getValues().size();
     }
   }
+
+  public static String[] getQualifiedName(String defaultDbName, String tableName) {
+    String[] names = tableName.split("\\.");
+    if (names.length == 1) {
+      return new String[] { defaultDbName, tableName };
+    }
+    return new String[] { names[0], names[1] };
+  }
 }
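The new MetaStoreUtils.getQualifiedName helper is what lets a single table-name string carry an optional database qualifier through the metastore calls above. A minimal sketch of its behavior (the database and table names are made up for illustration):

    // Unqualified names fall back to the supplied default database.
    String[] plain = MetaStoreUtils.getQualifiedName("default", "tab1");
    // plain == {"default", "tab1"}

    // A dotted name overrides the default.
    String[] dotted = MetaStoreUtils.getQualifiedName("default", "db1.tab1");
    // dotted == {"db1", "tab1"}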
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 5e2cad7..c3c00fa 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -2847,7 +2847,8 @@ private MIndex convertToMIndex(Index index) throws InvalidObjectException,
           "Original table does not exist for the given index.");
     }
-    MTable indexTable = getMTable(index.getDbName(), index.getIndexTableName());
+    String[] qualified = MetaStoreUtils.getQualifiedName(index.getDbName(), index.getIndexTableName());
+    MTable indexTable = getMTable(qualified[0], qualified[1]);
     if (indexTable == null) {
       throw new InvalidObjectException(
           "Underlying index table does not exist for the given index.");
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index ee074ea..97991c2 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -668,9 +668,9 @@ private int showGrants(ShowGrantDesc showGrantDesc) throws HiveException {
         privs.addAll(db.showPrivilegeGrant(HiveObjectType.DATABASE, principalName, type,
             dbName, null, null, null));
       } else {
-        if (showGrantDesc.getColumns() != null) {
+        if (hiveObjectDesc.getColumns() != null) {
           // show column level privileges
-          for (String columnName : showGrantDesc.getColumns()) {
+          for (String columnName : hiveObjectDesc.getColumns()) {
             privs.addAll(db.showPrivilegeGrant(
                 HiveObjectType.COLUMN, principalName, type,
                 dbName, tableName, partValues,
@@ -1143,8 +1143,7 @@ private int alterDatabase(AlterDatabaseDesc alterDbDesc) throws HiveException {
   }
 
   private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException {
-    db.dropIndex(SessionState.get().getCurrentDatabase(), dropIdx.getTableName(),
-        dropIdx.getIndexName(), true);
+    db.dropIndex(dropIdx.getTableName(), dropIdx.getIndexName(), true);
     return 0;
   }
 
@@ -1154,11 +1153,7 @@ private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException
       validateSerDe(crtIndex.getSerde());
     }
 
-    String indexTableName =
-        crtIndex.getIndexTableName() != null ? crtIndex.getIndexTableName() :
-        MetaStoreUtils.getIndexTableName(SessionState.get().getCurrentDatabase(),
-            crtIndex.getTableName(), crtIndex.getIndexName());
-
+    String indexTableName = crtIndex.getIndexTableName();
     if (!Utilities.isDefaultNameNode(conf)) {
       // If location is specified - ensure that it is a full qualified name
       makeLocationQualified(crtIndex, indexTableName);
@@ -1181,10 +1176,9 @@ private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException
   }
 
   private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException {
-    String dbName = alterIndex.getDbName();
     String baseTableName = alterIndex.getBaseTableName();
     String indexName = alterIndex.getIndexName();
-    Index idx = db.getIndex(dbName, baseTableName, indexName);
+    Index idx = db.getIndex(baseTableName, indexName);
 
     switch(alterIndex.getOp()) {
     case ADDPROPS:
@@ -1195,8 +1189,7 @@ private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException
       Map<String, String> props = new HashMap<String, String>();
       Map<List<String>, Long> basePartTs = new HashMap<List<String>, Long>();
 
-      Table baseTbl = db.getTable(SessionState.get().getCurrentDatabase(),
-          baseTableName);
+      Table baseTbl = db.getTable(baseTableName);
 
      if (baseTbl.isPartitioned()) {
         List<Partition> baseParts;
@@ -1243,7 +1236,7 @@ private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException
       }
 
       try {
-        db.alterIndex(dbName, baseTableName, indexName, idx);
+        db.alterIndex(baseTableName, indexName, idx);
       } catch (InvalidOperationException e) {
         console.printError("Invalid alter operation: " + e.getMessage());
         LOG.info("alter index: " + stringifyException(e));
@@ -1404,14 +1397,11 @@ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionD
 
   private int touch(Hive db, AlterTableSimpleDesc touchDesc)
       throws HiveException {
-    String dbName = touchDesc.getDbName();
-    String tblName = touchDesc.getTableName();
-
-    Table tbl = db.getTable(dbName, tblName);
+    Table tbl = db.getTable(touchDesc.getTableName());
 
     if (touchDesc.getPartSpec() == null) {
       try {
-        db.alterTable(tblName, tbl);
+        db.alterTable(touchDesc.getTableName(), tbl);
       } catch (InvalidOperationException e) {
         throw new HiveException("Uable to update table");
       }
@@ -1423,7 +1413,7 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc)
         throw new HiveException("Specified partition does not exist");
       }
       try {
-        db.alterPartition(tblName, part);
+        db.alterPartition(touchDesc.getTableName(), part);
       } catch (InvalidOperationException e) {
         throw new HiveException(e);
       }
@@ -1562,10 +1552,8 @@ boolean partitionInCustomLocation(Table tbl, Partition p)
 
   private int archive(Hive db, AlterTableSimpleDesc simpleDesc,
       DriverContext driverContext)
       throws HiveException {
-    String dbName = simpleDesc.getDbName();
-    String tblName = simpleDesc.getTableName();
-    Table tbl = db.getTable(dbName, tblName);
+    Table tbl = db.getTable(simpleDesc.getTableName());
 
     if (tbl.getTableType() != TableType.MANAGED_TABLE) {
       throw new HiveException("ARCHIVE can only be performed on managed tables");
@@ -1767,7 +1755,7 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc,
             authority.toString(), harPartitionDir.getPath()); // make in Path to ensure no slash at the end
         setArchived(p, harPath, partSpecInfo.values.size());
-        db.alterPartition(tblName, p);
+        db.alterPartition(simpleDesc.getTableName(), p);
       }
     } catch (Exception e) {
       throw new HiveException("Unable to change the partition info for HAR", e);
@@ -1788,10 +1776,8 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc,
 
   private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc)
       throws HiveException {
-    String dbName = simpleDesc.getDbName();
-    String tblName = simpleDesc.getTableName();
-    Table tbl = db.getTable(dbName, tblName);
+    Table tbl = db.getTable(simpleDesc.getTableName());
 
     // Means user specified a table, not a partition
     if (simpleDesc.getPartSpec() == null) {
@@ -1976,7 +1962,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc)
     for(Partition p: partitions) {
       setUnArchived(p);
       try {
-        db.alterPartition(tblName, p);
+        db.alterPartition(simpleDesc.getTableName(), p);
       } catch (InvalidOperationException e) {
         throw new HiveException(e);
       }
@@ -2025,10 +2011,7 @@ private void msckAddPartitionsOneByOne(Hive db, Table table,
 
   private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException {
 
-    String dbName = desc.getDbName();
-    String tblName = desc.getTableName();
-
-    Table tbl = db.getTable(dbName, tblName);
+    Table tbl = db.getTable(desc.getTableName());
 
     String partName = null;
     if (desc.getPartSpec() == null) {
@@ -2622,15 +2605,7 @@ private int showTables(Hive db, ShowTablesDesc showTbls) throws HiveException {
 
   public int showColumns(Hive db, ShowColumnsDesc showCols) throws HiveException {
 
-    String dbName = showCols.getDbName();
-    String tableName = showCols.getTableName();
-    Table table = null;
-    if (dbName == null) {
-      table = db.getTable(tableName);
-    }
-    else {
-      table = db.getTable(dbName, tableName);
-    }
+    Table table = db.getTable(showCols.getTableName());
 
     // write the results in the file
     DataOutputStream outStream = null;
@@ -4681,10 +4656,12 @@ private void makeLocationQualified(CreateIndexDesc crtIndex, String name) throws
     if (crtIndex.getLocation() == null) {
       // Location is not set, leave it as-is if index doesn't belong to default DB
       // Currently all indexes are created in current DB only
-      if (db.getDatabaseCurrent().getName().equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
+      if (Utilities.getDatabaseName(name).equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) {
         // Default database name path is always ignored, use METASTOREWAREHOUSE and object name
         // instead
-        path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE), name.toLowerCase());
+        String warehouse = HiveConf.getVar(conf, ConfVars.METASTOREWAREHOUSE);
+        String tableName = Utilities.getTableName(name);
+        path = new Path(warehouse, tableName.toLowerCase());
       }
     } else {
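With the per-desc dbName fields gone, DDLTask hands the possibly dot-qualified table name straight to the metadata layer and lets it resolve the database. A hedged sketch of the calling convention this patch establishes (the Hive handle and all names are illustrative assumptions):

    Hive db = Hive.get(conf);
    // Equivalent after this patch; the prefix is parsed downstream instead of
    // being read from SessionState inside DDLTask.
    db.dropIndex("db1.tab1", "idx1", true);  // explicit database
    db.dropIndex("tab1", "idx1", true);      // current session database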
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 4450ad3..3771176 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2044,19 +2044,39 @@ public static String formatBinaryString(byte[] array, int start, int length) {
    * @return String array with two elements, first is db name, second is table name
    * @throws HiveException
    */
-  public static String[] getDbTableName(String dbtable) throws HiveException{
+  public static String[] getDbTableName(String dbtable) throws SemanticException {
+    return getDbTableName(SessionState.get().getCurrentDatabase(), dbtable);
+  }
+
+  public static String[] getDbTableName(String defaultDb, String dbtable) throws SemanticException {
     if(dbtable == null){
       return new String[2];
     }
     String[] names = dbtable.split("\\.");
     switch (names.length) {
-    case 2:
-      return names;
-    case 1:
-      return new String [] {SessionState.get().getCurrentDatabase(), dbtable};
-    default:
-      throw new HiveException(ErrorMsg.INVALID_TABLE_NAME, dbtable);
+      case 2:
+        return names;
+      case 1:
+        return new String [] {defaultDb, dbtable};
+      default:
+        throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, dbtable);
+    }
+  }
+
+  public static String getDatabaseName(String name) throws SemanticException {
+    String[] split = name.split("\\.");
+    if (split.length != 2) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, name);
+    }
+    return split[0];
+  }
+
+  public static String getTableName(String name) throws SemanticException {
+    String[] split = name.split("\\.");
+    if (split.length != 2) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, name);
     }
+    return split[1];
   }
 
   public static void validateColumnNames(List<String> colNames, List<String> checkCols)
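The overloads above make the parsing rule explicit: a dot in the name wins over the default database, and the two single-purpose accessors insist on an already-qualified name. A quick sketch of the intended behavior (values illustrative):

    Utilities.getDbTableName("db1", "tab1");     // {"db1", "tab1"}
    Utilities.getDbTableName("db1", "db2.tab1"); // {"db2", "tab1"}
    Utilities.getDatabaseName("db2.tab1");       // "db2"
    Utilities.getTableName("db2.tab1");          // "tab1"
    // getDatabaseName/getTableName on an unqualified name, or any name with
    // more than one dot, throw SemanticException(ErrorMsg.INVALID_TABLE_NAME).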
Choose another name."); } org.apache.hadoop.hive.metastore.api.StorageDescriptor storageDescriptor = baseTbl.getSd().deepCopy(); @@ -774,7 +775,9 @@ public void createIndex(String tableName, String indexName, String indexHandlerC HiveIndexHandler indexHandler = HiveUtils.getIndexHandler(this.getConf(), indexHandlerClass); if (indexHandler.usesIndexTable()) { - tt = new org.apache.hadoop.hive.ql.metadata.Table(dbName, indexTblName).getTTable(); + String idname = Utilities.getDatabaseName(indexTblName); + String itname = Utilities.getTableName(indexTblName); + tt = new org.apache.hadoop.hive.ql.metadata.Table(idname, itname).getTTable(); List partKeys = baseTbl.getPartitionKeys(); tt.setPartitionKeys(partKeys); tt.setTableType(TableType.INDEX_TABLE.toString()); @@ -798,7 +801,9 @@ public void createIndex(String tableName, String indexName, String indexHandlerC throw new RuntimeException("Please specify deferred rebuild using \" WITH DEFERRED REBUILD \"."); } - Index indexDesc = new Index(indexName, indexHandlerClass, dbName, tableName, time, time, indexTblName, + String tdname = Utilities.getDatabaseName(tableName); + String ttname = Utilities.getTableName(tableName); + Index indexDesc = new Index(indexName, indexHandlerClass, tdname, ttname, time, time, indexTblName, storageDescriptor, params, deferredRebuild); if (indexComment != null) { indexDesc.getParameters().put("comment", indexComment); @@ -818,19 +823,6 @@ public void createIndex(String tableName, String indexName, String indexHandlerC } } - public Index getIndex(String qualifiedIndexName) throws HiveException { - String[] names = getQualifiedNames(qualifiedIndexName); - switch (names.length) { - case 3: - return getIndex(names[0], names[1], names[2]); - case 2: - return getIndex(SessionState.get().getCurrentDatabase(), - names[0], names[1]); - default: - throw new HiveException("Invalid index name:" + qualifiedIndexName); - } - } - public Index getIndex(String baseTableName, String indexName) throws HiveException { String[] names = Utilities.getDbTableName(baseTableName); return this.getIndex(names[0], names[1], indexName); @@ -845,6 +837,11 @@ public Index getIndex(String dbName, String baseTableName, } } + public boolean dropIndex(String baseTableName, String index_name, boolean deleteData) throws HiveException { + String[] names = Utilities.getDbTableName(baseTableName); + return dropIndex(names[0], names[1], index_name, deleteData); + } + public boolean dropIndex(String db_name, String tbl_name, String index_name, boolean deleteData) throws HiveException { try { return getMSC().dropIndex(db_name, tbl_name, index_name, deleteData); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java index ae87aac..44844dd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.index.IndexMetadataChangeTask; @@ -103,7 +104,8 @@ private IndexUtils(){ return indexTables; } for (Index index : indexes.get(table)) { - Table indexTable = hive.getTable(index.getIndexTableName()); + String[] qualified = 
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
index ae87aac..44844dd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
+import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.index.IndexMetadataChangeTask;
@@ -103,7 +104,8 @@ private IndexUtils(){
       return indexTables;
     }
     for (Index index : indexes.get(table)) {
-      Table indexTable = hive.getTable(index.getIndexTableName());
+      String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName());
+      Table indexTable = hive.getTable(qualified[0], qualified[1]);
       indexToIndexTable.put(index, indexTable);
       indexTables.add(indexTable);
     }
@@ -121,7 +123,8 @@ private IndexUtils(){
       return indexTables;
     }
     for (Index index : indexes.get(partitionedTable)) {
-      Table indexTable = hive.getTable(index.getIndexTableName());
+      String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName());
+      Table indexTable = hive.getTable(qualified[0], qualified[1]);
       indexToIndexTable.put(index, indexTable);
       indexTables.add(indexTable);
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
index 11a6d07..6d3b77b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
+import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.index.AggregateIndexHandler;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -373,8 +374,9 @@ private boolean checkIfIndexBuiltOnAllTablePartitions(TableScanOperator tableSca
     // index is changed.
     List<String> idxTblColNames = new ArrayList<String>();
     try {
-      Table idxTbl = hiveInstance.getTable(index.getDbName(),
+      String[] qualified = Utilities.getDbTableName(index.getDbName(),
           index.getIndexTableName());
+      Table idxTbl = hiveInstance.getTable(qualified[0], qualified[1]);
       for (FieldSchema idxTblCol : idxTbl.getCols()) {
         idxTblColNames.add(idxTblCol.getName());
       }
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 22945e3..2633819 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -34,6 +34,7 @@
 import org.antlr.runtime.tree.CommonTree;
 import org.antlr.runtime.tree.Tree;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -305,6 +306,27 @@ public static String getUnescapedName(ASTNode tableOrColumnNode, String currentD
     return unescapeIdentifier(tableOrColumnNode.getText());
   }
 
+  public static String[] getQualifiedTableName(ASTNode tabNameNode) throws SemanticException {
+    if (tabNameNode.getToken().getType() != HiveParser.TOK_TABNAME) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME.getMsg(tabNameNode));
+    }
+    if (tabNameNode.getChildCount() == 2) {
+      String dbName = unescapeIdentifier(tabNameNode.getChild(0).getText());
+      String tableName = unescapeIdentifier(tabNameNode.getChild(1).getText());
+      return new String[] {dbName, tableName};
+    }
+    String tableName = unescapeIdentifier(tabNameNode.getChild(0).getText());
+    return new String[]{SessionState.get().getCurrentDatabase(), tableName};
+  }
+
+  public static String getDotName(String[] qname) throws SemanticException {
+    String genericName = StringUtils.join(qname, ".");
+    if (qname.length != 2) {
+      throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, genericName);
+    }
+    return genericName;
+  }
+
   /**
    * Get the unqualified name from a table node.
    *
@@ -817,9 +839,9 @@ public void setColumnAccessInfo(ColumnAccessInfo columnAccessInfo) {
     this.columnAccessInfo = columnAccessInfo;
   }
 
-  protected HashMap<String, String> extractPartitionSpecs(Tree partspec)
+  protected LinkedHashMap<String, String> extractPartitionSpecs(Tree partspec)
       throws SemanticException {
-    HashMap<String, String> partSpec = new LinkedHashMap<String, String>();
+    LinkedHashMap<String, String> partSpec = new LinkedHashMap<String, String>();
     for (int i = 0; i < partspec.getChildCount(); ++i) {
       CommonTree partspec_val = (CommonTree) partspec.getChild(i);
       String val = stripQuotes(partspec_val.getChild(1).getText());
@@ -1176,23 +1198,16 @@ protected Database getDatabase(String dbName, boolean throwException) throws Sem
     }
   }
 
+  protected Table getTable(String[] qualified) throws SemanticException {
+    return getTable(qualified[0], qualified[1], true);
+  }
+
   protected Table getTable(String tblName) throws SemanticException {
     return getTable(null, tblName, true);
   }
 
   protected Table getTable(String tblName, boolean throwException) throws SemanticException {
-    String currentDb = SessionState.get().getCurrentDatabase();
-    return getTable(currentDb, tblName, throwException);
-  }
-
-  // qnName : possibly contains database name (dot separated)
-  protected Table getTableWithQN(String qnName, boolean throwException) throws SemanticException {
-    int dot = qnName.indexOf('.');
-    if (dot < 0) {
-      String currentDb = SessionState.get().getCurrentDatabase();
-      return getTable(currentDb, qnName, throwException);
-    }
-    return getTable(qnName.substring(0, dot), qnName.substring(dot + 1), throwException);
+    return getTable(null, tblName, throwException);
   }
 
   protected Table getTable(String database, String tblName, boolean throwException)
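getQualifiedTableName and getDotName are the two directions of one mapping: a TOK_TABNAME subtree with one or two identifier children on one side, a {db, table} pair or "db.table" string on the other. A sketch of the round trip (the ASTNode is assumed to come from the parser; values illustrative):

    // For "ALTER TABLE db1.tab1 ..." the TOK_TABNAME node has two children.
    String[] qualified = BaseSemanticAnalyzer.getQualifiedTableName(tabNameNode);
    // qualified == {"db1", "tab1"}; with a single child, qualified[0] is the
    // session's current database.
    String dotName = BaseSemanticAnalyzer.getDotName(qualified); // "db1.tab1"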
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index c0322fb..2522e7a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -209,14 +209,14 @@ public static String getTypeName(ASTNode node) throws SemanticException {
   }
 
   static class TablePartition {
-    String tableName;
+    String[] qualified;
     HashMap<String, String> partSpec = null;
 
     public TablePartition() {
     }
 
     public TablePartition(ASTNode tblPart) throws SemanticException {
-      tableName = unescapeIdentifier(tblPart.getChild(0).getText());
+      qualified = getQualifiedTableName((ASTNode) tblPart.getChild(0));
       if (tblPart.getChildCount() > 1) {
         ASTNode part = (ASTNode) tblPart.getChild(1);
         if (part.getToken().getType() == HiveParser.TOK_PARTSPEC) {
@@ -250,7 +250,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
     case HiveParser.TOK_ALTERTABLE_PARTITION: {
       ASTNode tablePart = (ASTNode) ast.getChild(0);
       TablePartition tblPart = new TablePartition(tablePart);
-      String tableName = tblPart.tableName;
+      String tableName = getDotName(tblPart.qualified);
       HashMap<String, String> partSpec = tblPart.partSpec;
       ast = (ASTNode) ast.getChild(1);
       if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) {
@@ -1014,7 +1014,7 @@ private boolean isFullSpec(Table table, Map<String, String> partSpec) {
   private void analyzeCreateIndex(ASTNode ast) throws SemanticException {
     String indexName = unescapeIdentifier(ast.getChild(0).getText());
     String typeName = unescapeSQLString(ast.getChild(1).getText());
-    String tableName = getUnescapedName((ASTNode) ast.getChild(2));
+    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(2));
     List<String> indexedCols = getColumnNames((ASTNode) ast.getChild(3));
 
     IndexType indexType = HiveIndex.getIndexType(typeName);
@@ -1080,8 +1080,14 @@ private void analyzeCreateIndex(ASTNode ast) throws SemanticException {
 
     storageFormat.fillDefaultStorageFormat();
 
+    if (indexTableName == null) {
+      indexTableName = MetaStoreUtils.getIndexTableName(qualified[0], qualified[1], indexName);
+      indexTableName = qualified[0] + "." + indexTableName; // index table lives in the same database as the base table
+    } else {
+      indexTableName = getDotName(Utilities.getDbTableName(indexTableName));
+    }
 
-    CreateIndexDesc crtIndexDesc = new CreateIndexDesc(tableName, indexName,
+    CreateIndexDesc crtIndexDesc = new CreateIndexDesc(getDotName(qualified), indexName,
        indexedCols, indexTableName, deferredRebuild, storageFormat.getInputFormat(),
        storageFormat.getOutputFormat(), storageFormat.getStorageHandler(), typeName,
        location, idxProps, tblProps,
@@ -1115,21 +1121,20 @@ private void analyzeDropIndex(ASTNode ast) throws SemanticException {
   }
 
   private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException {
-    String baseTableName = unescapeIdentifier(ast.getChild(0).getText());
+    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
     String indexName = unescapeIdentifier(ast.getChild(1).getText());
     HashMap<String, String> partSpec = null;
     Tree part = ast.getChild(2);
     if (part != null) {
       partSpec = extractPartitionSpecs(part);
     }
-    List<Task<?>> indexBuilder = getIndexBuilderMapRed(baseTableName, indexName, partSpec);
+    List<Task<?>> indexBuilder = getIndexBuilderMapRed(qualified, indexName, partSpec);
     rootTasks.addAll(indexBuilder);
 
     // Handle updating index timestamps
     AlterIndexDesc alterIdxDesc = new AlterIndexDesc(AlterIndexTypes.UPDATETIMESTAMP);
+    alterIdxDesc.setBaseTableName(getDotName(qualified));
     alterIdxDesc.setIndexName(indexName);
-    alterIdxDesc.setBaseTableName(baseTableName);
-    alterIdxDesc.setDbName(SessionState.get().getCurrentDatabase());
     alterIdxDesc.setSpec(partSpec);
 
     Task<?> tsTask = TaskFactory.get(new DDLWork(alterIdxDesc), conf);
@@ -1141,27 +1146,28 @@ private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException {
   }
 
   private void analyzeAlterIndexProps(ASTNode ast)
       throws SemanticException {
-    String baseTableName = getUnescapedName((ASTNode) ast.getChild(0));
+    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
     String indexName = unescapeIdentifier(ast.getChild(1).getText());
     HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(2))
         .getChild(0));
 
-    AlterIndexDesc alterIdxDesc =
-        new AlterIndexDesc(AlterIndexTypes.ADDPROPS);
-    alterIdxDesc.setProps(mapProp);
+    AlterIndexDesc alterIdxDesc = new AlterIndexDesc(AlterIndexTypes.ADDPROPS);
+    alterIdxDesc.setBaseTableName(getDotName(qualified));
     alterIdxDesc.setIndexName(indexName);
-    alterIdxDesc.setBaseTableName(baseTableName);
-    alterIdxDesc.setDbName(SessionState.get().getCurrentDatabase());
+    alterIdxDesc.setProps(mapProp);
 
     rootTasks.add(TaskFactory.get(new DDLWork(alterIdxDesc), conf));
   }
 
-  private List<Task<?>> getIndexBuilderMapRed(String baseTableName, String indexName,
+  private List<Task<?>> getIndexBuilderMapRed(String[] names, String indexName,
       HashMap<String, String> partSpec) throws SemanticException {
     try {
-      String dbName = SessionState.get().getCurrentDatabase();
-      Index index = db.getIndex(dbName, baseTableName, indexName);
-      Table indexTbl = getTable(index.getIndexTableName());
+      Index index = db.getIndex(names[0], names[1], indexName);
+      Table indexTbl = null;
+      String indexTableName = index.getIndexTableName();
+      if (indexTableName != null) {
+        indexTbl = getTable(Utilities.getDbTableName(index.getDbName(), indexTableName));
+      }
       String baseTblName = index.getOrigTableName();
       Table baseTbl = getTable(baseTblName);
@@ -1630,7 +1636,7 @@ private void analyzeAlterTableCompact(ASTNode ast, String tableName,
     LinkedHashMap<String, String> newPartSpec = null;
     if (partSpec != null) newPartSpec = new LinkedHashMap<String, String>(partSpec);
 
-    AlterTableSimpleDesc desc = new AlterTableSimpleDesc(SessionState.get().getCurrentDatabase(),
+    AlterTableSimpleDesc desc = new AlterTableSimpleDesc(
         tableName, newPartSpec, type);
 
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
@@ -2095,25 +2101,14 @@ private void analyzeShowTables(ASTNode ast) throws SemanticException {
   }
 
   private void analyzeShowColumns(ASTNode ast) throws SemanticException {
-    ShowColumnsDesc showColumnsDesc;
-    String dbName = null;
-    String tableName = null;
-    switch (ast.getChildCount()) {
-    case 1:
-      tableName = getUnescapedName((ASTNode) ast.getChild(0));
-      break;
-    case 2:
-      dbName = getUnescapedName((ASTNode) ast.getChild(0));
-      tableName = getUnescapedName((ASTNode) ast.getChild(1));
-      break;
-    default:
-      break;
+    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
+    if (ast.getChildCount() > 1) {
+      qualified[0] = getUnescapedName((ASTNode) ast.getChild(1));
     }
-
-    Table tab = getTable(dbName, tableName, true);
+    Table tab = getTable(qualified);
     inputs.add(new ReadEntity(tab));
 
-    showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), dbName, tableName);
+    ShowColumnsDesc showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), getDotName(qualified));
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         showColumnsDesc), conf));
     setFetchTask(createFetchTask(showColumnsDesc.getSchema()));
@@ -2154,13 +2149,13 @@ private void analyzeShowTableStatus(ASTNode ast) throws SemanticException {
 
   private void analyzeShowTableProperties(ASTNode ast) throws SemanticException {
     ShowTblPropertiesDesc showTblPropertiesDesc;
-    String tableNames = getUnescapedName((ASTNode) ast.getChild(0));
-    String dbName = SessionState.get().getCurrentDatabase();
+    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
     String propertyName = null;
     if (ast.getChildCount() > 1) {
       propertyName = unescapeSQLString(ast.getChild(1).getText());
     }
 
+    String tableNames = getDotName(qualified);
     validateTable(tableNames, null);
 
     showTblPropertiesDesc = new ShowTblPropertiesDesc(ctx.getResFile().toString(), tableNames,
@@ -2508,7 +2503,7 @@ private void analyzeAlterTableRenamePart(ASTNode ast, String tblName,
     List<Map<String, String>> partSpecs = new ArrayList<Map<String, String>>();
     partSpecs.add(oldPartSpec);
     partSpecs.add(newPartSpec);
-    addTablePartsOutputs(tblName, partSpecs, WriteEntity.WriteType.DDL_EXCLUSIVE);
+    addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_EXCLUSIVE);
     RenamePartitionDesc renamePartitionDesc = new RenamePartitionDesc(
         SessionState.get().getCurrentDatabase(), tblName, oldPartSpec, newPartSpec);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
@@ -2556,8 +2551,8 @@ private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView)
     // popular case but that's kinda hacky. Let's not do it for now.
     boolean canGroupExprs = ifExists;
 
-    String tblName = getUnescapedName((ASTNode) ast.getChild(0));
-    Table tab = getTable(tblName, true);
+    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
+    Table tab = getTable(qualified);
     Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs = getFullPartitionSpecs(ast, tab, canGroupExprs);
     if (partSpecs.isEmpty()) return; // nothing to do
@@ -2571,24 +2566,19 @@ private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView)
     addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists, ignoreProtection);
 
     DropTableDesc dropTblDesc =
-        new DropTableDesc(tblName, partSpecs, expectView, ignoreProtection);
+        new DropTableDesc(getDotName(qualified), partSpecs, expectView, ignoreProtection);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf));
   }
 
   private void analyzeAlterTablePartColType(ASTNode ast)
       throws SemanticException {
     // get table name
-    String tblName = getUnescapedName((ASTNode)ast.getChild(0));
+    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
 
-    Table tab = null;
-
     // check if table exists.
-    try {
-      tab = getTable(tblName, true);
-      inputs.add(new ReadEntity(tab));
-    } catch (HiveException e) {
-      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName));
-    }
+    Table tab = getTable(qualified);
+    inputs.add(new ReadEntity(tab));
 
     // validate the DDL is a valid operation on the table.
     validateAlterTableType(tab, AlterTableTypes.ALTERPARTITION, false);
@@ -2622,7 +2612,7 @@ private void analyzeAlterTablePartColType(ASTNode ast)
     }
 
     AlterTableAlterPartDesc alterTblAlterPartDesc =
-            new AlterTableAlterPartDesc(SessionState.get().getCurrentDatabase(), tblName, newCol);
+            new AlterTableAlterPartDesc(qualified[0], qualified[1], newCol);
 
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
             alterTblAlterPartDesc), conf));
@@ -2645,10 +2635,10 @@ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView)
       throws SemanticException {
 
     // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+)
-    String tblName = getUnescapedName((ASTNode)ast.getChild(0));
+    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
     boolean ifNotExists = ast.getChild(1).getType() == HiveParser.TOK_IFNOTEXISTS;
 
-    Table tab = getTable(tblName, true);
+    Table tab = getTable(qualified);
     boolean isView = tab.isView();
     validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView);
     outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED));
@@ -2659,7 +2649,8 @@ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView)
     String currentLocation = null;
     Map<String, String> currentPart = null;
     // Parser has done some verification, so the order of tokens doesn't need to be verified here.
-    AddPartitionDesc addPartitionDesc = new AddPartitionDesc(tab.getDbName(), tblName, ifNotExists);
+    AddPartitionDesc addPartitionDesc =
+        new AddPartitionDesc(tab.getDbName(), tab.getTableName(), ifNotExists);
     for (int num = start; num < numCh; num++) {
       ASTNode child = (ASTNode) ast.getChild(num);
       switch (child.getToken().getType()) {
@@ -2680,7 +2671,7 @@ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView)
           currentLocation = unescapeSQLString(child.getChild(0).getText());
           boolean isLocal = false;
           try {
-            // do best effor to determine if this is a local file
+            // do best effort to determine if this is a local file
             String scheme = new URI(currentLocation).getScheme();
             if (scheme != null) {
               isLocal = FileUtils.isLocalFile(conf, currentLocation);
@@ -2711,7 +2702,7 @@ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView)
       // Compile internal query to capture underlying table partition dependencies
       StringBuilder cmd = new StringBuilder();
       cmd.append("SELECT * FROM ");
-      cmd.append(HiveUtils.unparseIdentifier(tblName));
+      cmd.append(HiveUtils.unparseIdentifier(getDotName(qualified)));
       cmd.append(" WHERE ");
       boolean firstOr = true;
       for (int i = 0; i < addPartitionDesc.getPartitionCount(); ++i) {
@@ -2772,9 +2763,9 @@ private Partition getPartitionForOutput(Table tab, Map<String, String> currentPa
    */
   private void analyzeAlterTableTouch(CommonTree ast)
       throws SemanticException {
+    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
 
-    String tblName = getUnescapedName((ASTNode)ast.getChild(0));
-    Table tab = getTable(tblName, true);
+    Table tab = getTable(qualified);
     validateAlterTableType(tab, AlterTableTypes.TOUCH);
     inputs.add(new ReadEntity(tab));
 
@@ -2783,16 +2774,16 @@ private void analyzeAlterTableTouch(CommonTree ast)
 
     if (partSpecs.size() == 0) {
       AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc(
-          SessionState.get().getCurrentDatabase(), tblName, null,
+          getDotName(qualified), null,
          AlterTableDesc.AlterTableTypes.TOUCH);
       outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK));
       rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
           touchDesc), conf));
     } else {
-      addTablePartsOutputs(tblName, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK);
+      addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK);
       for (Map<String, String> partSpec : partSpecs) {
         AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc(
-            SessionState.get().getCurrentDatabase(), tblName, partSpec,
+            getDotName(qualified), partSpec,
            AlterTableDesc.AlterTableTypes.TOUCH);
         rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
             touchDesc), conf));
@@ -2807,12 +2798,12 @@ private void analyzeAlterTableArchive(CommonTree ast, boolean isUnArchive)
       throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg());
     }
 
-    String tblName = getUnescapedName((ASTNode) ast.getChild(0));
+    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
     // partition name to value
     List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
 
-    Table tab = getTable(tblName, true);
-    addTablePartsOutputs(tblName, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK);
+    Table tab = getTable(qualified);
+    addTablePartsOutputs(tab, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK);
     validateAlterTableType(tab, AlterTableTypes.ARCHIVE);
     inputs.add(new ReadEntity(tab));
 
@@ -2832,7 +2823,7 @@ private void analyzeAlterTableArchive(CommonTree ast, boolean isUnArchive)
       throw new SemanticException(e.getMessage(), e);
     }
     AlterTableSimpleDesc archiveDesc = new AlterTableSimpleDesc(
-        SessionState.get().getCurrentDatabase(), tblName, partSpec,
+        getDotName(qualified), partSpec,
         (isUnArchive ? AlterTableTypes.UNARCHIVE : AlterTableTypes.ARCHIVE));
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         archiveDesc), conf));
@@ -3000,20 +2991,20 @@ private void validatePartitionValues(Map<String, String> partSpec)
    * Add the table partitions to be modified in the output, so that it is available for the
    * pre-execution hook. If the partition does not exist, no error is thrown.
    */
-  private void addTablePartsOutputs(String tblName, List<Map<String, String>> partSpecs,
+  private void addTablePartsOutputs(Table table, List<Map<String, String>> partSpecs,
      WriteEntity.WriteType writeType)
       throws SemanticException {
-    addTablePartsOutputs(tblName, partSpecs, false, false, null, writeType);
+    addTablePartsOutputs(table, partSpecs, false, false, null, writeType);
   }
 
   /**
   * Add the table partitions to be modified in the output, so that it is available for the
   * pre-execution hook. If the partition does not exist, no error is thrown.
   */
-  private void addTablePartsOutputs(String tblName, List<Map<String, String>> partSpecs,
+  private void addTablePartsOutputs(Table table, List<Map<String, String>> partSpecs,
      boolean allowMany, WriteEntity.WriteType writeType)
       throws SemanticException {
-    addTablePartsOutputs(tblName, partSpecs, false, allowMany, null, writeType);
+    addTablePartsOutputs(table, partSpecs, false, allowMany, null, writeType);
   }
 
  /**
@@ -3021,10 +3012,9 @@ private void addTablePartsOutputs(String tblName, List<Map<String, String>> part
   * pre-execution hook. If the partition does not exist, throw an error if
   * throwIfNonExistent is true, otherwise ignore it.
   */
-  private void addTablePartsOutputs(String tblName, List<Map<String, String>> partSpecs,
+  private void addTablePartsOutputs(Table table, List<Map<String, String>> partSpecs,
      boolean throwIfNonExistent, boolean allowMany, ASTNode ast, WriteEntity.WriteType writeType)
       throws SemanticException {
-    Table tab = getTable(tblName);
 
     Iterator<Map<String, String>> i;
     int index;
@@ -3033,7 +3023,7 @@ private void addTablePartsOutputs(String tblName, List<Map<String, String>> part
       List<Partition> parts = null;
       if (allowMany) {
        try {
-          parts = db.getPartitions(tab, partSpec);
+          parts = db.getPartitions(table, partSpec);
        } catch (HiveException e) {
          LOG.error("Got HiveException during obtaining list of partitions"
              + StringUtils.stringifyException(e));
@@ -3042,7 +3032,7 @@ private void addTablePartsOutputs(String tblName, List<Map<String, String>> part
      } else {
        parts = new ArrayList<Partition>();
        try {
-          Partition p = db.getPartition(tab, partSpec, false);
+          Partition p = db.getPartition(table, partSpec, false);
          if (p != null) {
            parts.add(p);
          }
@@ -3122,17 +3112,18 @@ private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException {
     */
    HiveConf hiveConf = SessionState.get().getConf();
 
-    String tableName = getUnescapedName((ASTNode) ast.getChild(0));
-    Table tab = getTable(tableName, true);
+    String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0));
+    Table tab = getTable(qualified);
 
    inputs.add(new ReadEntity(tab));
    outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
 
    validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY);
 
+    String name = getDotName(qualified);
    if (ast.getChildCount() == 1) {
      /* Convert a skewed table to non-skewed table. */
-      AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true,
+      AlterTableDesc alterTblDesc = new AlterTableDesc(name, true,
          new ArrayList<String>(), new ArrayList<List<String>>());
      alterTblDesc.setStoredAsSubDirectories(false);
      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
@@ -3140,10 +3131,10 @@ private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException {
    } else {
      switch (((ASTNode) ast.getChild(1)).getToken().getType()) {
      case HiveParser.TOK_TABLESKEWED:
-        handleAlterTableSkewedBy(ast, tableName, tab);
+        handleAlterTableSkewedBy(ast, name, tab);
        break;
      case HiveParser.TOK_STOREDASDIRS:
-        handleAlterTableDisableStoredAsDirs(tableName, tab);
+        handleAlterTableDisableStoredAsDirs(name, tab);
        break;
      default:
        assert false;
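The analyzeCreateIndex change is the subtle one: an implicit index table name is now qualified with the base table's database rather than the session's current database. A sketch of the resulting name, assuming MetaStoreUtils.getIndexTableName concatenates its arguments as dbName__tableName_indexName__ (the usual shape of Hive index table names such as default__tab1_idx1__):

    String[] qualified = {"db1", "tab1"};
    String indexTableName =
        MetaStoreUtils.getIndexTableName(qualified[0], qualified[1], "idx1");
    // -> "db1__tab1_idx1__"
    indexTableName = qualified[0] + "." + indexTableName;
    // -> "db1.db1__tab1_idx1__", always in the base table's database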
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index f5d0602..705dc59 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -973,16 +973,14 @@ alterViewStatementSuffix
 alterIndexStatementSuffix
 @init { pushMsg("alter index statement", state); }
 @after { popMsg(state); }
-    : indexName=identifier
-      (KW_ON tableNameId=identifier)
-      partitionSpec?
+    : indexName=identifier KW_ON tableName partitionSpec?
     (
       KW_REBUILD
-      ->^(TOK_ALTERINDEX_REBUILD $tableNameId $indexName partitionSpec?)
+      ->^(TOK_ALTERINDEX_REBUILD tableName $indexName partitionSpec?)
     |
       KW_SET KW_IDXPROPERTIES
       indexProperties
-      ->^(TOK_ALTERINDEX_PROPERTIES $tableNameId $indexName indexProperties)
+      ->^(TOK_ALTERINDEX_PROPERTIES tableName $indexName indexProperties)
     )
     ;
 
@@ -1038,8 +1036,8 @@ alterStatementChangeColPosition
 alterStatementSuffixAddPartitions
 @init { pushMsg("add partition statement", state); }
 @after { popMsg(state); }
-    : identifier KW_ADD ifNotExists? alterStatementSuffixAddPartitionsElement+
-    -> ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+)
+    : tableName KW_ADD ifNotExists? alterStatementSuffixAddPartitionsElement+
+    -> ^(TOK_ALTERTABLE_ADDPARTS tableName ifNotExists? alterStatementSuffixAddPartitionsElement+)
    ;
 
 alterStatementSuffixAddPartitionsElement
@@ -1049,22 +1047,22 @@ alterStatementSuffixAddPartitionsElement
 alterStatementSuffixTouch
 @init { pushMsg("touch statement", state); }
 @after { popMsg(state); }
-    : identifier KW_TOUCH (partitionSpec)*
-    -> ^(TOK_ALTERTABLE_TOUCH identifier (partitionSpec)*)
+    : tableName KW_TOUCH (partitionSpec)*
+    -> ^(TOK_ALTERTABLE_TOUCH tableName (partitionSpec)*)
     ;
 
 alterStatementSuffixArchive
 @init { pushMsg("archive statement", state); }
 @after { popMsg(state); }
-    : identifier KW_ARCHIVE (partitionSpec)*
-    -> ^(TOK_ALTERTABLE_ARCHIVE identifier (partitionSpec)*)
+    : tableName KW_ARCHIVE (partitionSpec)*
+    -> ^(TOK_ALTERTABLE_ARCHIVE tableName (partitionSpec)*)
     ;
 
 alterStatementSuffixUnArchive
 @init { pushMsg("unarchive statement", state); }
 @after { popMsg(state); }
-    : identifier KW_UNARCHIVE (partitionSpec)*
-    -> ^(TOK_ALTERTABLE_UNARCHIVE identifier (partitionSpec)*)
+    : tableName KW_UNARCHIVE (partitionSpec)*
+    -> ^(TOK_ALTERTABLE_UNARCHIVE tableName (partitionSpec)*)
    ;
 
 partitionLocation
@@ -1077,26 +1075,26 @@ partitionLocation
 alterStatementSuffixDropPartitions
 @init { pushMsg("drop partition statement", state); }
 @after { popMsg(state); }
-    : identifier KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection?
-    -> ^(TOK_ALTERTABLE_DROPPARTS identifier dropPartitionSpec+ ifExists? ignoreProtection?)
+    : tableName KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection?
+    -> ^(TOK_ALTERTABLE_DROPPARTS tableName dropPartitionSpec+ ifExists? ignoreProtection?)
    ;
 
 alterStatementSuffixProperties
 @init { pushMsg("alter properties statement", state); }
 @after { popMsg(state); }
-    : name=identifier KW_SET KW_TBLPROPERTIES tableProperties
-    -> ^(TOK_ALTERTABLE_PROPERTIES $name tableProperties)
-    | name=identifier KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties
-    -> ^(TOK_DROPTABLE_PROPERTIES $name tableProperties ifExists?)
+    : tableName KW_SET KW_TBLPROPERTIES tableProperties
+    -> ^(TOK_ALTERTABLE_PROPERTIES tableName tableProperties)
+    | tableName KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties
+    -> ^(TOK_DROPTABLE_PROPERTIES tableName tableProperties ifExists?)
    ;
 
 alterViewSuffixProperties
 @init { pushMsg("alter view properties statement", state); }
 @after { popMsg(state); }
-    : name=identifier KW_SET KW_TBLPROPERTIES tableProperties
-    -> ^(TOK_ALTERVIEW_PROPERTIES $name tableProperties)
-    | name=identifier KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties
-    -> ^(TOK_DROPVIEW_PROPERTIES $name tableProperties ifExists?)
+    : tableName KW_SET KW_TBLPROPERTIES tableProperties
+    -> ^(TOK_ALTERVIEW_PROPERTIES tableName tableProperties)
+    | tableName KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties
+    -> ^(TOK_DROPVIEW_PROPERTIES tableName tableProperties ifExists?)
    ;
 
 alterStatementSuffixSerdeProperties
@@ -1111,8 +1109,8 @@ alterStatementSuffixSerdeProperties
 tablePartitionPrefix
 @init {pushMsg("table partition prefix", state);}
 @after {popMsg(state);}
-  :name=identifier partitionSpec?
-  ->^(TOK_TABLE_PARTITION $name partitionSpec?)
+  : tableName partitionSpec?
+  ->^(TOK_TABLE_PARTITION tableName partitionSpec?)
  ;
 
 alterTblPartitionStatement
@@ -1191,21 +1189,21 @@ alterStatementSuffixLocation
 alterStatementSuffixSkewedby
 @init {pushMsg("alter skewed by statement", state);}
 @after{popMsg(state);}
-  :name=identifier tableSkewed
-  ->^(TOK_ALTERTABLE_SKEWED $name tableSkewed)
+  : tableName tableSkewed
+  ->^(TOK_ALTERTABLE_SKEWED tableName tableSkewed)
  |
-  name=identifier KW_NOT KW_SKEWED
-  ->^(TOK_ALTERTABLE_SKEWED $name)
+  tableName KW_NOT KW_SKEWED
+  ->^(TOK_ALTERTABLE_SKEWED tableName)
  |
-  name=identifier KW_NOT storedAsDirs
-  ->^(TOK_ALTERTABLE_SKEWED $name storedAsDirs)
+  tableName KW_NOT storedAsDirs
+  ->^(TOK_ALTERTABLE_SKEWED tableName storedAsDirs)
  ;
 
 alterStatementSuffixExchangePartition
 @init {pushMsg("alter exchange partition", state);}
 @after{popMsg(state);}
-  : name=tableName KW_EXCHANGE partitionSpec KW_WITH KW_TABLE exchangename=tableName
-  -> ^(TOK_EXCHANGEPARTITION $name partitionSpec $exchangename)
+  : tableName KW_EXCHANGE partitionSpec KW_WITH KW_TABLE exchangename=tableName
+  -> ^(TOK_EXCHANGEPARTITION tableName partitionSpec $exchangename)
  ;
 
 alterStatementSuffixProtectMode
@@ -1314,14 +1312,14 @@ showStatement
 @after { popMsg(state); }
    : KW_SHOW (KW_DATABASES|KW_SCHEMAS) (KW_LIKE showStmtIdentifier)? -> ^(TOK_SHOWDATABASES showStmtIdentifier?)
    | KW_SHOW KW_TABLES ((KW_FROM|KW_IN) db_name=identifier)? (KW_LIKE showStmtIdentifier|showStmtIdentifier)?  -> ^(TOK_SHOWTABLES (TOK_FROM $db_name)? showStmtIdentifier?)
-    | KW_SHOW KW_COLUMNS (KW_FROM|KW_IN) tabname=tableName ((KW_FROM|KW_IN) db_name=identifier)?
-    -> ^(TOK_SHOWCOLUMNS $db_name? $tabname)
+    | KW_SHOW KW_COLUMNS (KW_FROM|KW_IN) tableName ((KW_FROM|KW_IN) db_name=identifier)?
+    -> ^(TOK_SHOWCOLUMNS tableName $db_name?)
    | KW_SHOW KW_FUNCTIONS showFunctionIdentifier?  -> ^(TOK_SHOWFUNCTIONS showFunctionIdentifier?)
    | KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec?)
    | KW_SHOW KW_CREATE KW_TABLE tabName=tableName -> ^(TOK_SHOW_CREATETABLE $tabName)
    | KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=identifier)? KW_LIKE showStmtIdentifier partitionSpec?
    -> ^(TOK_SHOW_TABLESTATUS showStmtIdentifier $db_name? partitionSpec?)
-    | KW_SHOW KW_TBLPROPERTIES tblName=identifier (LPAREN prptyName=StringLiteral RPAREN)? -> ^(TOK_SHOW_TBLPROPERTIES $tblName $prptyName?)
+    | KW_SHOW KW_TBLPROPERTIES tableName (LPAREN prptyName=StringLiteral RPAREN)? -> ^(TOK_SHOW_TBLPROPERTIES tableName $prptyName?)
    | KW_SHOW KW_LOCKS (parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?)
    | KW_SHOW KW_LOCKS KW_DATABASE (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?)
    | KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)?
@@ -1454,26 +1452,25 @@
 privilegeIncludeColObject
 @init {pushMsg("privilege object including columns", state);}
 @after {popMsg(state);}
    : KW_ALL -> ^(TOK_RESOURCE_ALL)
-    | privObjectType identifier (LPAREN cols=columnNameList RPAREN)? partitionSpec?
-    -> ^(TOK_PRIV_OBJECT_COL identifier privObjectType $cols? partitionSpec?)
+    | privObjectCols -> ^(TOK_PRIV_OBJECT_COL privObjectCols)
    ;
 
 privilegeObject
-@init {pushMsg("privilege subject", state);}
+@init {pushMsg("privilege object", state);}
 @after {popMsg(state);}
-    : KW_ON privObjectType identifier partitionSpec?
-    -> ^(TOK_PRIV_OBJECT identifier privObjectType partitionSpec?)
+    : KW_ON privObject -> ^(TOK_PRIV_OBJECT privObject)
    ;
 
-// database or table type. Type is optional, default type is table
-privObjectType
-@init {pushMsg("privilege object type type", state);}
-@after {popMsg(state);}
-    : KW_DATABASE -> ^(TOK_DB_TYPE)
-    | KW_TABLE? -> ^(TOK_TABLE_TYPE)
+privObject
+    : KW_DATABASE identifier -> ^(TOK_DB_TYPE identifier)
+    | KW_TABLE? tableName partitionSpec? -> ^(TOK_TABLE_TYPE tableName partitionSpec?)
    ;
 
+privObjectCols
+    : KW_DATABASE identifier -> ^(TOK_DB_TYPE identifier)
+    | KW_TABLE? tableName (LPAREN cols=columnNameList RPAREN)? partitionSpec? -> ^(TOK_TABLE_TYPE tableName $cols? partitionSpec?)
+    ;
 
 privilegeList
 @init {pushMsg("grant privilege list", state);}
@@ -1543,8 +1540,8 @@ withAdminOption
 metastoreCheck
 @init { pushMsg("metastore check statement", state); }
 @after { popMsg(state); }
-    : KW_MSCK (repair=KW_REPAIR)? (KW_TABLE table=identifier partitionSpec? (COMMA partitionSpec)*)?
-    -> ^(TOK_MSCK $repair? ($table partitionSpec*)?)
+    : KW_MSCK (repair=KW_REPAIR)? (KW_TABLE tableName partitionSpec? (COMMA partitionSpec)*)?
+    -> ^(TOK_MSCK $repair? (tableName partitionSpec*)?)
    ;
 
 resourceList
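The grammar edits above replace the bare identifier rule with tableName, so these DDL statements now accept a database prefix. A few statements the updated parser is intended to accept, collected in a Java array to keep the examples in one language (all names illustrative):

    String[] nowParseable = {
        "ALTER INDEX idx1 ON db1.tab1 REBUILD",
        "ALTER TABLE db1.tab1 ADD PARTITION (ds = '2014-01-01')",
        "ALTER TABLE db1.tab1 TOUCH",
        "ALTER TABLE db1.tab1 DROP PARTITION (ds = '2014-01-01')",
        "SHOW TBLPROPERTIES db1.tab1",
        "MSCK REPAIR TABLE db1.tab1",
        "GRANT SELECT ON TABLE db1.tab1 TO USER hive_user",
    };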
privHiveObj = new PrivilegeObjectDesc(); - //set object name - String text = param.getChild(0).getText(); - privHiveObj.setObject(BaseSemanticAnalyzer.unescapeIdentifier(text)); - //set object type - ASTNode objTypeNode = (ASTNode) param.getChild(1); - privHiveObj.setTable(objTypeNode.getToken().getType() == HiveParser.TOK_TABLE_TYPE); - - //set col and partition spec if specified - for (int i = 2; i < param.getChildCount(); i++) { - ASTNode partOrCol = (ASTNode) param.getChild(i); - if (partOrCol.getType() == HiveParser.TOK_PARTSPEC) { - privHiveObj.setPartSpec(DDLSemanticAnalyzer.getPartSpec(partOrCol)); - } else if (partOrCol.getType() == HiveParser.TOK_TABCOLNAME) { - cols = BaseSemanticAnalyzer.getColumnNames(partOrCol); - } else { - throw new SemanticException("Invalid token type " + partOrCol.getType()); - } - } + privHiveObj = parsePrivObject(param); } } ShowGrantDesc showGrant = new ShowGrantDesc(resultFile.toString(), - principalDesc, privHiveObj, cols); + principalDesc, privHiveObj); return TaskFactory.get(new DDLWork(inputs, outputs, showGrant), conf); } @@ -214,7 +195,7 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { boolean isAdmin = false; if((isGrant && wAdminOption.getToken().getType() == HiveParser.TOK_GRANT_WITH_ADMIN_OPTION) || (!isGrant && wAdminOption.getToken().getType() == HiveParser.TOK_ADMIN_OPTION_FOR)){ - rolesStartPos = 2; //start reading role names from next postion + rolesStartPos = 2; //start reading role names from next position isAdmin = true; } @@ -237,20 +218,10 @@ private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast, HashSet outputs) throws SemanticException { - PrivilegeObjectDesc subject = new PrivilegeObjectDesc(); - //set object identifier - subject.setObject(BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(0).getText())); - //set object type - ASTNode objTypeNode = (ASTNode) ast.getChild(1); - subject.setTable(objTypeNode.getToken().getType() == HiveParser.TOK_TABLE_TYPE); - if (ast.getChildCount() == 3) { - //if partition spec node is present, set partition spec - ASTNode partSpecNode = (ASTNode) ast.getChild(2); - subject.setPartSpec(DDLSemanticAnalyzer.getPartSpec(partSpecNode)); - } + PrivilegeObjectDesc subject = parsePrivObject(ast); if (subject.getTable()) { - Table tbl = getTable(SessionState.get().getCurrentDatabase(), subject.getObject()); + Table tbl = getTable(subject.getObject()); if (subject.getPartSpec() != null) { Partition part = getPartition(tbl, subject.getPartSpec()); outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK)); @@ -262,6 +233,30 @@ private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast, return subject; } + private PrivilegeObjectDesc parsePrivObject(ASTNode ast) throws SemanticException { + PrivilegeObjectDesc subject = new PrivilegeObjectDesc(); + ASTNode child = (ASTNode) ast.getChild(0); + ASTNode gchild = (ASTNode)child.getChild(0); + if (child.getType() == HiveParser.TOK_TABLE_TYPE) { + subject.setTable(true); + String[] qualified = BaseSemanticAnalyzer.getQualifiedTableName(gchild); + subject.setObject(BaseSemanticAnalyzer.getDotName(qualified)); + } else { + subject.setTable(false); + subject.setObject(BaseSemanticAnalyzer.unescapeIdentifier(gchild.getText())); + } + //if partition spec node is present, set partition spec + for (int i = 1; i < child.getChildCount(); i++) { + gchild = (ASTNode) child.getChild(i); + if (gchild.getType() == HiveParser.TOK_PARTSPEC) { + subject.setPartSpec(DDLSemanticAnalyzer.getPartSpec(gchild)); + } else if 
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java
index 0318e4b..db2cf7f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java
@@ -19,13 +19,7 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
-import java.util.ArrayList;
 import java.util.Map;
-import java.util.List;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.ql.exec.Utilities;
 
 /**
  * AlterIndexDesc.
@@ -36,7 +30,6 @@
   private static final long serialVersionUID = 1L;
   private String indexName;
   private String baseTable;
-  private String dbName;
   private Map partSpec; // partition specification of partitions touched
   private Map props;
 
@@ -105,21 +98,6 @@ public void setSpec(Map partSpec) {
   }
 
   /**
-   * @return the name of the database that the base table is in
-   */
-  public String getDbName() {
-    return dbName;
-  }
-
-  /**
-   * @param dbName
-   *          the dbName to set
-   */
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
-
-  /**
    * @return the op
    */
   public AlterIndexTypes getOp() {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java
index 541675c..d819d15 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java
@@ -29,7 +29,6 @@
  */
 public class AlterTableSimpleDesc extends DDLDesc {
   private String tableName;
-  private String dbName;
   private LinkedHashMap partSpec;
   private String compactionType;
 
@@ -39,17 +38,12 @@ public AlterTableSimpleDesc() {
   }
 
   /**
-   * @param dbName
-   *          database that contains the table / partition
    * @param tableName
    *          table containing the partition
    * @param partSpec
-   *          partition specification. Null if touching a table.
    */
-  public AlterTableSimpleDesc(String dbName, String tableName,
-      Map partSpec, AlterTableDesc.AlterTableTypes type) {
-    super();
-    this.dbName = dbName;
+  public AlterTableSimpleDesc(String tableName,
+      Map partSpec, AlterTableTypes type) {
     this.tableName = tableName;
     if(partSpec == null) {
       this.partSpec = null;
@@ -61,16 +55,14 @@ public AlterTableSimpleDesc(String dbName, String tableName,
 
   /**
    * Constructor for ALTER TABLE ... COMPACT.
-   * @param dbname name of the database containing the table
    * @param tableName name of the table to compact
    * @param partSpec partition to compact
    * @param compactionType currently supported values: 'major' and 'minor'
    */
-  public AlterTableSimpleDesc(String dbname, String tableName,
-      LinkedHashMap partSpec, String compactionType) {
+  public AlterTableSimpleDesc(String tableName,
+      LinkedHashMap partSpec, String compactionType) {
     type = AlterTableTypes.COMPACT;
     this.compactionType = compactionType;
-    this.dbName = dbname;
     this.tableName = tableName;
     this.partSpec = partSpec;
   }
@@ -83,14 +75,6 @@ public void setTableName(String tableName) {
     this.tableName = tableName;
   }
 
-  public String getDbName() {
-    return dbName;
-  }
-
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
-
   public AlterTableDesc.AlterTableTypes getType() {
     return type;
   }
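With dbName gone from AlterTableSimpleDesc, callers pass an already-qualified table name, as the updated TestQBCompact expectations later in this patch assert. A small sketch of the compaction case (the class name and the String-to-String part-spec generics are assumptions for illustration):

import java.util.LinkedHashMap;

import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc;

public class CompactRequestSketch {
  // The table name itself carries the database; there is no separate dbName argument.
  static AlterTableSimpleDesc majorCompactTodaysPartition() {
    LinkedHashMap<String, String> partSpec = new LinkedHashMap<String, String>();
    partSpec.put("ds", "today");
    return new AlterTableSimpleDesc("default.foo", partSpec, "major");
  }
}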
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java
index 9417220..5265289 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.util.HashMap;
+import java.util.List;
 
 @Explain(displayName="privilege subject")
 public class PrivilegeObjectDesc {
@@ -30,6 +31,8 @@
   private HashMap partSpec;
 
+  private List columns;
+
   public PrivilegeObjectDesc(boolean isTable, String object,
       HashMap partSpec) {
     super();
@@ -68,4 +71,11 @@ public void setPartSpec(HashMap partSpec) {
     this.partSpec = partSpec;
   }
 
+  public List getColumns() {
+    return columns;
+  }
+
+  public void setColumns(List columns) {
+    this.columns = columns;
+  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowColumnsDesc.java
index fe6a91e..28d16a3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowColumnsDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowColumnsDesc.java
@@ -23,7 +23,6 @@
 public class ShowColumnsDesc extends DDLDesc implements Serializable {
   private static final long serialVersionUID = 1L;
-  String dbName;
   String tableName;
   String resFile;
 
   /**
@@ -63,16 +62,6 @@ public ShowColumnsDesc(Path resFile, String tableName) {
   }
 
   /**
-   * @param dbName name of the database
-   * @param tableName name of table to show columns of
-   */
-  public ShowColumnsDesc(Path resFile, String dbName, String tableName) {
-    this.resFile = resFile.toString();
-    this.dbName = dbName;
-    this.tableName = tableName;
-  }
-
-  /**
    * @return the tableName
    */
   @Explain(displayName = "table name")
@@ -103,12 +92,4 @@ public String getResFile() {
   public void setResFile(String resFile) {
     this.resFile = resFile;
   }
-
-  public String getDbName() {
-    return dbName;
-  }
-
-  public void setDbName(String dbName) {
-    this.dbName = dbName;
-  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java
index aa88153..d27da3d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.plan;
 
-import java.util.List;
-
 @Explain(displayName="show grant desc")
 public class ShowGrantDesc {
 
@@ -26,8 +24,6 @@
   private PrivilegeObjectDesc hiveObj;
 
-  private List columns;
-
   private String resFile;
 
   /**
@@ -42,11 +38,10 @@ public ShowGrantDesc(){
   }
 
   public ShowGrantDesc(String resFile, PrincipalDesc principalDesc,
-      PrivilegeObjectDesc subjectObj, List columns) {
+      PrivilegeObjectDesc subjectObj) {
     this.resFile = resFile;
     this.principalDesc = principalDesc;
     this.hiveObj = subjectObj;
-    this.columns = columns;
   }
 
   public static String getSchema() {
@@ -78,12 +73,4 @@ public String getResFile() {
   public void setResFile(String resFile) {
     this.resFile = resFile;
   }
-
-  public List getColumns() {
-    return columns;
-  }
-
-  public void setColumns(List columns) {
-    this.columns = columns;
-  }
 }
diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java
index 5f32d5f..c5a0b8d 100644
--- ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java
+++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java
@@ -79,8 +79,7 @@ public void testNonPartitionedTable() throws Exception {
     boolean sawException = false;
     AlterTableSimpleDesc desc = parseAndAnalyze("alter table foo compact 'major'");
     Assert.assertEquals("major", desc.getCompactionType());
-    Assert.assertEquals("foo", desc.getTableName());
-    Assert.assertEquals("default", desc.getDbName());
+    Assert.assertEquals("default.foo", desc.getTableName());
   }
 
   @Test
@@ -100,8 +99,7 @@ public void testMajor() throws Exception {
     AlterTableSimpleDesc desc =
         parseAndAnalyze("alter table foo partition(ds = 'today') compact 'major'");
     Assert.assertEquals("major", desc.getCompactionType());
-    Assert.assertEquals("foo", desc.getTableName());
-    Assert.assertEquals("default", desc.getDbName());
+    Assert.assertEquals("default.foo", desc.getTableName());
     HashMap parts = desc.getPartSpec();
     Assert.assertEquals(1, parts.size());
     Assert.assertEquals("today", parts.get("ds"));
@@ -112,8 +110,7 @@ public void testMinor() throws Exception {
     AlterTableSimpleDesc desc =
         parseAndAnalyze("alter table foo partition(ds = 'today') compact 'minor'");
     Assert.assertEquals("minor", desc.getCompactionType());
-    Assert.assertEquals("foo", desc.getTableName());
-    Assert.assertEquals("default", desc.getDbName());
+    Assert.assertEquals("default.foo", desc.getTableName());
     HashMap parts = desc.getPartSpec();
     Assert.assertEquals(1, parts.size());
     Assert.assertEquals("today", parts.get("ds"));
diff --git ql/src/test/results/clientpositive/drop_multi_partitions.q.out ql/src/test/results/clientpositive/drop_multi_partitions.q.out
index eae57f3..87e4e61 100644
--- ql/src/test/results/clientpositive/drop_multi_partitions.q.out
+++ ql/src/test/results/clientpositive/drop_multi_partitions.q.out
@@ -42,7 +42,8 @@ POSTHOOK: type: ALTERTABLE_DROPPARTS
 ABSTRACT SYNTAX TREE:
 
 TOK_ALTERTABLE_DROPPARTS
-   mp
+   TOK_TABNAME
+      mp
    TOK_PARTSPEC
       TOK_PARTVAL
          b
@@ -57,7 +58,7 @@ STAGE PLANS:
   Stage: Stage-0
       Drop Table Operator:
         Drop Table
-          table: mp
+          table: default.mp
 
 PREHOOK: query: alter table mp drop partition (b='1')
 PREHOOK: type: ALTERTABLE_DROPPARTS
diff --git ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out
index 21bd257..16aa42d 100644
--- ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out
+++ ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out
@@ -213,13 +213,19 @@ POSTHOOK: Input: db2@destintable@ds=2011-11-11
 97	val_97
 PREHOOK: query: drop table db2.destinTable
 PREHOOK: type: DROPTABLE
+PREHOOK: Input: db2@destintable
+PREHOOK: Output: db2@destintable
 POSTHOOK: query: drop table db2.destinTable
 POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: db2@destintable
 POSTHOOK: Output: db2@destintable
 PREHOOK: query: drop table db1.sourceTable
 PREHOOK: type: DROPTABLE
+PREHOOK: Input: db1@sourcetable
+PREHOOK: Output: db1@sourcetable
 POSTHOOK: query: drop table db1.sourceTable
 POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: db1@sourcetable
 POSTHOOK: Output: db1@sourcetable
 PREHOOK: query: DROP DATABASE db1
 PREHOOK: type: DROPDATABASE