diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
index c91b15c..789ef76 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
@@ -29,7 +29,6 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.Set;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -110,7 +109,7 @@ public void testInputSomeColumnsUsed() throws HiveAuthzPluginException, HiveAcce
         getSortedList(tableObj.getColumns()));
   }
 
-  private List<String> getSortedList(Set<String> columns) {
+  private List<String> getSortedList(List<String> columns) {
     List<String> sortedCols = new ArrayList<String>(columns);
     Collections.sort(sortedCols);
     return sortedCols;
diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckColumnAccessHook.java itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckColumnAccessHook.java
index 14fc430..f6058e4 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckColumnAccessHook.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/CheckColumnAccessHook.java
@@ -18,9 +18,9 @@
 package org.apache.hadoop.hive.ql.hooks;
 
 import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
-import java.util.Set;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -34,7 +34,7 @@
 /*
  * This hook is used for verifying the column access information
  * that is generated and maintained in the QueryPlan object by the
- * ColumnAccessAnalyer. All the hook does is print out the columns
+ * ColumnAccessAnalyzer. All the hook does is print out the columns
  * accessed from each table as recorded in the ColumnAccessInfo
  * in the QueryPlan.
 */
@@ -58,14 +58,14 @@ public void run(HookContext hookContext) {
     }
 
     LogHelper console = SessionState.getConsole();
-    Map<String, Set<String>> tableToColumnAccessMap =
+    Map<String, List<String>> tableToColumnAccessMap =
       columnAccessInfo.getTableToColumnAccessMap();
 
     // We need a new map to ensure output is always produced in the same order.
    // This makes tests that use this hook deterministic.
Map outputOrderedMap = new HashMap(); - for (Map.Entry> tableAccess : tableToColumnAccessMap.entrySet()) { + for (Map.Entry> tableAccess : tableToColumnAccessMap.entrySet()) { StringBuilder perTableInfo = new StringBuilder(); perTableInfo.append("Table:").append(tableAccess.getKey()).append("\n"); // Sort columns to make output deterministic diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index b74868b..6a17b29 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -3268,6 +3268,9 @@ private Index add_index_core(final RawStore ms, final Index index, final Table i boolean success = false, indexTableCreated = false; + String[] qualified = + MetaStoreUtils.getQualifiedName(index.getDbName(), index.getIndexTableName()); + try { ms.openTransaction(); Index old_index = null; @@ -3290,7 +3293,7 @@ private Index add_index_core(final RawStore ms, final Index index, final Table i Table indexTbl = indexTable; if (indexTbl != null) { try { - indexTbl = ms.getTable(index.getDbName(), index.getIndexTableName()); + indexTbl = ms.getTable(qualified[0], qualified[1]); } catch (Exception e) { } if (indexTbl != null) { @@ -3311,7 +3314,7 @@ private Index add_index_core(final RawStore ms, final Index index, final Table i if (!success) { if (indexTableCreated) { try { - this.drop_table(index.getDbName(), index.getIndexTableName(), false); + drop_table(qualified[0], qualified[1], false); } catch (Exception e) { } } @@ -3365,8 +3368,8 @@ private boolean drop_index_by_name_core(final RawStore ms, String idxTblName = index.getIndexTableName(); if (idxTblName != null) { - Table tbl = null; - tbl = this.get_table(dbName, idxTblName); + String[] qualified = MetaStoreUtils.getQualifiedName(index.getDbName(), idxTblName); + Table tbl = get_table(qualified[0], qualified[1]); if (tbl.getSd() == null) { throw new MetaException("Table metadata is corrupted"); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 5a56ced..51c3f2c 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -1538,4 +1538,12 @@ public static int getArchivingLevel(Partition part) throws MetaException { return part.getValues().size(); } } + + public static String[] getQualifiedName(String defaultDbName, String tableName) { + String[] names = tableName.split("\\."); + if (names.length == 1) { + return new String[] { defaultDbName, tableName}; + } + return new String[] {names[0], names[1]}; + } } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 4f186f4..fa52715 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -2847,7 +2847,8 @@ private MIndex convertToMIndex(Index index) throws InvalidObjectException, "Original table does not exist for the given index."); } - MTable indexTable = getMTable(index.getDbName(), index.getIndexTableName()); + String[] qualified = MetaStoreUtils.getQualifiedName(index.getDbName(), index.getIndexTableName()); + MTable indexTable = getMTable(qualified[0], qualified[1]); if (indexTable == null) { throw 
new InvalidObjectException( "Underlying index table does not exist for the given index."); diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 8f3f2b6..fea0ecf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -507,7 +507,7 @@ public static void doAuthorization(BaseSemanticAnalyzer sem, String command) // get mapping of tables to columns used ColumnAccessInfo colAccessInfo = sem.getColumnAccessInfo(); // colAccessInfo is set only in case of SemanticAnalyzer - Map> tab2Cols = colAccessInfo != null ? colAccessInfo + Map> tab2Cols = colAccessInfo != null ? colAccessInfo .getTableToColumnAccessMap() : null; doAuthorizationV2(ss, op, inputs, outputs, command, tab2Cols); return; @@ -700,7 +700,7 @@ private static void getTablePartitionUsedColumns(HiveOperation op, BaseSemanticA } private static void doAuthorizationV2(SessionState ss, HiveOperation op, HashSet inputs, - HashSet outputs, String command, Map> tab2cols) throws HiveException { + HashSet outputs, String command, Map> tab2cols) throws HiveException { HiveAuthzContext.Builder authzContextBuilder = new HiveAuthzContext.Builder(); @@ -711,36 +711,14 @@ private static void doAuthorizationV2(SessionState ss, HiveOperation op, HashSet authzContextBuilder.setCommandString(command); HiveOperationType hiveOpType = getHiveOperationType(op); - List inputsHObjs = getHivePrivObjects(inputs); - updateInputColumnInfo(inputsHObjs, tab2cols); + List inputsHObjs = getHivePrivObjects(inputs, tab2cols); + List outputHObjs = getHivePrivObjects(outputs, null); - List outputHObjs = getHivePrivObjects(outputs); ss.getAuthorizerV2().checkPrivileges(hiveOpType, inputsHObjs, outputHObjs, authzContextBuilder.build()); - return; } - /** - * Add column information for input table objects - * @param inputsHObjs input HivePrivilegeObject - * @param map table to used input columns mapping - */ - private static void updateInputColumnInfo(List inputsHObjs, - Map> tableName2Cols) { - if(tableName2Cols == null) { - return; - } - for(HivePrivilegeObject inputObj : inputsHObjs){ - if(inputObj.getType() != HivePrivilegeObjectType.TABLE_OR_VIEW){ - // input columns are relevant only for tables or views - continue; - } - Set cols = tableName2Cols.get(Table.getCompleteName(inputObj.getDbname(), - inputObj.getObjectName())); - inputObj.setColumns(cols); - } - } - - private static List getHivePrivObjects(HashSet privObjects) { + private static List getHivePrivObjects( + HashSet privObjects, Map> tableName2Cols) { List hivePrivobjs = new ArrayList(); if(privObjects == null){ return hivePrivobjs; @@ -764,13 +742,17 @@ private static void updateInputColumnInfo(List inputsHObjs, //support for authorization on partitions needs to be added String dbname = null; String objName = null; + List partKeys = null; + List columns = null; switch(privObject.getType()){ case DATABASE: - dbname = privObject.getDatabase() == null ? null : privObject.getDatabase().getName(); + dbname = privObject.getDatabase().getName(); break; case TABLE: - dbname = privObject.getTable() == null ? null : privObject.getTable().getDbName(); - objName = privObject.getTable() == null ? null : privObject.getTable().getTableName(); + dbname = privObject.getTable().getDbName(); + objName = privObject.getTable().getTableName(); + columns = tableName2Cols == null ? 
null : + tableName2Cols.get(Table.getCompleteName(dbname, objName)); break; case DFS_DIR: case LOCAL_DIR: @@ -788,7 +770,7 @@ private static void updateInputColumnInfo(List inputsHObjs, } HivePrivObjectActionType actionType = AuthorizationUtils.getActionType(privObject); HivePrivilegeObject hPrivObject = new HivePrivilegeObject(privObjType, dbname, objName, - actionType); + partKeys, columns, actionType, null); hivePrivobjs.add(hPrivObject); } return hivePrivobjs; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 40d910c..1f20609 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -35,7 +35,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -600,13 +599,9 @@ private int showGrants(ShowGrantDesc showGrantDesc) throws HiveException { HiveAuthorizer authorizer = getSessionAuthorizer(); try { - Set colSet = showGrantDesc.getColumns() != null ? new HashSet( - showGrantDesc.getColumns()) : null; List privInfos = authorizer.showPrivileges( AuthorizationUtils.getHivePrincipal(showGrantDesc.getPrincipalDesc()), - AuthorizationUtils.getHivePrivilegeObject(showGrantDesc.getHiveObj(), - colSet - )); + AuthorizationUtils.getHivePrivilegeObject(showGrantDesc.getHiveObj())); boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST); writeToFile(writeGrantInfo(privInfos, testMode), showGrantDesc.getResFile()); } catch (IOException e) { @@ -625,7 +620,7 @@ private int grantOrRevokePrivileges(List principals, //Convert to object types used by the authorization plugin interface List hivePrincipals = AuthorizationUtils.getHivePrincipals(principals); List hivePrivileges = AuthorizationUtils.getHivePrivileges(privileges); - HivePrivilegeObject hivePrivObject = AuthorizationUtils.getHivePrivilegeObject(privSubjectDesc, null); + HivePrivilegeObject hivePrivObject = AuthorizationUtils.getHivePrivilegeObject(privSubjectDesc); HivePrincipal grantorPrincipal = new HivePrincipal( grantor, AuthorizationUtils.getHivePrincipalType(grantorType)); @@ -754,8 +749,7 @@ private int alterDatabase(AlterDatabaseDesc alterDbDesc) throws HiveException { } private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException { - db.dropIndex(SessionState.get().getCurrentDatabase(), dropIdx.getTableName(), - dropIdx.getIndexName(), true); + db.dropIndex(dropIdx.getTableName(), dropIdx.getIndexName(), true); return 0; } @@ -765,11 +759,7 @@ private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException validateSerDe(crtIndex.getSerde()); } - String indexTableName = - crtIndex.getIndexTableName() != null ? 
crtIndex.getIndexTableName() : - MetaStoreUtils.getIndexTableName(SessionState.get().getCurrentDatabase(), - crtIndex.getTableName(), crtIndex.getIndexName()); - + String indexTableName = crtIndex.getIndexTableName(); if (!Utilities.isDefaultNameNode(conf)) { // If location is specified - ensure that it is a full qualified name makeLocationQualified(crtIndex, indexTableName); @@ -792,10 +782,9 @@ private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException } private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException { - String dbName = alterIndex.getDbName(); String baseTableName = alterIndex.getBaseTableName(); String indexName = alterIndex.getIndexName(); - Index idx = db.getIndex(dbName, baseTableName, indexName); + Index idx = db.getIndex(baseTableName, indexName); switch(alterIndex.getOp()) { case ADDPROPS: @@ -806,8 +795,7 @@ private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException Map props = new HashMap(); Map, Long> basePartTs = new HashMap, Long>(); - Table baseTbl = db.getTable(SessionState.get().getCurrentDatabase(), - baseTableName); + Table baseTbl = db.getTable(baseTableName); if (baseTbl.isPartitioned()) { List baseParts; @@ -854,7 +842,7 @@ private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException } try { - db.alterIndex(dbName, baseTableName, indexName, idx); + db.alterIndex(baseTableName, indexName, idx); } catch (InvalidOperationException e) { console.printError("Invalid alter operation: " + e.getMessage()); LOG.info("alter index: " + stringifyException(e)); @@ -896,7 +884,7 @@ private int addPartitions(Hive db, AddPartitionDesc addPartitionDesc) throws Hiv */ private int renamePartition(Hive db, RenamePartitionDesc renamePartitionDesc) throws HiveException { - Table tbl = db.getTable(renamePartitionDesc.getDbName(), renamePartitionDesc.getTableName()); + Table tbl = db.getTable(renamePartitionDesc.getTableName()); Partition oldPart = db.getPartition(tbl, renamePartitionDesc.getOldPartSpec(), false); Partition part = db.getPartition(tbl, renamePartitionDesc.getOldPartSpec(), false); @@ -923,7 +911,7 @@ private int renamePartition(Hive db, RenamePartitionDesc renamePartitionDesc) th private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionDesc) throws HiveException { - Table tbl = db.getTable(alterPartitionDesc.getDbName(), alterPartitionDesc.getTableName()); + Table tbl = db.getTable(alterPartitionDesc.getTableName(), true); String tabName = alterPartitionDesc.getTableName(); // This is checked by DDLSemanticAnalyzer @@ -1015,14 +1003,11 @@ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionD private int touch(Hive db, AlterTableSimpleDesc touchDesc) throws HiveException { - String dbName = touchDesc.getDbName(); - String tblName = touchDesc.getTableName(); - - Table tbl = db.getTable(dbName, tblName); + Table tbl = db.getTable(touchDesc.getTableName()); if (touchDesc.getPartSpec() == null) { try { - db.alterTable(tblName, tbl); + db.alterTable(touchDesc.getTableName(), tbl); } catch (InvalidOperationException e) { throw new HiveException("Uable to update table"); } @@ -1034,7 +1019,7 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc) throw new HiveException("Specified partition does not exist"); } try { - db.alterPartition(tblName, part); + db.alterPartition(touchDesc.getTableName(), part); } catch (InvalidOperationException e) { throw new HiveException(e); } @@ -1173,10 +1158,8 @@ boolean partitionInCustomLocation(Table tbl, 
Partition p) private int archive(Hive db, AlterTableSimpleDesc simpleDesc, DriverContext driverContext) throws HiveException { - String dbName = simpleDesc.getDbName(); - String tblName = simpleDesc.getTableName(); - Table tbl = db.getTable(dbName, tblName); + Table tbl = db.getTable(simpleDesc.getTableName()); if (tbl.getTableType() != TableType.MANAGED_TABLE) { throw new HiveException("ARCHIVE can only be performed on managed tables"); @@ -1378,7 +1361,7 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc, authority.toString(), harPartitionDir.getPath()); // make in Path to ensure no slash at the end setArchived(p, harPath, partSpecInfo.values.size()); - db.alterPartition(tblName, p); + db.alterPartition(simpleDesc.getTableName(), p); } } catch (Exception e) { throw new HiveException("Unable to change the partition info for HAR", e); @@ -1399,10 +1382,8 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc, private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc) throws HiveException { - String dbName = simpleDesc.getDbName(); - String tblName = simpleDesc.getTableName(); - Table tbl = db.getTable(dbName, tblName); + Table tbl = db.getTable(simpleDesc.getTableName()); // Means user specified a table, not a partition if (simpleDesc.getPartSpec() == null) { @@ -1587,7 +1568,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc) for(Partition p: partitions) { setUnArchived(p); try { - db.alterPartition(tblName, p); + db.alterPartition(simpleDesc.getTableName(), p); } catch (InvalidOperationException e) { throw new HiveException(e); } @@ -1636,10 +1617,7 @@ private void msckAddPartitionsOneByOne(Hive db, Table table, private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException { - String dbName = desc.getDbName(); - String tblName = desc.getTableName(); - - Table tbl = db.getTable(dbName, tblName); + Table tbl = db.getTable(desc.getTableName()); String partName = null; if (desc.getPartSpec() == null) { @@ -2233,15 +2211,7 @@ private int showTables(Hive db, ShowTablesDesc showTbls) throws HiveException { public int showColumns(Hive db, ShowColumnsDesc showCols) throws HiveException { - String dbName = showCols.getDbName(); - String tableName = showCols.getTableName(); - Table table = null; - if (dbName == null) { - table = db.getTable(tableName); - } - else { - table = db.getTable(dbName, tableName); - } + Table table = db.getTable(showCols.getTableName()); // write the results in the file DataOutputStream outStream = null; @@ -4305,10 +4275,12 @@ private void makeLocationQualified(CreateIndexDesc crtIndex, String name) throws if (crtIndex.getLocation() == null) { // Location is not set, leave it as-is if index doesn't belong to default DB // Currently all indexes are created in current DB only - if (db.getDatabaseCurrent().getName().equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) { + if (Utilities.getDatabaseName(name).equalsIgnoreCase(MetaStoreUtils.DEFAULT_DATABASE_NAME)) { // Default database name path is always ignored, use METASTOREWAREHOUSE and object name // instead - path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE), name.toLowerCase()); + String warehouse = HiveConf.getVar(conf, ConfVars.METASTOREWAREHOUSE); + String tableName = Utilities.getTableName(name); + path = new Path(warehouse, tableName.toLowerCase()); } } else { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 4cf4522..eecdb6c 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -2046,19 +2046,39 @@ public static String formatBinaryString(byte[] array, int start, int length) { * @return String array with two elements, first is db name, second is table name * @throws HiveException */ - public static String[] getDbTableName(String dbtable) throws HiveException{ + public static String[] getDbTableName(String dbtable) throws SemanticException { + return getDbTableName(SessionState.get().getCurrentDatabase(), dbtable); + } + + public static String[] getDbTableName(String defaultDb, String dbtable) throws SemanticException { if(dbtable == null){ return new String[2]; } String[] names = dbtable.split("\\."); switch (names.length) { - case 2: - return names; - case 1: - return new String [] {SessionState.get().getCurrentDatabase(), dbtable}; - default: - throw new HiveException(ErrorMsg.INVALID_TABLE_NAME, dbtable); + case 2: + return names; + case 1: + return new String [] {defaultDb, dbtable}; + default: + throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, dbtable); + } + } + + public static String getDatabaseName(String name) throws SemanticException { + String[] split = name.split("\\."); + if (split.length != 2) { + throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, name); + } + return split[0]; + } + + public static String getTableName(String name) throws SemanticException { + String[] split = name.split("\\."); + if (split.length != 2) { + throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, name); } + return split[1]; } public static void validateColumnNames(List colNames, List checkCols) diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index a7e50ad..a3677a0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -408,6 +408,12 @@ public void alterTable(String tblName, Table newTbl) } } + public void alterIndex(String baseTableName, String indexName, Index newIdx) + throws InvalidOperationException, HiveException { + String[] names = Utilities.getDbTableName(baseTableName); + alterIndex(names[0], names[1], indexName, newIdx); + } + /** * Updates the existing index metadata with the new metadata. * @@ -666,17 +672,16 @@ public void createIndex(String tableName, String indexName, String indexHandlerC throws HiveException { try { - String dbName = SessionState.get().getCurrentDatabase(); Index old_index = null; try { - old_index = getIndex(dbName, tableName, indexName); + old_index = getIndex(tableName, indexName); } catch (Exception e) { } if (old_index != null) { - throw new HiveException("Index " + indexName + " already exists on table " + tableName + ", db=" + dbName); + throw new HiveException("Index " + indexName + " already exists on table " + tableName); } - org.apache.hadoop.hive.metastore.api.Table baseTbl = getMSC().getTable(dbName, tableName); + org.apache.hadoop.hive.metastore.api.Table baseTbl = getTable(tableName).getTTable(); if (baseTbl.getTableType() == TableType.VIRTUAL_VIEW.toString()) { throw new HiveException("tableName="+ tableName +" is a VIRTUAL VIEW. Index on VIRTUAL VIEW is not supported."); } @@ -685,17 +690,13 @@ public void createIndex(String tableName, String indexName, String indexHandlerC + " is a TEMPORARY TABLE. 
Index on TEMPORARY TABLE is not supported."); } - if (indexTblName == null) { - indexTblName = MetaStoreUtils.getIndexTableName(dbName, tableName, indexName); - } else { - org.apache.hadoop.hive.metastore.api.Table temp = null; - try { - temp = getMSC().getTable(dbName, indexTblName); - } catch (Exception e) { - } - if (temp != null) { - throw new HiveException("Table name " + indexTblName + " already exists. Choose another name."); - } + org.apache.hadoop.hive.metastore.api.Table temp = null; + try { + temp = getTable(indexTblName).getTTable(); + } catch (Exception e) { + } + if (temp != null) { + throw new HiveException("Table name " + indexTblName + " already exists. Choose another name."); } org.apache.hadoop.hive.metastore.api.StorageDescriptor storageDescriptor = baseTbl.getSd().deepCopy(); @@ -773,7 +774,9 @@ public void createIndex(String tableName, String indexName, String indexHandlerC HiveIndexHandler indexHandler = HiveUtils.getIndexHandler(this.getConf(), indexHandlerClass); if (indexHandler.usesIndexTable()) { - tt = new org.apache.hadoop.hive.ql.metadata.Table(dbName, indexTblName).getTTable(); + String idname = Utilities.getDatabaseName(indexTblName); + String itname = Utilities.getTableName(indexTblName); + tt = new org.apache.hadoop.hive.ql.metadata.Table(idname, itname).getTTable(); List partKeys = baseTbl.getPartitionKeys(); tt.setPartitionKeys(partKeys); tt.setTableType(TableType.INDEX_TABLE.toString()); @@ -797,7 +800,9 @@ public void createIndex(String tableName, String indexName, String indexHandlerC throw new RuntimeException("Please specify deferred rebuild using \" WITH DEFERRED REBUILD \"."); } - Index indexDesc = new Index(indexName, indexHandlerClass, dbName, tableName, time, time, indexTblName, + String tdname = Utilities.getDatabaseName(tableName); + String ttname = Utilities.getTableName(tableName); + Index indexDesc = new Index(indexName, indexHandlerClass, tdname, ttname, time, time, indexTblName, storageDescriptor, params, deferredRebuild); if (indexComment != null) { indexDesc.getParameters().put("comment", indexComment); @@ -817,19 +822,6 @@ public void createIndex(String tableName, String indexName, String indexHandlerC } } - public Index getIndex(String qualifiedIndexName) throws HiveException { - String[] names = getQualifiedNames(qualifiedIndexName); - switch (names.length) { - case 3: - return getIndex(names[0], names[1], names[2]); - case 2: - return getIndex(SessionState.get().getCurrentDatabase(), - names[0], names[1]); - default: - throw new HiveException("Invalid index name:" + qualifiedIndexName); - } - } - public Index getIndex(String baseTableName, String indexName) throws HiveException { String[] names = Utilities.getDbTableName(baseTableName); return this.getIndex(names[0], names[1], indexName); @@ -844,6 +836,11 @@ public Index getIndex(String dbName, String baseTableName, } } + public boolean dropIndex(String baseTableName, String index_name, boolean deleteData) throws HiveException { + String[] names = Utilities.getDbTableName(baseTableName); + return dropIndex(names[0], names[1], index_name, deleteData); + } + public boolean dropIndex(String db_name, String tbl_name, String index_name, boolean deleteData) throws HiveException { try { return getMSC().dropIndex(db_name, tbl_name, index_name, deleteData); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java index ae87aac..44844dd 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.index.IndexMetadataChangeTask; @@ -103,7 +104,8 @@ private IndexUtils(){ return indexTables; } for (Index index : indexes.get(table)) { - Table indexTable = hive.getTable(index.getIndexTableName()); + String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName()); + Table indexTable = hive.getTable(qualified[0], qualified[1]); indexToIndexTable.put(index, indexTable); indexTables.add(indexTable); } @@ -121,7 +123,8 @@ private IndexUtils(){ return indexTables; } for (Index index : indexes.get(partitionedTable)) { - Table indexTable = hive.getTable(index.getIndexTableName()); + String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName()); + Table indexTable = hive.getTable(qualified[0], qualified[1]); indexToIndexTable.put(index, indexTable); indexTables.add(indexTable); } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java index 11a6d07..6d3b77b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.index.AggregateIndexHandler; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -373,8 +374,9 @@ private boolean checkIfIndexBuiltOnAllTablePartitions(TableScanOperator tableSca // index is changed. 
List idxTblColNames = new ArrayList(); try { - Table idxTbl = hiveInstance.getTable(index.getDbName(), + String[] qualified = Utilities.getDbTableName(index.getDbName(), index.getIndexTableName()); + Table idxTbl = hiveInstance.getTable(qualified[0], qualified[1]); for (FieldSchema idxTblCol : idxTbl.getCols()) { idxTblColNames.add(idxTblCol.getName()); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 22945e3..2633819 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -34,6 +34,7 @@ import org.antlr.runtime.tree.CommonTree; import org.antlr.runtime.tree.Tree; +import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; @@ -305,6 +306,27 @@ public static String getUnescapedName(ASTNode tableOrColumnNode, String currentD return unescapeIdentifier(tableOrColumnNode.getText()); } + public static String[] getQualifiedTableName(ASTNode tabNameNode) throws SemanticException { + if (tabNameNode.getToken().getType() != HiveParser.TOK_TABNAME) { + throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME.getMsg(tabNameNode)); + } + if (tabNameNode.getChildCount() == 2) { + String dbName = unescapeIdentifier(tabNameNode.getChild(0).getText()); + String tableName = unescapeIdentifier(tabNameNode.getChild(1).getText()); + return new String[] {dbName, tableName}; + } + String tableName = unescapeIdentifier(tabNameNode.getChild(0).getText()); + return new String[]{SessionState.get().getCurrentDatabase(), tableName}; + } + + public static String getDotName(String[] qname) throws SemanticException { + String genericName = StringUtils.join(qname, "."); + if (qname.length != 2) { + throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, genericName); + } + return genericName; + } + /** * Get the unqualified name from a table node. 
* @@ -817,9 +839,9 @@ public void setColumnAccessInfo(ColumnAccessInfo columnAccessInfo) { this.columnAccessInfo = columnAccessInfo; } - protected HashMap extractPartitionSpecs(Tree partspec) + protected LinkedHashMap extractPartitionSpecs(Tree partspec) throws SemanticException { - HashMap partSpec = new LinkedHashMap(); + LinkedHashMap partSpec = new LinkedHashMap(); for (int i = 0; i < partspec.getChildCount(); ++i) { CommonTree partspec_val = (CommonTree) partspec.getChild(i); String val = stripQuotes(partspec_val.getChild(1).getText()); @@ -1176,23 +1198,16 @@ protected Database getDatabase(String dbName, boolean throwException) throws Sem } } + protected Table getTable(String[] qualified) throws SemanticException { + return getTable(qualified[0], qualified[1], true); + } + protected Table getTable(String tblName) throws SemanticException { return getTable(null, tblName, true); } protected Table getTable(String tblName, boolean throwException) throws SemanticException { - String currentDb = SessionState.get().getCurrentDatabase(); - return getTable(currentDb, tblName, throwException); - } - - // qnName : possibly contains database name (dot separated) - protected Table getTableWithQN(String qnName, boolean throwException) throws SemanticException { - int dot = qnName.indexOf('.'); - if (dot < 0) { - String currentDb = SessionState.get().getCurrentDatabase(); - return getTable(currentDb, qnName, throwException); - } - return getTable(qnName.substring(0, dot), qnName.substring(dot + 1), throwException); + return getTable(null, tblName, throwException); } protected Table getTable(String database, String tblName, boolean throwException) diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessInfo.java ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessInfo.java index 939dc65..a4df8b4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessInfo.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnAccessInfo.java @@ -18,8 +18,11 @@ package org.apache.hadoop.hive.ql.parse; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; @@ -42,7 +45,13 @@ public void add(String table, String col) { tableColumns.add(col); } - public Map> getTableToColumnAccessMap() { - return tableToColumnAccessMap; + public Map> getTableToColumnAccessMap() { + Map> mapping = new HashMap>(); + for (Map.Entry> entry : tableToColumnAccessMap.entrySet()) { + List sortedCols = new ArrayList(entry.getValue()); + Collections.sort(sortedCols); + mapping.put(entry.getKey(), sortedCols); + } + return mapping; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 67a3aa7..e0052cb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -217,7 +217,7 @@ public TablePartition() { } public TablePartition(ASTNode tblPart) throws SemanticException { - tableName = unescapeIdentifier(tblPart.getChild(0).getText()); + tableName = getDotName((getQualifiedTableName((ASTNode) tblPart.getChild(0)))); if (tblPart.getChildCount() > 1) { ASTNode part = (ASTNode) tblPart.getChild(1); if (part.getToken().getType() == HiveParser.TOK_PARTSPEC) { @@ -1015,7 +1015,7 @@ private boolean isFullSpec(Table table, Map partSpec) { private void analyzeCreateIndex(ASTNode ast) throws SemanticException { 
String indexName = unescapeIdentifier(ast.getChild(0).getText()); String typeName = unescapeSQLString(ast.getChild(1).getText()); - String tableName = getUnescapedName((ASTNode) ast.getChild(2)); + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(2)); List indexedCols = getColumnNames((ASTNode) ast.getChild(3)); IndexType indexType = HiveIndex.getIndexType(typeName); @@ -1081,8 +1081,14 @@ private void analyzeCreateIndex(ASTNode ast) throws SemanticException { storageFormat.fillDefaultStorageFormat(); + if (indexTableName == null) { + indexTableName = MetaStoreUtils.getIndexTableName(qualified[0], qualified[1], indexName); + indexTableName = qualified[0] + "." + indexTableName; // on same database with base table + } else { + indexTableName = getDotName(Utilities.getDbTableName(indexTableName)); + } - CreateIndexDesc crtIndexDesc = new CreateIndexDesc(tableName, indexName, + CreateIndexDesc crtIndexDesc = new CreateIndexDesc(getDotName(qualified), indexName, indexedCols, indexTableName, deferredRebuild, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), storageFormat.getStorageHandler(), typeName, location, idxProps, tblProps, @@ -1116,21 +1122,20 @@ private void analyzeDropIndex(ASTNode ast) throws SemanticException { } private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException { - String baseTableName = unescapeIdentifier(ast.getChild(0).getText()); + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); String indexName = unescapeIdentifier(ast.getChild(1).getText()); HashMap partSpec = null; Tree part = ast.getChild(2); if (part != null) { partSpec = extractPartitionSpecs(part); } - List> indexBuilder = getIndexBuilderMapRed(baseTableName, indexName, partSpec); + List> indexBuilder = getIndexBuilderMapRed(qualified, indexName, partSpec); rootTasks.addAll(indexBuilder); // Handle updating index timestamps AlterIndexDesc alterIdxDesc = new AlterIndexDesc(AlterIndexTypes.UPDATETIMESTAMP); alterIdxDesc.setIndexName(indexName); - alterIdxDesc.setBaseTableName(baseTableName); - alterIdxDesc.setDbName(SessionState.get().getCurrentDatabase()); + alterIdxDesc.setBaseTableName(getDotName(qualified)); alterIdxDesc.setSpec(partSpec); Task tsTask = TaskFactory.get(new DDLWork(alterIdxDesc), conf); @@ -1142,27 +1147,28 @@ private void analyzeAlterIndexRebuild(ASTNode ast) throws SemanticException { private void analyzeAlterIndexProps(ASTNode ast) throws SemanticException { - String baseTableName = getUnescapedName((ASTNode) ast.getChild(0)); + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); String indexName = unescapeIdentifier(ast.getChild(1).getText()); HashMap mapProp = getProps((ASTNode) (ast.getChild(2)) .getChild(0)); - AlterIndexDesc alterIdxDesc = - new AlterIndexDesc(AlterIndexTypes.ADDPROPS); + AlterIndexDesc alterIdxDesc = new AlterIndexDesc(AlterIndexTypes.ADDPROPS); alterIdxDesc.setProps(mapProp); alterIdxDesc.setIndexName(indexName); - alterIdxDesc.setBaseTableName(baseTableName); - alterIdxDesc.setDbName(SessionState.get().getCurrentDatabase()); + alterIdxDesc.setBaseTableName(getDotName(qualified)); rootTasks.add(TaskFactory.get(new DDLWork(alterIdxDesc), conf)); } - private List> getIndexBuilderMapRed(String baseTableName, String indexName, + private List> getIndexBuilderMapRed(String[] names, String indexName, HashMap partSpec) throws SemanticException { try { - String dbName = SessionState.get().getCurrentDatabase(); - Index index = db.getIndex(dbName, baseTableName, indexName); - Table 
indexTbl = getTable(index.getIndexTableName()); + Index index = db.getIndex(names[0], names[1], indexName); + Table indexTbl = null; + String indexTableName = index.getIndexTableName(); + if (indexTableName != null) { + indexTbl = getTable(Utilities.getDbTableName(index.getDbName(), indexTableName)); + } String baseTblName = index.getOrigTableName(); Table baseTbl = getTable(baseTblName); @@ -1474,7 +1480,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode tablePartAST, ASTNode ast, boolean checkIndex = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CONCATENATE_CHECK_INDEX); if (checkIndex) { - List indexes = db.getIndexes(tblObj.getDbName(), tableName, + List indexes = db.getIndexes(tblObj.getDbName(), tblObj.getTableName(), Short.MAX_VALUE); if (indexes != null && indexes.size() > 0) { throw new SemanticException("can not do merge because source table " @@ -1633,7 +1639,7 @@ private void analyzeAlterTableCompact(ASTNode ast, String tableName, LinkedHashMap newPartSpec = null; if (partSpec != null) newPartSpec = new LinkedHashMap(partSpec); - AlterTableSimpleDesc desc = new AlterTableSimpleDesc(SessionState.get().getCurrentDatabase(), + AlterTableSimpleDesc desc = new AlterTableSimpleDesc( tableName, newPartSpec, type); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf)); @@ -2098,25 +2104,17 @@ private void analyzeShowTables(ASTNode ast) throws SemanticException { } private void analyzeShowColumns(ASTNode ast) throws SemanticException { - ShowColumnsDesc showColumnsDesc; - String dbName = null; - String tableName = null; - switch (ast.getChildCount()) { - case 1: - tableName = getUnescapedName((ASTNode) ast.getChild(0)); - break; - case 2: - dbName = getUnescapedName((ASTNode) ast.getChild(0)); - tableName = getUnescapedName((ASTNode) ast.getChild(1)); - break; - default: - break; + String tableName = getUnescapedName((ASTNode) ast.getChild(0)); + if (ast.getChildCount() > 1) { + if (tableName.contains(".")) { + throw new SemanticException("Duplicates declaration for database name"); + } + tableName = getUnescapedName((ASTNode) ast.getChild(1)) + "." 
+ tableName; } - - Table tab = getTable(dbName, tableName, true); + Table tab = getTable(tableName); inputs.add(new ReadEntity(tab)); - showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), dbName, tableName); + ShowColumnsDesc showColumnsDesc = new ShowColumnsDesc(ctx.getResFile(), tableName); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showColumnsDesc), conf)); setFetchTask(createFetchTask(showColumnsDesc.getSchema())); @@ -2157,13 +2155,13 @@ private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { ShowTblPropertiesDesc showTblPropertiesDesc; - String tableNames = getUnescapedName((ASTNode) ast.getChild(0)); - String dbName = SessionState.get().getCurrentDatabase(); + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); String propertyName = null; if (ast.getChildCount() > 1) { propertyName = unescapeSQLString(ast.getChild(1).getText()); } + String tableNames = getDotName(qualified); validateTable(tableNames, null); showTblPropertiesDesc = new ShowTblPropertiesDesc(ctx.getResFile().toString(), tableNames, @@ -2447,7 +2445,7 @@ private void analyzeAlterTableRename(ASTNode ast, boolean expectView) throws Sem } private void analyzeAlterTableRenameCol(ASTNode ast) throws SemanticException { - String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); String newComment = null; String newType = null; newType = getTypeStringFromAST((ASTNode) ast.getChild(3)); @@ -2477,7 +2475,7 @@ private void analyzeAlterTableRenameCol(ASTNode ast) throws SemanticException { String newColName = ast.getChild(2).getText(); /* Validate the operation of renaming a column name. 
*/ - Table tab = getTable(tblName); + Table tab = getTable(qualified); SkewedInfo skewInfo = tab.getTTable().getSd().getSkewedInfo(); if ((null != skewInfo) @@ -2487,6 +2485,7 @@ private void analyzeAlterTableRenameCol(ASTNode ast) throws SemanticException { + ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN.getMsg()); } + String tblName = getDotName(qualified); AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, unescapeIdentifier(oldColName), unescapeIdentifier(newColName), newType, newComment, first, flagCol); @@ -2511,9 +2510,8 @@ private void analyzeAlterTableRenamePart(ASTNode ast, String tblName, List> partSpecs = new ArrayList>(); partSpecs.add(oldPartSpec); partSpecs.add(newPartSpec); - addTablePartsOutputs(tblName, partSpecs, WriteEntity.WriteType.DDL_EXCLUSIVE); - RenamePartitionDesc renamePartitionDesc = new RenamePartitionDesc( - SessionState.get().getCurrentDatabase(), tblName, oldPartSpec, newPartSpec); + addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_EXCLUSIVE); + RenamePartitionDesc renamePartitionDesc = new RenamePartitionDesc(tblName, oldPartSpec, newPartSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), renamePartitionDesc), conf)); } @@ -2536,7 +2534,9 @@ private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, private void analyzeAlterTableModifyCols(ASTNode ast, AlterTableTypes alterType) throws SemanticException { - String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); + + String tblName = getDotName(qualified); List newCols = getColumns((ASTNode) ast.getChild(1)); AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, newCols, alterType); @@ -2559,8 +2559,8 @@ private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView) // popular case but that's kinda hacky. Let's not do it for now. boolean canGroupExprs = ifExists; - String tblName = getUnescapedName((ASTNode) ast.getChild(0)); - Table tab = getTable(tblName, true); + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); + Table tab = getTable(qualified); Map> partSpecs = getFullPartitionSpecs(ast, tab, canGroupExprs); if (partSpecs.isEmpty()) return; // nothing to do @@ -2574,24 +2574,19 @@ private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView) addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists, ignoreProtection); DropTableDesc dropTblDesc = - new DropTableDesc(tblName, partSpecs, expectView, ignoreProtection); + new DropTableDesc(getDotName(qualified), partSpecs, expectView, ignoreProtection); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf)); } private void analyzeAlterTablePartColType(ASTNode ast) throws SemanticException { // get table name - String tblName = getUnescapedName((ASTNode)ast.getChild(0)); + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); - Table tab = null; // check if table exists. - try { - tab = getTable(tblName, true); - inputs.add(new ReadEntity(tab)); - } catch (HiveException e) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); - } + Table tab = getTable(qualified); + inputs.add(new ReadEntity(tab)); // validate the DDL is a valid operation on the table. 
validateAlterTableType(tab, AlterTableTypes.ALTERPARTITION, false); @@ -2625,7 +2620,7 @@ private void analyzeAlterTablePartColType(ASTNode ast) } AlterTableAlterPartDesc alterTblAlterPartDesc = - new AlterTableAlterPartDesc(SessionState.get().getCurrentDatabase(), tblName, newCol); + new AlterTableAlterPartDesc(getDotName(qualified), newCol); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblAlterPartDesc), conf)); @@ -2648,10 +2643,10 @@ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView) throws SemanticException { // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+) - String tblName = getUnescapedName((ASTNode)ast.getChild(0)); + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); boolean ifNotExists = ast.getChild(1).getType() == HiveParser.TOK_IFNOTEXISTS; - Table tab = getTable(tblName, true); + Table tab = getTable(qualified); boolean isView = tab.isView(); validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView); outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED)); @@ -2662,7 +2657,8 @@ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView) String currentLocation = null; Map currentPart = null; // Parser has done some verification, so the order of tokens doesn't need to be verified here. - AddPartitionDesc addPartitionDesc = new AddPartitionDesc(tab.getDbName(), tblName, ifNotExists); + AddPartitionDesc addPartitionDesc = + new AddPartitionDesc(tab.getDbName(), tab.getTableName(), ifNotExists); for (int num = start; num < numCh; num++) { ASTNode child = (ASTNode) ast.getChild(num); switch (child.getToken().getType()) { @@ -2683,7 +2679,7 @@ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView) currentLocation = unescapeSQLString(child.getChild(0).getText()); boolean isLocal = false; try { - // do best effor to determine if this is a local file + // do best effort to determine if this is a local file String scheme = new URI(currentLocation).getScheme(); if (scheme != null) { isLocal = FileUtils.isLocalFile(conf, currentLocation); @@ -2714,7 +2710,7 @@ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView) // Compile internal query to capture underlying table partition dependencies StringBuilder cmd = new StringBuilder(); cmd.append("SELECT * FROM "); - cmd.append(HiveUtils.unparseIdentifier(tblName)); + cmd.append(HiveUtils.unparseIdentifier(getDotName(qualified))); cmd.append(" WHERE "); boolean firstOr = true; for (int i = 0; i < addPartitionDesc.getPartitionCount(); ++i) { @@ -2775,9 +2771,9 @@ private Partition getPartitionForOutput(Table tab, Map currentPa */ private void analyzeAlterTableTouch(CommonTree ast) throws SemanticException { + String[] qualified = getQualifiedTableName((ASTNode)ast.getChild(0)); - String tblName = getUnescapedName((ASTNode)ast.getChild(0)); - Table tab = getTable(tblName, true); + Table tab = getTable(qualified); validateAlterTableType(tab, AlterTableTypes.TOUCH); inputs.add(new ReadEntity(tab)); @@ -2786,16 +2782,16 @@ private void analyzeAlterTableTouch(CommonTree ast) if (partSpecs.size() == 0) { AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( - SessionState.get().getCurrentDatabase(), tblName, null, + getDotName(qualified), null, AlterTableDesc.AlterTableTypes.TOUCH); outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK)); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc), conf)); } else { - 
addTablePartsOutputs(tblName, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK); + addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK); for (Map partSpec : partSpecs) { AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( - SessionState.get().getCurrentDatabase(), tblName, partSpec, + getDotName(qualified), partSpec, AlterTableDesc.AlterTableTypes.TOUCH); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc), conf)); @@ -2810,12 +2806,12 @@ private void analyzeAlterTableArchive(CommonTree ast, boolean isUnArchive) throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg()); } - String tblName = getUnescapedName((ASTNode) ast.getChild(0)); + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); // partition name to value List> partSpecs = getPartitionSpecs(ast); - Table tab = getTable(tblName, true); - addTablePartsOutputs(tblName, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK); + Table tab = getTable(qualified); + addTablePartsOutputs(tab, partSpecs, true, WriteEntity.WriteType.DDL_NO_LOCK); validateAlterTableType(tab, AlterTableTypes.ARCHIVE); inputs.add(new ReadEntity(tab)); @@ -2835,7 +2831,7 @@ private void analyzeAlterTableArchive(CommonTree ast, boolean isUnArchive) throw new SemanticException(e.getMessage(), e); } AlterTableSimpleDesc archiveDesc = new AlterTableSimpleDesc( - SessionState.get().getCurrentDatabase(), tblName, partSpec, + getDotName(qualified), partSpec, (isUnArchive ? AlterTableTypes.UNARCHIVE : AlterTableTypes.ARCHIVE)); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), archiveDesc), conf)); @@ -3003,20 +2999,20 @@ private void validatePartitionValues(Map partSpec) * Add the table partitions to be modified in the output, so that it is available for the * pre-execution hook. If the partition does not exist, no error is thrown. */ - private void addTablePartsOutputs(String tblName, List> partSpecs, + private void addTablePartsOutputs(Table table, List> partSpecs, WriteEntity.WriteType writeType) throws SemanticException { - addTablePartsOutputs(tblName, partSpecs, false, false, null, writeType); + addTablePartsOutputs(table, partSpecs, false, false, null, writeType); } /** * Add the table partitions to be modified in the output, so that it is available for the * pre-execution hook. If the partition does not exist, no error is thrown. */ - private void addTablePartsOutputs(String tblName, List> partSpecs, + private void addTablePartsOutputs(Table table, List> partSpecs, boolean allowMany, WriteEntity.WriteType writeType) throws SemanticException { - addTablePartsOutputs(tblName, partSpecs, false, allowMany, null, writeType); + addTablePartsOutputs(table, partSpecs, false, allowMany, null, writeType); } /** @@ -3024,10 +3020,9 @@ private void addTablePartsOutputs(String tblName, List> part * pre-execution hook. If the partition does not exist, throw an error if * throwIfNonExistent is true, otherwise ignore it. 
*/ - private void addTablePartsOutputs(String tblName, List> partSpecs, + private void addTablePartsOutputs(Table table, List> partSpecs, boolean throwIfNonExistent, boolean allowMany, ASTNode ast, WriteEntity.WriteType writeType) throws SemanticException { - Table tab = getTable(tblName); Iterator> i; int index; @@ -3036,7 +3031,7 @@ private void addTablePartsOutputs(String tblName, List> part List parts = null; if (allowMany) { try { - parts = db.getPartitions(tab, partSpec); + parts = db.getPartitions(table, partSpec); } catch (HiveException e) { LOG.error("Got HiveException during obtaining list of partitions" + StringUtils.stringifyException(e)); @@ -3045,7 +3040,7 @@ private void addTablePartsOutputs(String tblName, List> part } else { parts = new ArrayList(); try { - Partition p = db.getPartition(tab, partSpec, false); + Partition p = db.getPartition(table, partSpec, false); if (p != null) { parts.add(p); } @@ -3125,17 +3120,18 @@ private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException { */ HiveConf hiveConf = SessionState.get().getConf(); - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - Table tab = getTable(tableName, true); + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); + Table tab = getTable(qualified); inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE)); validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY); + String name = getDotName(qualified); if (ast.getChildCount() == 1) { /* Convert a skewed table to non-skewed table. */ - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true, + AlterTableDesc alterTblDesc = new AlterTableDesc(name, true, new ArrayList(), new ArrayList>()); alterTblDesc.setStoredAsSubDirectories(false); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), @@ -3143,10 +3139,10 @@ private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException { } else { switch (((ASTNode) ast.getChild(1)).getToken().getType()) { case HiveParser.TOK_TABLESKEWED: - handleAlterTableSkewedBy(ast, tableName, tab); + handleAlterTableSkewedBy(ast, name, tab); break; case HiveParser.TOK_STOREDASDIRS: - handleAlterTableDisableStoredAsDirs(tableName, tab); + handleAlterTableDisableStoredAsDirs(name, tab); break; default: assert false; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index ab1188a..8e3c567 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -953,8 +953,8 @@ alterTableStatementSuffix alterStatementPartitionKeyType @init {msgs.push("alter partition key type"); } @after {msgs.pop();} - : identifier KW_PARTITION KW_COLUMN LPAREN columnNameType RPAREN - -> ^(TOK_ALTERTABLE_PARTCOLTYPE identifier columnNameType) + : tableName KW_PARTITION KW_COLUMN LPAREN columnNameType RPAREN + -> ^(TOK_ALTERTABLE_PARTCOLTYPE tableName columnNameType) ; alterViewStatementSuffix @@ -974,16 +974,14 @@ alterViewStatementSuffix alterIndexStatementSuffix @init { pushMsg("alter index statement", state); } @after { popMsg(state); } - : indexName=identifier - (KW_ON tableNameId=identifier) - partitionSpec? + : indexName=identifier KW_ON tableName partitionSpec? ( KW_REBUILD - ->^(TOK_ALTERINDEX_REBUILD $tableNameId $indexName partitionSpec?) + ->^(TOK_ALTERINDEX_REBUILD tableName $indexName partitionSpec?) 
| KW_SET KW_IDXPROPERTIES indexProperties - ->^(TOK_ALTERINDEX_PROPERTIES $tableNameId $indexName indexProperties) + ->^(TOK_ALTERINDEX_PROPERTIES tableName $indexName indexProperties) ) ; @@ -1018,16 +1016,16 @@ alterStatementSuffixRename alterStatementSuffixAddCol @init { pushMsg("add column statement", state); } @after { popMsg(state); } - : identifier (add=KW_ADD | replace=KW_REPLACE) KW_COLUMNS LPAREN columnNameTypeList RPAREN - -> {$add != null}? ^(TOK_ALTERTABLE_ADDCOLS identifier columnNameTypeList) - -> ^(TOK_ALTERTABLE_REPLACECOLS identifier columnNameTypeList) + : tableName (add=KW_ADD | replace=KW_REPLACE) KW_COLUMNS LPAREN columnNameTypeList RPAREN + -> {$add != null}? ^(TOK_ALTERTABLE_ADDCOLS tableName columnNameTypeList) + -> ^(TOK_ALTERTABLE_REPLACECOLS tableName columnNameTypeList) ; alterStatementSuffixRenameCol @init { pushMsg("rename column name", state); } @after { popMsg(state); } - : identifier KW_CHANGE KW_COLUMN? oldName=identifier newName=identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition? - ->^(TOK_ALTERTABLE_RENAMECOL identifier $oldName $newName colType $comment? alterStatementChangeColPosition?) + : tableName KW_CHANGE KW_COLUMN? oldName=identifier newName=identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition? + ->^(TOK_ALTERTABLE_RENAMECOL tableName $oldName $newName colType $comment? alterStatementChangeColPosition?) ; alterStatementChangeColPosition @@ -1039,8 +1037,8 @@ alterStatementChangeColPosition alterStatementSuffixAddPartitions @init { pushMsg("add partition statement", state); } @after { popMsg(state); } - : identifier KW_ADD ifNotExists? alterStatementSuffixAddPartitionsElement+ - -> ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+) + : tableName KW_ADD ifNotExists? alterStatementSuffixAddPartitionsElement+ + -> ^(TOK_ALTERTABLE_ADDPARTS tableName ifNotExists? alterStatementSuffixAddPartitionsElement+) ; alterStatementSuffixAddPartitionsElement @@ -1050,22 +1048,22 @@ alterStatementSuffixAddPartitionsElement alterStatementSuffixTouch @init { pushMsg("touch statement", state); } @after { popMsg(state); } - : identifier KW_TOUCH (partitionSpec)* - -> ^(TOK_ALTERTABLE_TOUCH identifier (partitionSpec)*) + : tableName KW_TOUCH (partitionSpec)* + -> ^(TOK_ALTERTABLE_TOUCH tableName (partitionSpec)*) ; alterStatementSuffixArchive @init { pushMsg("archive statement", state); } @after { popMsg(state); } - : identifier KW_ARCHIVE (partitionSpec)* - -> ^(TOK_ALTERTABLE_ARCHIVE identifier (partitionSpec)*) + : tableName KW_ARCHIVE (partitionSpec)* + -> ^(TOK_ALTERTABLE_ARCHIVE tableName (partitionSpec)*) ; alterStatementSuffixUnArchive @init { pushMsg("unarchive statement", state); } @after { popMsg(state); } - : identifier KW_UNARCHIVE (partitionSpec)* - -> ^(TOK_ALTERTABLE_UNARCHIVE identifier (partitionSpec)*) + : tableName KW_UNARCHIVE (partitionSpec)* + -> ^(TOK_ALTERTABLE_UNARCHIVE tableName (partitionSpec)*) ; partitionLocation @@ -1078,26 +1076,26 @@ partitionLocation alterStatementSuffixDropPartitions @init { pushMsg("drop partition statement", state); } @after { popMsg(state); } - : identifier KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection? - -> ^(TOK_ALTERTABLE_DROPPARTS identifier dropPartitionSpec+ ifExists? ignoreProtection?) + : tableName KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection? + -> ^(TOK_ALTERTABLE_DROPPARTS tableName dropPartitionSpec+ ifExists? 
ignoreProtection?) ; alterStatementSuffixProperties @init { pushMsg("alter properties statement", state); } @after { popMsg(state); } - : name=identifier KW_SET KW_TBLPROPERTIES tableProperties - -> ^(TOK_ALTERTABLE_PROPERTIES $name tableProperties) - | name=identifier KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties - -> ^(TOK_DROPTABLE_PROPERTIES $name tableProperties ifExists?) + : tableName KW_SET KW_TBLPROPERTIES tableProperties + -> ^(TOK_ALTERTABLE_PROPERTIES tableName tableProperties) + | tableName KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties + -> ^(TOK_DROPTABLE_PROPERTIES tableName tableProperties ifExists?) ; alterViewSuffixProperties @init { pushMsg("alter view properties statement", state); } @after { popMsg(state); } - : name=identifier KW_SET KW_TBLPROPERTIES tableProperties - -> ^(TOK_ALTERVIEW_PROPERTIES $name tableProperties) - | name=identifier KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties - -> ^(TOK_DROPVIEW_PROPERTIES $name tableProperties ifExists?) + : tableName KW_SET KW_TBLPROPERTIES tableProperties + -> ^(TOK_ALTERVIEW_PROPERTIES tableName tableProperties) + | tableName KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties + -> ^(TOK_DROPVIEW_PROPERTIES tableName tableProperties ifExists?) ; alterStatementSuffixSerdeProperties @@ -1112,8 +1110,8 @@ alterStatementSuffixSerdeProperties tablePartitionPrefix @init {pushMsg("table partition prefix", state);} @after {popMsg(state);} - :name=identifier partitionSpec? - ->^(TOK_TABLE_PARTITION $name partitionSpec?) + : tableName partitionSpec? + ->^(TOK_TABLE_PARTITION tableName partitionSpec?) ; alterTblPartitionStatement @@ -1192,21 +1190,21 @@ alterStatementSuffixLocation alterStatementSuffixSkewedby @init {pushMsg("alter skewed by statement", state);} @after{popMsg(state);} - :name=identifier tableSkewed - ->^(TOK_ALTERTABLE_SKEWED $name tableSkewed) + : tableName tableSkewed + ->^(TOK_ALTERTABLE_SKEWED tableName tableSkewed) | - name=identifier KW_NOT KW_SKEWED - ->^(TOK_ALTERTABLE_SKEWED $name) + tableName KW_NOT KW_SKEWED + ->^(TOK_ALTERTABLE_SKEWED tableName) | - name=identifier KW_NOT storedAsDirs - ->^(TOK_ALTERTABLE_SKEWED $name storedAsDirs) + tableName KW_NOT storedAsDirs + ->^(TOK_ALTERTABLE_SKEWED tableName storedAsDirs) ; alterStatementSuffixExchangePartition @init {pushMsg("alter exchange partition", state);} @after{popMsg(state);} - : name=tableName KW_EXCHANGE partitionSpec KW_WITH KW_TABLE exchangename=tableName - -> ^(TOK_EXCHANGEPARTITION $name partitionSpec $exchangename) + : tableName KW_EXCHANGE partitionSpec KW_WITH KW_TABLE exchangename=tableName + -> ^(TOK_EXCHANGEPARTITION tableName partitionSpec $exchangename) ; alterStatementSuffixProtectMode @@ -1315,14 +1313,14 @@ showStatement @after { popMsg(state); } : KW_SHOW (KW_DATABASES|KW_SCHEMAS) (KW_LIKE showStmtIdentifier)? -> ^(TOK_SHOWDATABASES showStmtIdentifier?) | KW_SHOW KW_TABLES ((KW_FROM|KW_IN) db_name=identifier)? (KW_LIKE showStmtIdentifier|showStmtIdentifier)? -> ^(TOK_SHOWTABLES (TOK_FROM $db_name)? showStmtIdentifier?) - | KW_SHOW KW_COLUMNS (KW_FROM|KW_IN) tabname=tableName ((KW_FROM|KW_IN) db_name=identifier)? - -> ^(TOK_SHOWCOLUMNS $db_name? $tabname) + | KW_SHOW KW_COLUMNS (KW_FROM|KW_IN) tableName ((KW_FROM|KW_IN) db_name=identifier)? + -> ^(TOK_SHOWCOLUMNS tableName $db_name?) | KW_SHOW KW_FUNCTIONS showFunctionIdentifier? -> ^(TOK_SHOWFUNCTIONS showFunctionIdentifier?) | KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec?) 
| KW_SHOW KW_CREATE KW_TABLE tabName=tableName -> ^(TOK_SHOW_CREATETABLE $tabName) | KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=identifier)? KW_LIKE showStmtIdentifier partitionSpec? -> ^(TOK_SHOW_TABLESTATUS showStmtIdentifier $db_name? partitionSpec?) - | KW_SHOW KW_TBLPROPERTIES tblName=identifier (LPAREN prptyName=StringLiteral RPAREN)? -> ^(TOK_SHOW_TBLPROPERTIES $tblName $prptyName?) + | KW_SHOW KW_TBLPROPERTIES tableName (LPAREN prptyName=StringLiteral RPAREN)? -> ^(TOK_SHOW_TBLPROPERTIES tableName $prptyName?) | KW_SHOW KW_LOCKS (parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?) | KW_SHOW KW_LOCKS (KW_DATABASE|KW_SCHEMA) (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?) | KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)? @@ -1455,26 +1453,25 @@ privilegeIncludeColObject @init {pushMsg("privilege object including columns", state);} @after {popMsg(state);} : KW_ALL -> ^(TOK_RESOURCE_ALL) - | privObjectType identifier (LPAREN cols=columnNameList RPAREN)? partitionSpec? - -> ^(TOK_PRIV_OBJECT_COL identifier privObjectType $cols? partitionSpec?) + | privObjectCols -> ^(TOK_PRIV_OBJECT_COL privObjectCols) ; privilegeObject -@init {pushMsg("privilege subject", state);} +@init {pushMsg("privilege object", state);} @after {popMsg(state);} - : KW_ON privObjectType identifier partitionSpec? - -> ^(TOK_PRIV_OBJECT identifier privObjectType partitionSpec?) + : KW_ON privObject -> ^(TOK_PRIV_OBJECT privObject) ; - // database or table type. Type is optional, default type is table -privObjectType -@init {pushMsg("privilege object type type", state);} -@after {popMsg(state);} - : (KW_DATABASE|KW_SCHEMA) -> ^(TOK_DB_TYPE) - | KW_TABLE? -> ^(TOK_TABLE_TYPE) +privObject + : (KW_DATABASE|KW_SCHEMA) identifier -> ^(TOK_DB_TYPE identifier) + | KW_TABLE? tableName partitionSpec? -> ^(TOK_TABLE_TYPE tableName partitionSpec?) ; +privObjectCols + : (KW_DATABASE|KW_SCHEMA) identifier -> ^(TOK_DB_TYPE identifier) + | KW_TABLE? tableName (LPAREN cols=columnNameList RPAREN)? partitionSpec? -> ^(TOK_TABLE_TYPE tableName $cols? partitionSpec?) + ; privilegeList @init {pushMsg("grant privilege list", state);} @@ -1551,8 +1548,8 @@ withAdminOption metastoreCheck @init { pushMsg("metastore check statement", state); } @after { popMsg(state); } - : KW_MSCK (repair=KW_REPAIR)? (KW_TABLE table=identifier partitionSpec? (COMMA partitionSpec)*)? - -> ^(TOK_MSCK $repair? ($table partitionSpec*)?) + : KW_MSCK (repair=KW_REPAIR)? (KW_TABLE tableName partitionSpec? (COMMA partitionSpec)*)? + -> ^(TOK_MSCK $repair? (tableName partitionSpec*)?) 
; resourceList diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java index 856ec2f..9f12893 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -137,7 +138,9 @@ private void doIndexUpdate(Index index, Map partSpec) throws private boolean containsPartition(Index index, Map partSpec) throws HiveException { - Table indexTable = hive.getTable(index.getIndexTableName()); + String[] qualified = Utilities.getDbTableName(index.getDbName(), + index.getIndexTableName()); + Table indexTable = hive.getTable(qualified[0], qualified[1]); List parts = hive.getPartitions(indexTable, partSpec); return (parts == null || parts.size() == 0); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 51838ae..e1e0ef6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -10148,7 +10148,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) // check for existence of table if (ifNotExists) { try { - Table table = getTableWithQN(tableName, false); + Table table = getTable(tableName, false); if (table != null) { // table exists return null; } @@ -10213,7 +10213,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) tblProps = addDefaultProperties(tblProps); if (isTemporary) { - Table likeTable = getTableWithQN(likeTableName, false); + Table likeTable = getTable(likeTableName, false); if (likeTable != null && likeTable.getPartCols().size() > 0) { throw new SemanticException("Partition columns are not supported on temporary tables " + "and source table in CREATE TABLE LIKE is partitioned."); @@ -10333,7 +10333,7 @@ private ASTNode analyzeCreateView(ASTNode ast, QB qb) private void validateCreateView(CreateViewDesc createVwDesc) throws SemanticException { try { - Table oldView = getTableWithQN(createVwDesc.getViewName(), false); + Table oldView = getTable(createVwDesc.getViewName(), false); // ALTER VIEW AS SELECT requires the view must exist if (createVwDesc.getIsAlterViewAs() && oldView == null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java index 826bdf3..f92ecf2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java @@ -161,7 +161,6 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { PrincipalDesc principalDesc = null; PrivilegeObjectDesc privHiveObj = null; - List cols = null; ASTNode param = null; if (ast.getChildCount() > 0) { @@ -176,30 +175,12 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { if (param.getType() == HiveParser.TOK_RESOURCE_ALL) { privHiveObj = new PrivilegeObjectDesc(); } else if (param.getType() == HiveParser.TOK_PRIV_OBJECT_COL) { - 
privHiveObj = new PrivilegeObjectDesc(); - //set object name - String text = param.getChild(0).getText(); - privHiveObj.setObject(BaseSemanticAnalyzer.unescapeIdentifier(text)); - //set object type - ASTNode objTypeNode = (ASTNode) param.getChild(1); - privHiveObj.setTable(objTypeNode.getToken().getType() == HiveParser.TOK_TABLE_TYPE); - - //set col and partition spec if specified - for (int i = 2; i < param.getChildCount(); i++) { - ASTNode partOrCol = (ASTNode) param.getChild(i); - if (partOrCol.getType() == HiveParser.TOK_PARTSPEC) { - privHiveObj.setPartSpec(DDLSemanticAnalyzer.getPartSpec(partOrCol)); - } else if (partOrCol.getType() == HiveParser.TOK_TABCOLNAME) { - cols = BaseSemanticAnalyzer.getColumnNames(partOrCol); - } else { - throw new SemanticException("Invalid token type " + partOrCol.getType()); - } - } + privHiveObj = parsePrivObject(param); } } ShowGrantDesc showGrant = new ShowGrantDesc(resultFile.toString(), - principalDesc, privHiveObj, cols); + principalDesc, privHiveObj); return TaskFactory.get(new DDLWork(inputs, outputs, showGrant), conf); } @@ -219,7 +200,7 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { boolean isAdmin = false; if((isGrant && wAdminOption.getToken().getType() == HiveParser.TOK_GRANT_WITH_ADMIN_OPTION) || (!isGrant && wAdminOption.getToken().getType() == HiveParser.TOK_ADMIN_OPTION_FOR)){ - rolesStartPos = 2; //start reading role names from next postion + rolesStartPos = 2; //start reading role names from next position isAdmin = true; } @@ -242,20 +223,10 @@ private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast, HashSet outputs) throws SemanticException { - PrivilegeObjectDesc subject = new PrivilegeObjectDesc(); - //set object identifier - subject.setObject(BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(0).getText())); - //set object type - ASTNode objTypeNode = (ASTNode) ast.getChild(1); - subject.setTable(objTypeNode.getToken().getType() == HiveParser.TOK_TABLE_TYPE); - if (ast.getChildCount() == 3) { - //if partition spec node is present, set partition spec - ASTNode partSpecNode = (ASTNode) ast.getChild(2); - subject.setPartSpec(DDLSemanticAnalyzer.getPartSpec(partSpecNode)); - } + PrivilegeObjectDesc subject = parsePrivObject(ast); if (subject.getTable()) { - Table tbl = getTable(SessionState.get().getCurrentDatabase(), subject.getObject()); + Table tbl = getTable(subject.getObject()); if (subject.getPartSpec() != null) { Partition part = getPartition(tbl, subject.getPartSpec()); outputs.add(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK)); @@ -267,6 +238,30 @@ private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast, return subject; } + private PrivilegeObjectDesc parsePrivObject(ASTNode ast) throws SemanticException { + PrivilegeObjectDesc subject = new PrivilegeObjectDesc(); + ASTNode child = (ASTNode) ast.getChild(0); + ASTNode gchild = (ASTNode)child.getChild(0); + if (child.getType() == HiveParser.TOK_TABLE_TYPE) { + subject.setTable(true); + String[] qualified = BaseSemanticAnalyzer.getQualifiedTableName(gchild); + subject.setObject(BaseSemanticAnalyzer.getDotName(qualified)); + } else { + subject.setTable(false); + subject.setObject(BaseSemanticAnalyzer.unescapeIdentifier(gchild.getText())); + } + //if partition spec node is present, set partition spec + for (int i = 1; i < child.getChildCount(); i++) { + gchild = (ASTNode) child.getChild(i); + if (gchild.getType() == HiveParser.TOK_PARTSPEC) { + subject.setPartSpec(DDLSemanticAnalyzer.getPartSpec(gchild)); + } else if 
(gchild.getType() == HiveParser.TOK_TABCOLNAME) { + subject.setColumns(BaseSemanticAnalyzer.getColumnNames(gchild)); + } + } + return subject; + } + private List analyzePrivilegeListDef(ASTNode node) throws SemanticException { List ret = new ArrayList(); @@ -289,6 +284,10 @@ private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast, return ret; } + private Table getTable(String tblName) throws SemanticException { + return getTable(null, tblName); + } + private Table getTable(String database, String tblName) throws SemanticException { try { diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java index 0318e4b..db2cf7f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java @@ -19,13 +19,7 @@ package org.apache.hadoop.hive.ql.plan; import java.io.Serializable; -import java.util.ArrayList; import java.util.Map; -import java.util.List; - -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.ql.exec.Utilities; /** * AlterIndexDesc. @@ -36,7 +30,6 @@ private static final long serialVersionUID = 1L; private String indexName; private String baseTable; - private String dbName; private Map partSpec; // partition specification of partitions touched private Map props; @@ -105,21 +98,6 @@ public void setSpec(Map partSpec) { } /** - * @return the name of the database that the base table is in - */ - public String getDbName() { - return dbName; - } - - /** - * @param dbName - * the dbName to set - */ - public void setDbName(String dbName) { - this.dbName = dbName; - } - - /** * @return the op */ public AlterIndexTypes getOp() { diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableAlterPartDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableAlterPartDesc.java index cf67e16..197fda8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableAlterPartDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableAlterPartDesc.java @@ -20,27 +20,20 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; -import java.util.List; - public class AlterTableAlterPartDesc extends DDLDesc { private String tableName; - private String dbName; private FieldSchema partKeySpec; public AlterTableAlterPartDesc() { } /** - * @param dbName - * database that contains the table / partition * @param tableName * table containing the partition * @param partKeySpec - * key column specification. 
*/ - public AlterTableAlterPartDesc(String dbName, String tableName, FieldSchema partKeySpec) { + public AlterTableAlterPartDesc(String tableName, FieldSchema partKeySpec) { super(); - this.dbName = dbName; this.tableName = tableName; this.partKeySpec = partKeySpec; } @@ -53,14 +46,6 @@ public void setTableName(String tableName) { this.tableName = tableName; } - public String getDbName() { - return dbName; - } - - public void setDbName(String dbName) { - this.dbName = dbName; - } - public FieldSchema getPartKeySpec() { return partKeySpec; } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java index 541675c..d819d15 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableSimpleDesc.java @@ -29,7 +29,6 @@ */ public class AlterTableSimpleDesc extends DDLDesc { private String tableName; - private String dbName; private LinkedHashMap partSpec; private String compactionType; @@ -39,17 +38,12 @@ public AlterTableSimpleDesc() { } /** - * @param dbName - * database that contains the table / partition * @param tableName * table containing the partition * @param partSpec - * partition specification. Null if touching a table. */ - public AlterTableSimpleDesc(String dbName, String tableName, - Map partSpec, AlterTableDesc.AlterTableTypes type) { - super(); - this.dbName = dbName; + public AlterTableSimpleDesc(String tableName, + Map partSpec, AlterTableTypes type) { this.tableName = tableName; if(partSpec == null) { this.partSpec = null; @@ -61,16 +55,14 @@ public AlterTableSimpleDesc(String dbName, String tableName, /** * Constructor for ALTER TABLE ... COMPACT. - * @param dbname name of the database containing the table * @param tableName name of the table to compact * @param partSpec partition to compact * @param compactionType currently supported values: 'major' and 'minor' */ - public AlterTableSimpleDesc(String dbname, String tableName, - LinkedHashMap partSpec, String compactionType) { + public AlterTableSimpleDesc(String tableName, + LinkedHashMap partSpec, String compactionType) { type = AlterTableTypes.COMPACT; this.compactionType = compactionType; - this.dbName = dbname; this.tableName = tableName; this.partSpec = partSpec; } @@ -83,14 +75,6 @@ public void setTableName(String tableName) { this.tableName = tableName; } - public String getDbName() { - return dbName; - } - - public void setDbName(String dbName) { - this.dbName = dbName; - } - public AlterTableDesc.AlterTableTypes getType() { return type; } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java index 9417220..5265289 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.plan; import java.util.HashMap; +import java.util.List; @Explain(displayName="privilege subject") public class PrivilegeObjectDesc { @@ -30,6 +31,8 @@ private HashMap partSpec; + private List columns; + public PrivilegeObjectDesc(boolean isTable, String object, HashMap partSpec) { super(); @@ -68,4 +71,11 @@ public void setPartSpec(HashMap partSpec) { this.partSpec = partSpec; } + public List getColumns() { + return columns; + } + + public void setColumns(List columns) { + this.columns = columns; + } } diff --git 
ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java index 1b5fb9e..7523d01 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java @@ -29,7 +29,6 @@ private static final long serialVersionUID = 1L; String tableName; - String dbName; String location; LinkedHashMap oldPartSpec; LinkedHashMap newPartSpec; @@ -50,31 +49,14 @@ public RenamePartitionDesc() { * @param newPartSpec * new partition specification. */ - public RenamePartitionDesc(String dbName, String tableName, + public RenamePartitionDesc(String tableName, Map oldPartSpec, Map newPartSpec) { - super(); - this.dbName = dbName; this.tableName = tableName; this.oldPartSpec = new LinkedHashMap(oldPartSpec); this.newPartSpec = new LinkedHashMap(newPartSpec); } /** - * @return database name - */ - public String getDbName() { - return dbName; - } - - /** - * @param dbName - * database name - */ - public void setDbName(String dbName) { - this.dbName = dbName; - } - - /** * @return the table we're going to add the partitions to. */ public String getTableName() { diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowColumnsDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowColumnsDesc.java index fe6a91e..28d16a3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowColumnsDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowColumnsDesc.java @@ -23,7 +23,6 @@ public class ShowColumnsDesc extends DDLDesc implements Serializable { private static final long serialVersionUID = 1L; - String dbName; String tableName; String resFile; /** @@ -63,16 +62,6 @@ public ShowColumnsDesc(Path resFile, String tableName) { } /** - * @param dbName name of the database - * @param tableName name of table to show columns of - */ - public ShowColumnsDesc(Path resFile, String dbName, String tableName) { - this.resFile = resFile.toString(); - this.dbName = dbName; - this.tableName = tableName; - } - - /** * @return the tableName */ @Explain(displayName = "table name") @@ -103,12 +92,4 @@ public String getResFile() { public void setResFile(String resFile) { this.resFile = resFile; } - - public String getDbName() { - return dbName; - } - - public void setDbName(String dbName) { - this.dbName = dbName; - } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java index aa88153..d27da3d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hive.ql.plan; -import java.util.List; - @Explain(displayName="show grant desc") public class ShowGrantDesc { @@ -26,8 +24,6 @@ private PrivilegeObjectDesc hiveObj; - private List columns; - private String resFile; /** @@ -42,11 +38,10 @@ public ShowGrantDesc(){ } public ShowGrantDesc(String resFile, PrincipalDesc principalDesc, - PrivilegeObjectDesc subjectObj, List columns) { + PrivilegeObjectDesc subjectObj) { this.resFile = resFile; this.principalDesc = principalDesc; this.hiveObj = subjectObj; - this.columns = columns; } public static String getSchema() { @@ -78,12 +73,4 @@ public String getResFile() { public void setResFile(String resFile) { this.resFile = resFile; } - - public List getColumns() { - return columns; - } - - public void setColumns(List columns) { - this.columns = columns; - } } diff --git 
ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java index 5c94217..2113f45 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java @@ -19,7 +19,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; @@ -173,14 +172,15 @@ public static HivePrivilegeObject getHiveObjectRef(HiveObjectRef privObj) throws privObj.getPartValues(), privObj.getColumnName()); } - public static HivePrivilegeObject getHivePrivilegeObject( - PrivilegeObjectDesc privSubjectDesc, Set columns) throws HiveException { + public static HivePrivilegeObject getHivePrivilegeObject(PrivilegeObjectDesc privSubjectDesc) + throws HiveException { // null means ALL for show grants, GLOBAL for grant/revoke HivePrivilegeObjectType objectType = null; String[] dbTable; List partSpec = null; + List columns = null; if (privSubjectDesc == null) { dbTable = new String[] {null, null}; } else { @@ -192,6 +192,7 @@ public static HivePrivilegeObject getHivePrivilegeObject( if (privSubjectDesc.getPartSpec() != null) { partSpec = new ArrayList(privSubjectDesc.getPartSpec().values()); } + columns = privSubjectDesc.getColumns(); objectType = getPrivObjectType(privSubjectDesc); } return new HivePrivilegeObject(objectType, dbTable[0], dbTable[1], partSpec, columns, null); diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java index 9e9ef71..093b4fd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java @@ -19,10 +19,8 @@ import java.util.Arrays; import java.util.Collection; -import java.util.HashSet; import java.util.Iterator; import java.util.List; -import java.util.Set; import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable; @@ -91,7 +89,7 @@ private int compare(Collection o1, Collection o2) { private final String objectName; private final List commandParams; private final List partKeys; - private Set columns; + private final List columns; private final HivePrivObjectActionType actionType; public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String objectName) { @@ -106,7 +104,7 @@ public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String o public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String objectName, List partKeys, String column) { this(type, dbname, objectName, partKeys, - column == null ? null : new HashSet(Arrays.asList(column)), + column == null ? 
null : Arrays.asList(column), HivePrivObjectActionType.OTHER, null); } @@ -121,12 +119,12 @@ public static HivePrivilegeObject createHivePrivilegeObject(List cmdPara } public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String objectName, - List partKeys, Set columns, List commandParams) { + List partKeys, List columns, List commandParams) { this(type, dbname, objectName, partKeys, columns, HivePrivObjectActionType.OTHER, commandParams); } public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String objectName, - List partKeys, Set columns, HivePrivObjectActionType actionType, + List partKeys, List columns, HivePrivObjectActionType actionType, List commandParams) { this.type = type; this.dbname = dbname; @@ -170,7 +168,7 @@ public HivePrivObjectActionType getActionType() { * Column information is not set for DDL operations and for tables being written into * @return list of applicable columns */ - public Set getColumns() { + public List getColumns() { return columns; } @@ -218,9 +216,4 @@ public String toString() { private String getDbObjectName(String dbname2, String objectName2) { return (dbname == null ? "" : dbname + ".") + objectName; } - - public void setColumns(Set columnms) { - this.columns = columnms; - } - } diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java index fbc0090..ac1cc47 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java @@ -319,7 +319,7 @@ private void grantOrRevokeRole(List principals, List role privs.addAll(hive.showPrivilegeGrant(HiveObjectType.DATABASE, name, type, dbObj.getName(), null, null, null)); } else { - Set columns = privObj.getColumns(); + List columns = privObj.getColumns(); if (columns != null && !columns.isEmpty()) { // show column level privileges for (String columnName : columns) { diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index 98c2924..406aae9 100755 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -397,6 +397,7 @@ public void testIndex() throws Throwable { try{ // create a simple table String tableName = "table_for_testindex"; + String qTableName = MetaStoreUtils.DEFAULT_DATABASE_NAME + "." + tableName; try { hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName); } catch (HiveException e) { @@ -431,6 +432,7 @@ public void testIndex() throws Throwable { List indexedCols = new ArrayList(); indexedCols.add("col1"); String indexTableName = "index_on_table_for_testindex_table"; + String qIndexTableName = MetaStoreUtils.DEFAULT_DATABASE_NAME + "." 
+ indexTableName; boolean deferredRebuild = true; String inputFormat = SequenceFileInputFormat.class.getName(); String outputFormat = SequenceFileOutputFormat.class.getName(); @@ -446,7 +448,7 @@ public void testIndex() throws Throwable { Map indexProps = null; Map tableProps = null; Map serdeProps = new HashMap(); - hm.createIndex(tableName, indexName, indexHandlerClass, indexedCols, indexTableName, + hm.createIndex(qTableName, indexName, indexHandlerClass, indexedCols, qIndexTableName, deferredRebuild, inputFormat, outputFormat, serde, storageHandler, location, indexProps, tableProps, serdeProps, collItemDelim, fieldDelim, fieldEscape, lineDelim, mapKeyDelim, indexComment); diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java index 5f32d5f..c5a0b8d 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java @@ -79,8 +79,7 @@ public void testNonPartitionedTable() throws Exception { boolean sawException = false; AlterTableSimpleDesc desc = parseAndAnalyze("alter table foo compact 'major'"); Assert.assertEquals("major", desc.getCompactionType()); - Assert.assertEquals("foo", desc.getTableName()); - Assert.assertEquals("default", desc.getDbName()); + Assert.assertEquals("default.foo", desc.getTableName()); } @Test @@ -100,8 +99,7 @@ public void testMajor() throws Exception { AlterTableSimpleDesc desc = parseAndAnalyze("alter table foo partition(ds = 'today') compact 'major'"); Assert.assertEquals("major", desc.getCompactionType()); - Assert.assertEquals("foo", desc.getTableName()); - Assert.assertEquals("default", desc.getDbName()); + Assert.assertEquals("default.foo", desc.getTableName()); HashMap parts = desc.getPartSpec(); Assert.assertEquals(1, parts.size()); Assert.assertEquals("today", parts.get("ds")); @@ -112,8 +110,7 @@ public void testMinor() throws Exception { AlterTableSimpleDesc desc = parseAndAnalyze("alter table foo partition(ds = 'today') compact 'minor'"); Assert.assertEquals("minor", desc.getCompactionType()); - Assert.assertEquals("foo", desc.getTableName()); - Assert.assertEquals("default", desc.getDbName()); + Assert.assertEquals("default.foo", desc.getTableName()); HashMap parts = desc.getPartSpec(); Assert.assertEquals(1, parts.size()); Assert.assertEquals("today", parts.get("ds")); diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/PrivilegesTestBase.java ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/PrivilegesTestBase.java index 93901ec..e3b82f3 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/PrivilegesTestBase.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/PrivilegesTestBase.java @@ -31,6 +31,7 @@ public class PrivilegesTestBase { protected static final String DB = "default"; protected static final String TABLE = "table1"; + protected static final String TABLE_QNAME = DB + "." 
+ TABLE; protected static final String USER = "user1"; public static void grantUserTable(String privStr, PrivilegeType privType, HiveConf conf, Hive db) @@ -50,7 +51,7 @@ public static void grantUserTable(String privStr, PrivilegeType privType, HiveCo Assert.assertEquals(USER, principal.getName()); } Assert.assertTrue("Expected table", grantDesc.getPrivilegeSubjectDesc().getTable()); - Assert.assertEquals(TABLE, grantDesc.getPrivilegeSubjectDesc().getObject()); + Assert.assertEquals(TABLE_QNAME, grantDesc.getPrivilegeSubjectDesc().getObject()); } } diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java index ab0d80e..e49ba05 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java @@ -50,6 +50,7 @@ private static final String SELECT = "SELECT"; private static final String DB = "default"; private static final String TABLE = "table1"; + private static final String TABLE_QNAME = DB + "." + TABLE; private static final String GROUP = "group1"; private static final String ROLE = "role1"; private static final String USER = "user1"; @@ -74,6 +75,7 @@ public void setup() throws Exception { parseDriver = new ParseDriver(); analyzer = new DDLSemanticAnalyzer(conf, db); Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table); + Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table); Mockito.when(db.getPartition(table, new HashMap(), false)) .thenReturn(partition); HadoopDefaultAuthenticator auth = new HadoopDefaultAuthenticator(); @@ -121,7 +123,7 @@ public void testGrantUserTable() throws Exception { Assert.assertEquals(Privilege.SELECT, privilege.getPrivilege()); } Assert.assertTrue("Expected table", grantDesc.getPrivilegeSubjectDesc().getTable()); - Assert.assertEquals(TABLE, grantDesc.getPrivilegeSubjectDesc().getObject()); + Assert.assertEquals(TABLE_QNAME, grantDesc.getPrivilegeSubjectDesc().getObject()); } /** * GRANT ... ON TABLE ... TO ROLE ... @@ -139,7 +141,7 @@ public void testGrantRoleTable() throws Exception { Assert.assertEquals(Privilege.SELECT, privilege.getPrivilege()); } Assert.assertTrue("Expected table", grantDesc.getPrivilegeSubjectDesc().getTable()); - Assert.assertEquals(TABLE, grantDesc.getPrivilegeSubjectDesc().getObject()); + Assert.assertEquals(TABLE_QNAME, grantDesc.getPrivilegeSubjectDesc().getObject()); } /** * GRANT ... ON TABLE ... TO GROUP ... @@ -157,7 +159,7 @@ public void testGrantGroupTable() throws Exception { Assert.assertEquals(Privilege.SELECT, privilege.getPrivilege()); } Assert.assertTrue("Expected table", grantDesc.getPrivilegeSubjectDesc().getTable()); - Assert.assertEquals(TABLE, grantDesc.getPrivilegeSubjectDesc().getObject()); + Assert.assertEquals(TABLE_QNAME, grantDesc.getPrivilegeSubjectDesc().getObject()); } /** * REVOKE ... ON TABLE ... FROM USER ... @@ -175,7 +177,7 @@ public void testRevokeUserTable() throws Exception { Assert.assertEquals(Privilege.SELECT, privilege.getPrivilege()); } Assert.assertTrue("Expected table", grantDesc.getPrivilegeSubjectDesc().getTable()); - Assert.assertEquals(TABLE, grantDesc.getPrivilegeSubjectDesc().getObject()); + Assert.assertEquals(TABLE_QNAME, grantDesc.getPrivilegeSubjectDesc().getObject()); } /** * REVOKE ... ON TABLE ... FROM ROLE ... 
@@ -193,7 +195,7 @@ public void testRevokeRoleTable() throws Exception { Assert.assertEquals(Privilege.SELECT, privilege.getPrivilege()); } Assert.assertTrue("Expected table", grantDesc.getPrivilegeSubjectDesc().getTable()); - Assert.assertEquals(TABLE, grantDesc.getPrivilegeSubjectDesc().getObject()); + Assert.assertEquals(TABLE_QNAME, grantDesc.getPrivilegeSubjectDesc().getObject()); } /** * REVOKE ... ON TABLE ... FROM GROUP ... @@ -211,7 +213,7 @@ public void testRevokeGroupTable() throws Exception { Assert.assertEquals(Privilege.SELECT, privilege.getPrivilege()); } Assert.assertTrue("Expected table", grantDesc.getPrivilegeSubjectDesc().getTable()); - Assert.assertEquals(TABLE, grantDesc.getPrivilegeSubjectDesc().getObject()); + Assert.assertEquals(TABLE_QNAME, grantDesc.getPrivilegeSubjectDesc().getObject()); } /** * GRANT ROLE ... TO USER ... @@ -380,7 +382,7 @@ public void testShowGrantUserOnTable() throws Exception { Assert.assertEquals(PrincipalType.USER, grantDesc.getPrincipalDesc().getType()); Assert.assertEquals(USER, grantDesc.getPrincipalDesc().getName()); Assert.assertTrue("Expected table", grantDesc.getHiveObj().getTable()); - Assert.assertEquals(TABLE, grantDesc.getHiveObj().getObject()); + Assert.assertEquals(TABLE_QNAME, grantDesc.getHiveObj().getObject()); Assert.assertTrue("Expected table", grantDesc.getHiveObj().getTable()); } /** @@ -394,7 +396,7 @@ public void testShowGrantRoleOnTable() throws Exception { Assert.assertEquals(PrincipalType.ROLE, grantDesc.getPrincipalDesc().getType()); Assert.assertEquals(ROLE, grantDesc.getPrincipalDesc().getName()); Assert.assertTrue("Expected table", grantDesc.getHiveObj().getTable()); - Assert.assertEquals(TABLE, grantDesc.getHiveObj().getObject()); + Assert.assertEquals(TABLE_QNAME, grantDesc.getHiveObj().getObject()); Assert.assertTrue("Expected table", grantDesc.getHiveObj().getTable()); } /** @@ -408,7 +410,7 @@ public void testShowGrantGroupOnTable() throws Exception { Assert.assertEquals(PrincipalType.GROUP, grantDesc.getPrincipalDesc().getType()); Assert.assertEquals(GROUP, grantDesc.getPrincipalDesc().getName()); Assert.assertTrue("Expected table", grantDesc.getHiveObj().getTable()); - Assert.assertEquals(TABLE, grantDesc.getHiveObj().getObject()); + Assert.assertEquals(TABLE_QNAME, grantDesc.getHiveObj().getObject()); Assert.assertTrue("Expected table", grantDesc.getHiveObj().getTable()); } diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java index fd827ad..c97bbb8 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV1.java @@ -46,6 +46,7 @@ public void setup() throws Exception { partition = new Partition(table); SessionState.start(conf); Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table); + Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table); Mockito.when(db.getPartition(table, new HashMap(), false)) .thenReturn(partition); } diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV2.java ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV2.java index 9499986..7b28375 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV2.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestPrivilegesV2.java @@ -47,6 +47,7 @@ public void setup() throws Exception { table = new Table(DB, 
TABLE); SessionState.start(conf); Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table); + Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table); Mockito.when(db.getPartition(table, new HashMap(), false)) .thenReturn(partition); } diff --git ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out index 500d45d..ef85e06 100644 --- ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out +++ ql/src/test/results/clientnegative/alter_concatenate_indexed_table.q.out @@ -74,4 +74,4 @@ PREHOOK: type: SHOWINDEXES POSTHOOK: query: show indexes on src_rc_concatenate_test POSTHOOK: type: SHOWINDEXES src_rc_concatenate_test_index src_rc_concatenate_test key default__src_rc_concatenate_test_src_rc_concatenate_test_index__ compact -FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: can not do merge because source table src_rc_concatenate_test is indexed. +FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: can not do merge because source table default.src_rc_concatenate_test is indexed. diff --git ql/src/test/results/clientnegative/alter_view_failure6.q.out ql/src/test/results/clientnegative/alter_view_failure6.q.out index cfbaca8..564b78b 100644 --- ql/src/test/results/clientnegative/alter_view_failure6.q.out +++ ql/src/test/results/clientnegative/alter_view_failure6.q.out @@ -15,5 +15,5 @@ SELECT hr,key FROM srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart POSTHOOK: Output: default@xxx7 -FAILED: SemanticException [Error 10041]: No partition predicate found for Alias "xxx7:srcpart" Table "srcpart" +FAILED: SemanticException [Error 10041]: No partition predicate found for Alias "default.xxx7:srcpart" Table "srcpart" FAILED: SemanticException [Error 10056]: The query does not reference any valid partition. To run this query, set hive.mapred.mode=nonstrict diff --git ql/src/test/results/clientnegative/merge_negative_1.q.out ql/src/test/results/clientnegative/merge_negative_1.q.out index 95f6678..e7bb828 100644 --- ql/src/test/results/clientnegative/merge_negative_1.q.out +++ ql/src/test/results/clientnegative/merge_negative_1.q.out @@ -10,4 +10,4 @@ PREHOOK: type: CREATEINDEX POSTHOOK: query: CREATE INDEX src_index_merge_test ON TABLE src2(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Output: default@default__src2_src_index_merge_test__ -FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: can not do merge because source table src2 is indexed. +FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: can not do merge because source table default.src2 is indexed. diff --git ql/src/test/results/clientnegative/merge_negative_2.q.out ql/src/test/results/clientnegative/merge_negative_2.q.out index b3422e1..b75bff9 100644 --- ql/src/test/results/clientnegative/merge_negative_2.q.out +++ ql/src/test/results/clientnegative/merge_negative_2.q.out @@ -15,4 +15,4 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@srcpart2@ds=2011 POSTHOOK: Lineage: srcpart2 PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: srcpart2 PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: source table srcpart2 is partitioned but no partition desc found. 
+FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: source table default.srcpart2 is partitioned but no partition desc found.
diff --git ql/src/test/results/clientnegative/show_columns3.q.out ql/src/test/results/clientnegative/show_columns3.q.out
index 09068b7..4479fd0 100644
--- ql/src/test/results/clientnegative/show_columns3.q.out
+++ ql/src/test/results/clientnegative/show_columns3.q.out
@@ -17,4 +17,4 @@ PREHOOK: query: use default
 PREHOOK: type: SWITCHDATABASE
 POSTHOOK: query: use default
 POSTHOOK: type: SWITCHDATABASE
-FAILED: SemanticException [Error 10001]: Table not found test_db.foo
+FAILED: SemanticException Duplicates declaration for database name
diff --git ql/src/test/results/clientnegative/show_tableproperties1.q.out ql/src/test/results/clientnegative/show_tableproperties1.q.out
index ca54088..29ed890 100644
--- ql/src/test/results/clientnegative/show_tableproperties1.q.out
+++ ql/src/test/results/clientnegative/show_tableproperties1.q.out
@@ -1 +1 @@
-FAILED: SemanticException [Error 10001]: Table not found NonExistentTable
+FAILED: SemanticException [Error 10001]: Table not found default.NonExistentTable
diff --git ql/src/test/results/clientnegative/temp_table_index.q.out ql/src/test/results/clientnegative/temp_table_index.q.out
index 8ec5c0a..f452713 100644
--- ql/src/test/results/clientnegative/temp_table_index.q.out
+++ ql/src/test/results/clientnegative/temp_table_index.q.out
@@ -7,4 +7,4 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tmp1
 PREHOOK: query: create index tmp1_idx on table tmp1 (c1) as 'COMPACT' with deferred rebuild
 PREHOOK: type: CREATEINDEX
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: tableName=tmp1 is a TEMPORARY TABLE. Index on TEMPORARY TABLE is not supported.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: tableName=default.tmp1 is a TEMPORARY TABLE. Index on TEMPORARY TABLE is not supported.
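
The expected-output changes above, together with the switch from identifier to tableName in HiveParser.g and the getQualifiedTableName/getDotName and Utilities.getDbTableName calls in the analyzers, all reflect one behaviour: a DDL table reference may now be written as "db.table", and a bare name is resolved against the default database before it reaches plan descriptors or error messages. Below is a minimal standalone sketch of that resolution, assuming a default database named "default"; the QualifiedNameSketch class and its method names are illustrative only and are not part of the patch or of Hive.

import java.util.Arrays;

// Illustrative stand-in (not part of the patch or of Hive) for the
// qualified-name handling exercised above: split an optionally qualified
// "db.table" against a default database, then rebuild the dotted form that
// now shows up in descriptors and error messages.
public class QualifiedNameSketch {

  // Returns {dbName, tableName}; a bare name falls back to defaultDb.
  static String[] resolve(String defaultDb, String name) {
    String[] parts = name.split("\\.");
    if (parts.length == 1) {
      return new String[] { defaultDb, name };
    }
    return new String[] { parts[0], parts[1] };
  }

  // Joins the pair back into the dotted "db.table" form.
  static String dotName(String[] qualified) {
    return qualified[0] + "." + qualified[1];
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(resolve("default", "src2")));          // [default, src2]
    System.out.println(dotName(resolve("default", "tmp_feng.tmp_showcrt")));  // tmp_feng.tmp_showcrt
  }
}

Run as-is, the sketch prints [default, src2] and tmp_feng.tmp_showcrt, matching the db-qualified forms that now appear in the .q.out files.
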
diff --git ql/src/test/results/clientpositive/drop_multi_partitions.q.out ql/src/test/results/clientpositive/drop_multi_partitions.q.out index eae57f3..87e4e61 100644 --- ql/src/test/results/clientpositive/drop_multi_partitions.q.out +++ ql/src/test/results/clientpositive/drop_multi_partitions.q.out @@ -42,7 +42,8 @@ POSTHOOK: type: ALTERTABLE_DROPPARTS ABSTRACT SYNTAX TREE: TOK_ALTERTABLE_DROPPARTS - mp + TOK_TABNAME + mp TOK_PARTSPEC TOK_PARTVAL b @@ -57,7 +58,7 @@ STAGE PLANS: Stage: Stage-0 Drop Table Operator: Drop Table - table: mp + table: default.mp PREHOOK: query: alter table mp drop partition (b='1') PREHOOK: type: ALTERTABLE_DROPPARTS diff --git ql/src/test/results/clientpositive/input3.q.out ql/src/test/results/clientpositive/input3.q.out index 547449c..3185ad9 100644 --- ql/src/test/results/clientpositive/input3.q.out +++ ql/src/test/results/clientpositive/input3.q.out @@ -59,7 +59,7 @@ STAGE PLANS: Alter Table type: add columns new columns: x double - old name: TEST3b + old name: default.TEST3b PREHOOK: query: ALTER TABLE TEST3b ADD COLUMNS (X DOUBLE) PREHOOK: type: ALTERTABLE_ADDCOLS @@ -145,7 +145,7 @@ STAGE PLANS: Alter Table type: replace columns new columns: r1 int, r2 double - old name: TEST3c + old name: default.TEST3c PREHOOK: query: ALTER TABLE TEST3c REPLACE COLUMNS (R1 INT, R2 DOUBLE) PREHOOK: type: ALTERTABLE_REPLACECOLS diff --git ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out index 21bd257..16aa42d 100644 --- ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out +++ ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out @@ -213,13 +213,19 @@ POSTHOOK: Input: db2@destintable@ds=2011-11-11 97 val_97 PREHOOK: query: drop table db2.destinTable PREHOOK: type: DROPTABLE +PREHOOK: Input: db2@destintable +PREHOOK: Output: db2@destintable POSTHOOK: query: drop table db2.destinTable POSTHOOK: type: DROPTABLE +POSTHOOK: Input: db2@destintable POSTHOOK: Output: db2@destintable PREHOOK: query: drop table db1.sourceTable PREHOOK: type: DROPTABLE +PREHOOK: Input: db1@sourcetable +PREHOOK: Output: db1@sourcetable POSTHOOK: query: drop table db1.sourceTable POSTHOOK: type: DROPTABLE +POSTHOOK: Input: db1@sourcetable POSTHOOK: Output: db1@sourcetable PREHOOK: query: DROP DATABASE db1 PREHOOK: type: DROPDATABASE diff --git ql/src/test/results/clientpositive/show_create_table_db_table.q.out ql/src/test/results/clientpositive/show_create_table_db_table.q.out index 0119471..16c2401 100644 --- ql/src/test/results/clientpositive/show_create_table_db_table.q.out +++ ql/src/test/results/clientpositive/show_create_table_db_table.q.out @@ -44,8 +44,11 @@ TBLPROPERTIES ( #### A masked pattern was here #### PREHOOK: query: DROP TABLE tmp_feng.tmp_showcrt PREHOOK: type: DROPTABLE +PREHOOK: Input: tmp_feng@tmp_showcrt +PREHOOK: Output: tmp_feng@tmp_showcrt POSTHOOK: query: DROP TABLE tmp_feng.tmp_showcrt POSTHOOK: type: DROPTABLE +POSTHOOK: Input: tmp_feng@tmp_showcrt POSTHOOK: Output: tmp_feng@tmp_showcrt PREHOOK: query: DROP DATABASE tmp_feng PREHOOK: type: DROPDATABASE diff --git ql/src/test/results/clientpositive/show_tblproperties.q.out ql/src/test/results/clientpositive/show_tblproperties.q.out index 80db5e4..23fa93c 100644 --- ql/src/test/results/clientpositive/show_tblproperties.q.out +++ ql/src/test/results/clientpositive/show_tblproperties.q.out @@ -9,7 +9,7 @@ PREHOOK: query: show tblproperties tmpfoo("bar") PREHOOK: type: SHOW_TBLPROPERTIES POSTHOOK: query: 
show tblproperties tmpfoo("bar") POSTHOOK: type: SHOW_TBLPROPERTIES -Table tmpfoo does not have property: bar +Table default.tmpfoo does not have property: bar PREHOOK: query: alter table tmpfoo set tblproperties ("bar" = "bar value") PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@tmpfoo diff --git ql/src/test/results/clientpositive/temp_table_names.q.out ql/src/test/results/clientpositive/temp_table_names.q.out index 940684c..1e0f973 100644 --- ql/src/test/results/clientpositive/temp_table_names.q.out +++ ql/src/test/results/clientpositive/temp_table_names.q.out @@ -40,8 +40,11 @@ POSTHOOK: Input: default@temp_table_names #### A masked pattern was here #### PREHOOK: query: drop table Default.TEMP_TABLE_names PREHOOK: type: DROPTABLE +PREHOOK: Input: default@temp_table_names +PREHOOK: Output: default@temp_table_names POSTHOOK: query: drop table Default.TEMP_TABLE_names POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@temp_table_names POSTHOOK: Output: default@temp_table_names PREHOOK: query: show tables 'temp_table_names' PREHOOK: type: SHOWTABLES diff --git ql/src/test/results/clientpositive/temp_table_precedence.q.out ql/src/test/results/clientpositive/temp_table_precedence.q.out index 1075b2c..8574c2d 100644 --- ql/src/test/results/clientpositive/temp_table_precedence.q.out +++ ql/src/test/results/clientpositive/temp_table_precedence.q.out @@ -176,9 +176,12 @@ POSTHOOK: Input: ttp@tab2 PREHOOK: query: -- drop the temp table, and now we should be able to see the non-temp tab2 again drop table ttp.tab2 PREHOOK: type: DROPTABLE +PREHOOK: Input: ttp@tab2 +PREHOOK: Output: ttp@tab2 POSTHOOK: query: -- drop the temp table, and now we should be able to see the non-temp tab2 again drop table ttp.tab2 POSTHOOK: type: DROPTABLE +POSTHOOK: Input: ttp@tab2 POSTHOOK: Output: ttp@tab2 PREHOOK: query: describe ttp.tab2 PREHOOK: type: DESCTABLE
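
On the authorization side, the patch moves column information off ShowGrantDesc and onto PrivilegeObjectDesc, and HivePrivilegeObject now carries its columns as a List rather than a Set, with AuthorizationUtils.getHivePrivilegeObject reading them straight from the descriptor. The sketch below shows that shape with simplified, self-contained stand-in classes (SubjectDesc, PrivObject); the class and field names are assumptions made for illustration and are not the real Hive descriptors or API.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Simplified, self-contained stand-ins (not the real Hive classes) showing the
// reshuffled privilege-object shape: the parsed subject keeps its own column
// list, and the object handed to the authorizer carries columns as a List.
public class PrivilegeObjectSketch {

  // Stand-in for the parsed privilege subject: table flag, dotted object name,
  // optional partition spec, optional column list.
  static class SubjectDesc {
    boolean table;
    String object;                      // e.g. "default.table1"
    Map<String, String> partSpec;       // null when no PARTITION clause
    List<String> columns;               // null when no column list
  }

  // Stand-in for the object passed to the authorizer.
  static class PrivObject {
    final String dbname;
    final String objectName;
    final List<String> partKeys;
    final List<String> columns;

    PrivObject(String dbname, String objectName, List<String> partKeys, List<String> columns) {
      this.dbname = dbname;
      this.objectName = objectName;
      this.partKeys = partKeys;
      this.columns = columns;
    }
  }

  static PrivObject toPrivObject(SubjectDesc desc) {
    String[] dbTable = desc.object.split("\\.");
    List<String> partKeys =
        desc.partSpec == null ? null : new ArrayList<String>(desc.partSpec.values());
    return new PrivObject(dbTable[0], dbTable[1], partKeys, desc.columns);
  }

  public static void main(String[] args) {
    SubjectDesc desc = new SubjectDesc();
    desc.table = true;
    desc.object = "default.table1";
    desc.partSpec = new LinkedHashMap<String, String>();
    desc.partSpec.put("ds", "today");
    desc.columns = Arrays.asList("key", "value");

    PrivObject obj = toPrivObject(desc);
    // Prints: default.table1 parts=[today] cols=[key, value]
    System.out.println(obj.dbname + "." + obj.objectName
        + " parts=" + obj.partKeys + " cols=" + obj.columns);
  }
}

One visible effect of using a List here is that the columns reach the authorizer in the order they were written in the statement, which a HashSet would not guarantee; the dotted object name is split the same way the qualified table names above are.
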