Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1036686)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -315,6 +315,10 @@
     HIVEFETCHOUTPUTSERDE("hive.fetch.output.serde", "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe"),
     SEMANTIC_ANALYZER_HOOK("hive.semantic.analyzer.hook",null),
+
+    HIVE_AUTHORIZATION_ENABLED("hive.security.authorization.enabled", false),
+    HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager", null),
+    HIVE_AUTHENTICATOR_MANAGER("hive.security.authenticator.manager", null),
     ;
Index: metastore/if/hive_metastore.thrift
===================================================================
--- metastore/if/hive_metastore.thrift (revision 1036686)
+++ metastore/if/hive_metastore.thrift (working copy)
@@ -29,11 +29,25 @@
   4: optional list<FieldSchema> fields // if the name is one of the user defined types
 }
+struct PrincipalPrivilegeSet {
+  1: map<string, string> userPrivileges, // user name -> privilege set
+  2: map<string, string> groupPrivileges, // group name -> privilege set
+  3: map<string, string> rolePrivileges, //role name -> privilege set
+}
+
 // namespace for tables
 struct Database {
   1: string name,
   2: string description,
   3: string locationUri,
+  4: optional PrincipalPrivilegeSet privileges
+}
+
+struct Role {
+  1: string roleName,
+  2: Database database,
+  3: i32 createTime,
+  4: string ownerName,
 }
 // This object holds the information needed by SerDes
@@ -76,7 +90,8 @@
   9: map<string, string> parameters, // to store comments or any other user level parameters
   10: string viewOriginalText, // original view text, null for non-view
   11: string viewExpandedText, // expanded view text, null for non-view
-  12: string tableType // table type enum, e.g. EXTERNAL_TABLE
+  12: string tableType, // table type enum, e.g. EXTERNAL_TABLE
+  13: optional PrincipalPrivilegeSet privileges,
 }
 struct Partition {
@@ -86,7 +101,8 @@
   4: i32 createTime,
   5: i32 lastAccessTime,
   6: StorageDescriptor sd,
-  7: map<string, string> parameters
+  7: map<string, string> parameters,
+  8: optional PrincipalPrivilegeSet privileges
 }
 struct Index {
@@ -109,6 +125,62 @@
   2: map<string, string> properties
 }
+struct ColumnPrivilegeBag {
+  1: string dbName,
+  2: string tableName,
+  3: string partitionName,
+  4: map<string, string> columnPrivileges
+}
+
+struct PrivilegeBag {
+  1: string userPrivileges, //user privileges
+  2: map<Database, string> dbPrivileges, //database privileges
+  3: map<Table, string> tablePrivileges, //table privileges
+  4: map<Partition, string> partitionPrivileges, //partition privileges
+  5: list<ColumnPrivilegeBag> columnPrivileges, //column privileges
+}
+
+struct SecurityUser {
+  1: string principalName,
+  2: bool isRole,
+  3: bool isGroup,
+  4: string privileges,
+  5: i32 createTime,
+  6: string grantor,
+}
+
+struct SecurityDB {
+  1: string principalName,
+  2: bool isRole,
+  3: bool isGroup,
+  4: string privileges,
+  5: i32 createTime,
+  6: string grantor,
+  7: Database db,
+}
+
+struct SecurityTablePartition {
+  1: string principalName,
+  2: bool isRole,
+  3: bool isGroup,
+  4: string privileges,
+  5: i32 createTime,
+  6: string grantor,
+  7: Table table,
+  8: Partition part,
+}
+
+struct SecurityColumn {
+  1: string principalName,
+  2: bool isRole,
+  3: bool isGroup,
+  4: string privileges,
+  5: i32 createTime,
+  6: string grantor,
+  7: Table table,
+  8: Partition partition,
+  9: string column,
+}
 exception MetaException {
   1: string message
@@ -269,6 +341,46 @@
     throws(1:NoSuchObjectException o1, 2:MetaException o2)
   list<string> get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
     throws(1:MetaException o2)
+
+  //authorization privileges
+  PrincipalPrivilegeSet get_user_privilege_set (1: string user_name, 2: list<string> group_names)
+    throws(1:MetaException o1)
+  PrincipalPrivilegeSet get_db_privilege_set (1: string db_name, 2: string user_name, 3: list<string> group_names)
+    throws(1:MetaException o1)
+  PrincipalPrivilegeSet get_table_privilege_set (1: string db_name, 2: string table_name, 3: string user_name, 4: list<string> group_names)
+    throws(1:MetaException o1)
+  PrincipalPrivilegeSet get_partition_privilege_set (1: string db_name, 2: string table_name, 3: string part_name, 4: string user_name, 5: list<string> group_names)
+    throws(1:MetaException o1)
+
+  PrincipalPrivilegeSet get_column_privilege_set (1: string db_name, 2: string table_name, 3: string part_name, 4: string column_name, 5: string user_name, 6: list<string> group_names)
+    throws(1:MetaException o1)
+
+  bool create_role(1: string role_name, 2: string owner_name, 3: string db_name) throws(1:MetaException o1)
+
+  bool drop_role(1: string role_name, 2: string db_name) throws(1:MetaException o1)
+
+  bool add_role_member (1: string role_name, 2: string user_name, 3: bool is_role, 4: bool is_group, 5: string db_name) throws(1:MetaException o1)
+
+  bool remove_role_member (1: string role_name, 2: string user_name, 3: bool is_role, 4: bool is_group, 5: string db_name) throws(1:MetaException o1)
+
+  list<Role> list_roles(1: string principal_name, 2: bool is_role, 3: bool is_group, 4: string db_name) throws(1:MetaException o1)
+
+  list<SecurityUser> list_security_user_grant(1: string principla_name, 2: bool is_role, 3: bool is_group) throws(1:MetaException o1)
+
+  list<SecurityDB> list_security_db_grant(1: string principal_name, 2: bool is_group, 3: bool is_role, 4: string db_name) throws(1:MetaException o1)
+
+  list<SecurityTablePartition> list_security_table_grant(1: string principal_name, 2: bool is_group, 3: bool is_role, 4: string db_name, 5: string table_name) throws(1:MetaException o1)
+
+  list<SecurityTablePartition> list_security_partition_grant(1: string principal_name, 2: bool is_group, 3: bool is_role, 4: string db_name, 5: string table_name, 6: string part_name) throws(1:MetaException o1)
+
+  list<SecurityColumn> list_security_column_grant(1: string principal_name, 2: bool is_group, 3: bool is_role, 4: string db_name, 5: string table_name, 6: string part_name, 7: string column_name) throws(1:MetaException o1)
+
+  bool grant_privileges (1: string user_name, 2: bool is_role, 3: bool is_group, 4: PrivilegeBag privileges, 5: string grantor) throws(1:MetaException o1)
+
+  bool revoke_privileges (1: string user_name, 2: bool is_role, 3: bool is_group, 4: PrivilegeBag privileges) throws(1:MetaException o1)
+
+  bool revoke_all_privileges (1: string user_name, 2: bool is_role, 3: bool is_group, 4: bool remove_user_priv, 5: list<Database> dbs,
+    6: list<Table> tables, 7: list<Partition> parts, 8: map<Table, list<string>> columns) throws(1:MetaException o1)
 }
 // * Note about the DDL_TIME: When creating or altering a table or a partition,
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (revision 1036686)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (working copy)
@@ -48,12 +48,25 @@
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.SecurityColumn;
+import org.apache.hadoop.hive.metastore.api.SecurityDB;
+import org.apache.hadoop.hive.metastore.api.SecurityTablePartition;
+import org.apache.hadoop.hive.metastore.api.SecurityUser;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;
+import org.apache.hadoop.hive.metastore.model.MSecurityColumn;
+import org.apache.hadoop.hive.metastore.model.MSecurityDB;
+import org.apache.hadoop.hive.metastore.model.MSecurityRoleEntity;
+import org.apache.hadoop.hive.metastore.model.MSecurityTablePartition;
+import org.apache.hadoop.hive.metastore.model.MSecurityUser;
+import org.apache.hadoop.hive.metastore.model.MSecurityUserRoleMap;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.SerDeUtils;
@@ -360,7 +373,7 @@
       } catch (NoSuchObjectException e) {
         ms.createDatabase(
             new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT,
-                wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString()));
+                wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null));
       }
       HMSHandler.createDefaultDB = true;
     }
@@ -1225,17 +1238,20 @@
         throw new NoSuchObjectException("Partition doesn't exist. 
" + part_vals); } - + isArchived = MetaStoreUtils.isArchived(part); if (isArchived) { archiveParentDir = MetaStoreUtils.getOriginalLocation(part); } + if (part.getSd() == null || part.getSd().getLocation() == null) { throw new MetaException("Partition metadata is corrupted"); } + if (!ms.dropPartition(db_name, tbl_name, part_vals)) { throw new MetaException("Unable to drop partition"); } + success = ms.commitTransaction(); partPath = new Path(part.getSd().getLocation()); tbl = get_table(db_name, tbl_name); @@ -2136,6 +2152,505 @@ return ret; } + @Override + public PrincipalPrivilegeSet get_column_privilege_set(final String dbName, + final String tableName, final String partName, final String columnName, + final String userName, final List groupNames) throws MetaException, + TException { + incrementCounter("get_column_privilege_set"); + + PrincipalPrivilegeSet ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + PrincipalPrivilegeSet run(RawStore ms) throws Exception { + return ms.getColumnPrivilegeSet(dbName, tableName, partName, columnName, userName, groupNames); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public PrincipalPrivilegeSet get_db_privilege_set(final String dbName, + final String userName, final List groupNames) throws MetaException, + TException { + incrementCounter("get_db_privilege_set"); + + PrincipalPrivilegeSet ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + PrincipalPrivilegeSet run(RawStore ms) throws Exception { + return ms.getDBPrivilegeSet(dbName, userName, groupNames); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public PrincipalPrivilegeSet get_partition_privilege_set( + final String dbName, final String tableName, final String partName, + final String userName, final List groupNames) + throws MetaException, TException { + incrementCounter("get_partition_privilege_set"); + + PrincipalPrivilegeSet ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + PrincipalPrivilegeSet run(RawStore ms) throws Exception { + return ms.getPartitionPrivilegeSet(dbName, tableName, partName, + userName, groupNames); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public PrincipalPrivilegeSet get_table_privilege_set(final String dbName, + final String tableName, final String userName, + final List groupNames) throws MetaException, TException { + incrementCounter("get_table_privilege_set"); + + PrincipalPrivilegeSet ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + PrincipalPrivilegeSet run(RawStore ms) throws Exception { + return ms.getTablePrivilegeSet(dbName, tableName, userName, + groupNames); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public boolean add_role_member(final String roleName, + final String userName, final boolean isRole, final boolean isGroup, + final String databaseName) throws MetaException, TException { + incrementCounter("add_role_member"); + + Boolean ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + Role role = ms.getRole(roleName, databaseName); + return ms.addRoleMember(role, userName, isRole, 
isGroup); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + public List list_roles(final String principalName, + final boolean isRole, final boolean isGroup, final String databaseName) throws MetaException, TException { + incrementCounter("list_roles"); + + List ret = null; + try { + ret = executeWithRetry(new Command>() { + @Override + List run(RawStore ms) throws Exception { + List result = new ArrayList(); + List roleMap = ms.listRoles(principalName, + isRole, isGroup, databaseName); + if (roleMap!=null) { + Database db = get_database(databaseName); + for (MSecurityUserRoleMap role : roleMap) { + MSecurityRoleEntity r = role.getRole(); + result.add(new Role(r.getRoleName(), db, r + .getCreateTime(), r.getOwnerName())); + } + } + return result; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public boolean create_role(final String roleName, final String ownerName, + final String databName) + throws MetaException, TException { + incrementCounter("create_role"); + + Boolean ret = null; + try { + + ret = executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + return ms.addRole(roleName, ownerName, databName); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public boolean drop_role(final String roleName, final String databaseName) + throws MetaException, TException { + incrementCounter("drop_role"); + + Boolean ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + return ms.removeRole(roleName, databaseName); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public boolean grant_privileges(final String userName, final boolean isRole, + final boolean isGroup, final PrivilegeBag privileges, final String grantor) throws MetaException, + TException { + incrementCounter("grant_privileges"); + + Boolean ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + return ms.grantPrivileges(userName, isRole, isGroup, privileges, grantor); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public boolean remove_role_member(final String roleName, final String userName, + final boolean isRole, final boolean isGroup, final String databaseName) throws MetaException, TException { + incrementCounter("remove_role_member"); + + Boolean ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + Role mRole = ms.getRole(roleName, databaseName); + return ms.removeRoleMember(mRole, userName, isRole, isGroup); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public boolean revoke_privileges(final String userName, final boolean isRole, + final boolean isGroup, final PrivilegeBag privileges) throws MetaException, + TException { + incrementCounter("revoke_privileges"); + + Boolean ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + return 
ms.revokePrivileges(userName, isRole, isGroup, privileges); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public PrincipalPrivilegeSet get_user_privilege_set(final String userName, + final List groupNames) throws MetaException, TException { + incrementCounter("get_user_privilege_set"); + + PrincipalPrivilegeSet ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + PrincipalPrivilegeSet run(RawStore ms) throws Exception { + return ms.getUserPrivilegeSet(userName, groupNames); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public boolean revoke_all_privileges(final String userName, + final boolean isRole, final boolean isGroup, + final boolean removeUserPriv, final List dbs, + final List
tables, final List parts, + final Map> columns) throws MetaException, + TException { + incrementCounter("revoke_all_privileges"); + + Boolean ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + return ms.revokeAllPrivileges(userName, isRole, isGroup, + removeUserPriv, dbs, tables, parts, columns); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public List list_security_column_grant( + final String principalName, final boolean isGroup, + final boolean isRole, final String dbName, final String tableName, + final String partName, final String columnName) throws MetaException, + TException { + incrementCounter("list_security_column_grant"); + + List ret = null; + try { + ret = executeWithRetry(new Command>() { + @Override + List run(RawStore ms) throws Exception { + List mCols = ms.listMSecurityTabOrPartColumnGrant(principalName, + isGroup, isRole, dbName, tableName, partName, columnName); + Table tbl = ms.getTable(dbName, tableName); + Partition part = null; + if (partName != null) { + part = get_partition_by_name(dbName, tableName, partName); + } + + if (mCols.size() > 0) { + List result = new ArrayList(); + for (int i = 0; i < mCols.size(); i++) { + MSecurityColumn sCol = mCols.get(i); + SecurityColumn col = new SecurityColumn( + sCol.getPrincipalName(), sCol.getIsRole(), sCol + .getIsGroup(), sCol.getPrivileges(), sCol + .getCreateTime(), sCol.getGrantor(), tbl, part, sCol + .getColumnName()); + result.add(col); + } + return result; + } + return null; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public List list_security_db_grant(final String principalName, + final boolean isGroup, final boolean isRole, final String dbName) + throws MetaException, TException { + incrementCounter("list_security_db_grant"); + + List ret = null; + try { + ret = executeWithRetry(new Command>() { + @Override + List run(RawStore ms) throws Exception { + List mDbs = ms.listMSecurityPrincipalDBGrant( + principalName, isGroup, isRole, dbName); + Database db = ms.getDatabase(dbName); + if (mDbs.size() > 0) { + List result = new ArrayList(); + for (int i = 0; i < mDbs.size(); i++) { + MSecurityDB sDB = mDbs.get(i); + SecurityDB secdb = new SecurityDB(sDB.getPrincipalName(), sDB + .getIsRole(), sDB.getIsGroup(), sDB.getPrivileges(), sDB + .getCreateTime(), sDB.getGrantor(), db); + result.add(secdb); + } + return result; + } + return null; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public List list_security_partition_grant( + final String principalName, final boolean isGroup, + final boolean isRole, final String dbName, final String tableName, + final String partName) throws MetaException, TException { + incrementCounter("list_security_partition_grant"); + + List ret = null; + try { + ret = executeWithRetry(new Command>() { + @Override + List run(RawStore ms) throws Exception { + List mParts = ms + .listMSecurityPrincipalPartitionGrant(principalName, isGroup, + isRole, dbName, tableName, partName); + Partition partObj = get_partition_by_name(dbName, tableName, + partName); + if (mParts.size() > 0) { + List result = new ArrayList(); + for (int i = 0; i < mParts.size(); i++) { + MSecurityTablePartition sPart = mParts.get(i); + SecurityTablePartition 
secPart = new SecurityTablePartition( + sPart.getPrincipalName(), sPart.getIsRole(), sPart + .getIsGroup(), sPart.getPrivileges(), sPart + .getCreateTime(), sPart.getGrantor(), null, partObj); + result.add(secPart); + } + return result; + } + return null; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public List list_security_table_grant( + final String principalName, final boolean isGroup, + final boolean isRole, final String dbName, final String tableName) + throws MetaException, TException { + incrementCounter("list_security_table_grant"); + + List ret = null; + try { + ret = executeWithRetry(new Command>() { + @Override + List run(RawStore ms) throws Exception { + List mTbls = ms + .listMSecurityPrincipalTableGrant(principalName, isGroup, + isRole, dbName, tableName); + Table tblObj = ms.getTable(dbName, tableName); + if (mTbls.size() > 0) { + List result = new ArrayList(); + for (int i = 0; i < mTbls.size(); i++) { + MSecurityTablePartition sTbl = mTbls.get(i); + SecurityTablePartition secPart = new SecurityTablePartition( + sTbl.getPrincipalName(), sTbl.getIsRole(), sTbl + .getIsGroup(), sTbl.getPrivileges(), sTbl + .getCreateTime(), sTbl.getGrantor(), tblObj, null); + result.add(secPart); + } + return result; + } + return null; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public List list_security_user_grant( + final String principlaName, final boolean isRole, final boolean isGroup) + throws MetaException, TException { + incrementCounter("list_security_user_grant"); + + List ret = null; + try { + ret = executeWithRetry(new Command>() { + @Override + List run(RawStore ms) throws Exception { + List mUsers = ms.listMSecurityPrincipalUserGrant( + principlaName, isRole, isGroup); + if (mUsers.size() > 0) { + List result = new ArrayList(); + for (int i = 0; i < mUsers.size(); i++) { + MSecurityUser sUsr = mUsers.get(i); + SecurityUser secUser = new SecurityUser( + sUsr.getPrincipalName(), sUsr.getIsRole(), sUsr + .getIsGroup(), sUsr.getPrivileges(), sUsr + .getCreateTime(), sUsr.getGrantor()); + result.add(secUser); + } + return result; + } + return null; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + } /** Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (revision 1036686) +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (working copy) @@ -40,6 +40,13 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SecurityColumn; +import org.apache.hadoop.hive.metastore.api.SecurityDB; +import org.apache.hadoop.hive.metastore.api.SecurityTablePartition; +import org.apache.hadoop.hive.metastore.api.SecurityUser; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; import 
org.apache.hadoop.hive.metastore.api.Type; @@ -867,4 +874,132 @@ return client.drop_index_by_name(dbName, tblName, name, deleteData); } + @Override + public boolean add_role_member(String roleName, String userName, + boolean isRole, boolean isGroup, String dbName) throws MetaException, TException { + return client.add_role_member(roleName, userName, isRole, isGroup, dbName); + } + + @Override + public boolean create_role(String roleName, String ownerName, String dbName) + throws MetaException, TException { + return client.create_role(roleName, ownerName, dbName); + } + + @Override + public boolean drop_role(String roleName, String dbName) throws MetaException, TException { + return client.drop_role(roleName, dbName); + } + + @Override + public List list_roles(String principalName, + boolean isRole, boolean isGroup, String dbName) throws MetaException, TException { + return client.list_roles(principalName, isRole, isGroup, dbName); + } + + @Override + public PrincipalPrivilegeSet get_column_privilege_set(String dbName, + String tableName, String partName, String columnName, String userName, + List groupNames) throws MetaException, TException { + return client.get_column_privilege_set(dbName, tableName, partName, + columnName, userName, groupNames); + } + + @Override + public PrincipalPrivilegeSet get_db_privilege_set(String dbName, + String userName, List groupNames) throws MetaException, + TException { + return client.get_db_privilege_set(dbName, userName, groupNames); + } + + @Override + public PrincipalPrivilegeSet get_partition_privilege_set(String dbName, + String tableName, String partName, String userName, + List groupNames) throws MetaException, TException { + return client.get_partition_privilege_set(dbName, tableName, partName, + userName, groupNames); + } + + @Override + public PrincipalPrivilegeSet get_table_privilege_set(String dbName, + String tableName, String userName, List groupNames) + throws MetaException, TException { + return client.get_table_privilege_set(dbName, tableName, userName, + groupNames); + } + + @Override + public PrincipalPrivilegeSet get_user_privilege_set(String userName, + List groupNames) throws MetaException, TException { + return client.get_user_privilege_set(userName, groupNames); + } + + @Override + public boolean grant_privileges(String userName, boolean isRole, + boolean isGroup, PrivilegeBag privileges, String grantor) + throws MetaException, TException { + return client.grant_privileges(userName, isRole, isGroup, privileges, + grantor); + } + + @Override + public boolean remove_role_member(String roleName, String userName, + boolean isRole, boolean isGroup, String dbName) throws MetaException, TException { + return client.remove_role_member(roleName, userName, isRole, isGroup, dbName); + } + + @Override + public boolean revoke_all_privileges(String userName, boolean isRole, + boolean isGroup, boolean removeUserPriv, List dbs, + List
tables, List parts, + Map> columns) throws MetaException, TException { + return client.revoke_all_privileges(userName, isRole, isGroup, + removeUserPriv, dbs, tables, parts, columns); + } + + @Override + public boolean revoke_privileges(String userName, boolean isRole, + boolean isGroup, PrivilegeBag privileges) throws MetaException, + TException { + return client.revoke_privileges(userName, isRole, isGroup, privileges); + } + + @Override + public List list_security_column_grant(String principalName, + boolean isGroup, boolean isRole, String dbName, String tableName, + String partName, String columnName) throws MetaException, TException { + return client.list_security_column_grant(principalName, isGroup, isRole, + dbName, tableName, partName, columnName); + } + + @Override + public List list_security_db_grant(String principalName, + boolean isGroup, boolean isRole, String dbName) throws MetaException, + TException { + return client + .list_security_db_grant(principalName, isGroup, isRole, dbName); + } + + @Override + public List list_security_partition_grant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName, String partName) throws MetaException, TException { + return client.list_security_partition_grant(principalName, isGroup, isRole, + dbName, tableName, partName); + } + + @Override + public List list_security_table_grant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName) throws MetaException, TException { + return client.list_security_table_grant(principalName, isGroup, isRole, + dbName, tableName); + } + + @Override + public List list_security_user_grant(String principlaName, + boolean isRole, boolean isGroup) throws MetaException, TException { + return client.list_security_user_grant(principlaName, isRole, isGroup); + } + } Index: metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (revision 1036686) +++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (working copy) @@ -31,6 +31,13 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SecurityColumn; +import org.apache.hadoop.hive.metastore.api.SecurityDB; +import org.apache.hadoop.hive.metastore.api.SecurityTablePartition; +import org.apache.hadoop.hive.metastore.api.SecurityUser; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; @@ -475,4 +482,297 @@ public boolean dropIndex(String db_name, String tbl_name, String name, boolean deleteData) throws NoSuchObjectException, MetaException, TException; + + /** + * @param user_name + * user name + * @param group_names + * group names + * @return + * @throws MetaException + * @throws TException + */ + public PrincipalPrivilegeSet get_user_privilege_set(String user_name, + List group_names) throws MetaException, TException; + + /** + * @param db_name + * database name + * @param user_name + * user name + * @param group_names + * group names + * 
@return + * @throws MetaException + * @throws TException + */ + public PrincipalPrivilegeSet get_db_privilege_set(String db_name, + String user_name, List group_names) throws MetaException, + TException; + + /** + * @param db_name + * db name + * @param table_name + * table name + * @param user_name + * user name + * @param group_names + * group names + * @return + * @throws MetaException + * @throws TException + */ + public PrincipalPrivilegeSet get_table_privilege_set(String db_name, + String table_name, String user_name, List group_names) + throws MetaException, TException; + + /** + * @param db_name + * db name + * @param table_name + * table name + * @param part_name + * partition name + * @param user_name + * user name + * @param group_names + * group names + * @return + * @throws MetaException + * @throws TException + */ + public PrincipalPrivilegeSet get_partition_privilege_set(String db_name, + String table_name, String part_name, String user_name, + List group_names) throws MetaException, TException; + + /** + * @param db_name + * database name + * @param table_name + * table name + * @param part_name + * partition name + * @param column_name + * column name + * @param user_name + * user name + * @param group_names + * group names + * @return + * @throws MetaException + * @throws TException + */ + public PrincipalPrivilegeSet get_column_privilege_set(String db_name, + String table_name, String part_name, String column_name, + String user_name, List group_names) throws MetaException, + TException; + + /** + * @param role_name + * role name + * @param owner_name + * owner name + * @param db_name + * + * @return + * @throws MetaException + * @throws TException + */ + public boolean create_role(String role_name, String owner_name, String db_name) + throws MetaException, TException; + + /** + * @param role_name + * role name + * @param db_name + * + * @return + * @throws MetaException + * @throws TException + */ + public boolean drop_role(String role_name, String db_name) throws MetaException, TException; + + /** + * @param role_name + * role name + * @param user_name + * user name + * @param is_role + * is the given user name a role name + * @param is_group + * is the given user name a group name + * @param db_name + * + * @return + * @throws MetaException + * @throws TException + */ + public boolean add_role_member(String role_name, String user_name, + boolean is_role, boolean is_group, String db_name) throws MetaException, TException; + + /** + * @param role_name + * role name + * @param user_name + * user name + * @param is_role + * is the given user name a role + * @param is_group + * is the given group name a group + * @param db_name + * + * @return + * @throws MetaException + * @throws TException + */ + public boolean remove_role_member(String role_name, String user_name, + boolean is_role, boolean is_group, String db_name) throws MetaException, TException; + + /** + * @param principalName + * @param isRole + * @param isGroup + * @return + * @throws MetaException + * @throws TException + */ + public List list_roles(String principalName, boolean isRole, + boolean isGroup, String db_name) throws MetaException, TException; + + /** + * @param user_name + * user name + * @param is_role + * is the given user name a role + * @param is_group + * is the given user name a group + * @param privileges + * a bag of privilege - including user level, db level, table level, + * and column level + * @param grantor + * the name of the grantor + * @return + * @throws MetaException + * @throws 
TException + */ + public boolean grant_privileges(String user_name, boolean is_role, + boolean is_group, PrivilegeBag privileges, String grantor) + throws MetaException, TException; + + /** + * @param user_name + * user name + * @param is_role + * is the given user name a role + * @param is_group + * is the given user name a group + * @param privileges + * a bag of privileges + * @return + * @throws MetaException + * @throws TException + */ + public boolean revoke_privileges(String user_name, boolean is_role, + boolean is_group, PrivilegeBag privileges) throws MetaException, + TException; + + /** + * @param user_name + * user name + * @param is_role + * is the given user name a role + * @param is_group + * is the given user name a group + * @param remove_user_priv + * true if need to remove all user level privileges that were + * assigned the the given principal + * @param dbs + * a list of database on which all db level privileges for the given + * principal are going to be remove. + * @param tables + * a list of tables on which all table level privileges for the given + * principal are going to be removed. + * @param parts + * a list of partitions on on which all partition level privileges + * for the given principal are going to be removed. + * @param columns + * a list of columns on on which all column level privileges for the + * given principal are going to be removed. + * @return + * @throws MetaException + * @throws TException + */ + public boolean revoke_all_privileges(String user_name, boolean is_role, + boolean is_group, boolean remove_user_priv, List dbs, + List
tables, List parts, + Map> columns) throws MetaException, TException; + + /** + * @param principla_name + * @param is_role + * @param is_group + * @return + * @throws MetaException + * @throws TException + */ + public List list_security_user_grant(String principla_name, + boolean is_role, boolean is_group) throws MetaException, TException; + + /** + * @param principal_name + * @param is_group + * @param is_role + * @param db_name + * @return + * @throws MetaException + * @throws TException + */ + public List list_security_db_grant(String principal_name, + boolean is_group, boolean is_role, String db_name) throws MetaException, + TException; + + /** + * @param principal_name + * @param is_group + * @param is_role + * @param db_name + * @param table_name + * @return + * @throws MetaException + * @throws TException + */ + public List list_security_table_grant( + String principal_name, boolean is_group, boolean is_role, String db_name, + String table_name) throws MetaException, TException; + + /** + * @param principal_name + * @param is_group + * @param is_role + * @param db_name + * @param table_name + * @param part_name + * @return + * @throws MetaException + * @throws TException + */ + public List list_security_partition_grant( + String principal_name, boolean is_group, boolean is_role, String db_name, + String table_name, String part_name) throws MetaException, TException; + + /** + * @param principal_name + * @param is_group + * @param is_role + * @param db_name + * @param table_name + * @param column_name + * @return + * @throws MetaException + * @throws TException + */ + public List list_security_column_grant(String principal_name, + boolean is_group, boolean is_role, String db_name, String table_name, + String part_name, String column_name) throws MetaException, TException; } Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (revision 1036686) +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (working copy) @@ -44,7 +44,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.ColumnPrivilegeBag; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Index; @@ -53,6 +55,9 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; @@ -62,6 +67,12 @@ import org.apache.hadoop.hive.metastore.model.MIndex; import org.apache.hadoop.hive.metastore.model.MOrder; import org.apache.hadoop.hive.metastore.model.MPartition; +import org.apache.hadoop.hive.metastore.model.MSecurityColumn; +import org.apache.hadoop.hive.metastore.model.MSecurityDB; +import org.apache.hadoop.hive.metastore.model.MSecurityRoleEntity; +import 
org.apache.hadoop.hive.metastore.model.MSecurityTablePartition; +import org.apache.hadoop.hive.metastore.model.MSecurityUser; +import org.apache.hadoop.hive.metastore.model.MSecurityUserRoleMap; import org.apache.hadoop.hive.metastore.model.MSerDeInfo; import org.apache.hadoop.hive.metastore.model.MStorageDescriptor; import org.apache.hadoop.hive.metastore.model.MTable; @@ -536,6 +547,16 @@ pm.retrieve(tbl); if (tbl != null) { // first remove all the partitions + List tabParts = listMSecurityTablePart(dbName, + tableName, false); + if (tabParts != null && tabParts.size() > 0) { + pm.deletePersistentAll(tabParts); + } + List colGrants = listMSecurityTablePartColumn(dbName, + tableName, false); + if (colGrants != null && colGrants.size() > 0) { + pm.deletePersistentAll(colGrants); + } pm.deletePersistentAll(listMPartitions(dbName, tableName, -1)); // then remove the table pm.deletePersistent(tbl); @@ -548,7 +569,7 @@ } return success; } - + public Table getTable(String dbName, String tableName) throws MetaException { boolean commited = false; Table tbl = null; @@ -651,7 +672,7 @@ .getRetention(), convertToStorageDescriptor(mtbl.getSd()), convertToFieldSchemas(mtbl.getPartitionKeys()), mtbl.getParameters(), mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), - tableType); + tableType, null); } private MTable convertToMTable(Table tbl) throws InvalidObjectException, @@ -784,9 +805,39 @@ boolean success = false; boolean commited = false; try { + List tabGrants = this.listMSecurityTablePart(part + .getDbName(), part.getTableName(), true); + List tabColumnGrants = this.listMSecurityTablePartColumn( + part.getDbName(), part.getTableName(), true); openTransaction(); MPartition mpart = convertToMPart(part); pm.makePersistent(mpart); + + int now = (int)(System.currentTimeMillis()/1000); + List toPersist = new ArrayList(); + if (tabGrants != null) { + for (MSecurityTablePartition tab: tabGrants) { + MSecurityTablePartition partGrant = new MSecurityTablePartition(tab + .getPrincipalName(), tab.getIsRole(), tab.getIsGroup(), tab + .getTable(), mpart, tab.getPrivileges(), now, tab.getGrantor()); + toPersist.add(partGrant); + } + } + + if (tabColumnGrants != null) { + for (MSecurityColumn col : tabColumnGrants) { + MSecurityColumn partColumn = new MSecurityColumn(col + .getPrincipalName(), col.getIsRole(), col.getIsGroup(), col + .getTable(), mpart, col.getColumnName(), col.getPrivileges(), + now, col.getGrantor()); + toPersist.add(partColumn); + } + + if (toPersist.size() > 0) { + pm.makePersistentAll(toPersist); + } + } + commited = commitTransaction(); success = true; } finally { @@ -803,7 +854,8 @@ Partition part = convertToPart(getMPartition(dbName, tableName, part_vals)); commitTransaction(); if(part == null) { - throw new NoSuchObjectException(); + throw new NoSuchObjectException("partition values=" + + part_vals.toString()); } return part; } @@ -863,7 +915,7 @@ return new Partition(mpart.getValues(), mpart.getTable().getDatabase() .getName(), mpart.getTable().getTableName(), mpart.getCreateTime(), mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd()), - mpart.getParameters()); + mpart.getParameters(), null); } public boolean dropPartition(String dbName, String tableName, @@ -873,6 +925,26 @@ openTransaction(); MPartition part = getMPartition(dbName, tableName, part_vals); if (part != null) { + List schemas = part.getTable().getPartitionKeys(); + List colNames = new ArrayList(); + for (MFieldSchema col: schemas) { + colNames.add(col.getName()); + } + String partName = 
FileUtils.makePartName(colNames, part_vals); + + List partGrants = listMSecurityPartition( + dbName, tableName, partName); + + if (partGrants != null && partGrants.size() > 0) { + pm.deletePersistentAll(partGrants); + } + + List partColumnGrants = listMSecurityPartitionColumn( + dbName, tableName, partName); + if (partColumnGrants != null && partColumnGrants.size() > 0) { + pm.deletePersistentAll(partColumnGrants); + } + pm.deletePersistent(part); } success = commitTransaction(); @@ -1395,4 +1467,1274 @@ } return pns; } + + @Override + public boolean addRole(String roleName, String ownerName, String dbName) + throws InvalidObjectException, MetaException, NoSuchObjectException { + boolean success = false; + boolean commited = false; + try { + openTransaction(); + MDatabase db = this.getMDatabase(dbName); + MSecurityRoleEntity nameCheck = this.getMRole(roleName, dbName); + if (nameCheck != null) { + throw new RuntimeException("Role " + roleName + " already exists."); + } + int now = (int)(System.currentTimeMillis()/1000); + MSecurityRoleEntity mRole = new MSecurityRoleEntity(roleName, now, + ownerName, db); + pm.makePersistent(mRole); + commited = commitTransaction(); + success = true; + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public boolean addRoleMember(Role role, String userName, boolean isRole, + boolean isGroup) throws MetaException, NoSuchObjectException { + boolean success = false; + boolean commited = false; + try { + MSecurityUserRoleMap roleMap = null; + try { + roleMap = this.getMSecurityUserRoleMap(userName, isRole, isGroup, role + .getRoleName(), role.getDatabase().getName()); + } catch (Exception e) { + e.printStackTrace(); + } + if (roleMap != null) { + throw new RuntimeException("Principal " + userName + + " already has the role " + role.getRoleName()); + } + openTransaction(); + MSecurityRoleEntity mRole = getMRole(role.getRoleName(), role + .getDatabase().getName()); + long now = System.currentTimeMillis()/1000; + MSecurityUserRoleMap roleMember = new MSecurityUserRoleMap(userName, + isRole, isGroup, mRole, (int) now); + pm.makePersistent(roleMember); + commited = commitTransaction(); + success = true; + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return success; + } + + @Override + public boolean removeRoleMember(Role role, String userName, boolean isRole, + boolean isGroup) throws MetaException, NoSuchObjectException { + boolean success = false; + try { + openTransaction(); + MSecurityUserRoleMap roleMember = getMSecurityUserRoleMap(userName, + isRole, isGroup, role.getRoleName(), role.getDatabase().getName()); + pm.deletePersistent(roleMember); + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + private MSecurityUserRoleMap getMSecurityUserRoleMap(String userName, + boolean isRole, boolean isGroup, String roleName, String dbName) { + MSecurityUserRoleMap mRoleMember = null; + boolean commited = false; + try { + openTransaction(); + Query query = pm.newQuery(MSecurityUserRoleMap.class, "principalName == t1 && isRole==t2 && isGroup == t3 && role.roleName == t4 && role.database.name == t5"); + query.declareParameters("java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3, java.lang.String t4, java.lang.String t5"); + query.setUnique(true); + mRoleMember = (MSecurityUserRoleMap) query.executeWithArray(userName, isRole, isGroup, roleName, dbName); + pm.retrieve(mRoleMember); + commited = commitTransaction(); + } 
finally { + if (!commited) { + rollbackTransaction(); + } + } + return mRoleMember; + } + + @Override + public boolean removeRole(String roleName, String dbName) throws MetaException, + NoSuchObjectException { + boolean success = false; + try { + openTransaction(); + MSecurityRoleEntity mRol = getMRole(roleName, dbName); + pm.retrieve(mRol); + if (mRol != null) { + // first remove all the membership, the membership that this role has + // been granted + List roleMap = listMSecurityUserRoleMember(mRol); + if (roleMap.size() > 0) { + pm.deletePersistentAll(roleMap); + } + List roleMember = listMSecurityPrincipalMembershipRole( + mRol.getRoleName(), true, false); + if (roleMember.size() > 0) { + pm.deletePersistentAll(roleMember); + } + // then remove all the grants + List userGrants = listMSecurityPrincipalUserGrant( + mRol.getRoleName(), true, false); + if (userGrants.size() > 0) { + pm.deletePersistentAll(userGrants); + } + List dbGrants = listAllMSecurityPrincipalDBGrant( + mRol.getRoleName(), true, false); + if (dbGrants.size() > 0) { + pm.deletePersistentAll(dbGrants); + } + List tabPartGrants = listAllMSecurityPrincipalTablePartGrant( + mRol.getRoleName(), true, false); + if (tabPartGrants.size() > 0) { + pm.deletePersistentAll(tabPartGrants); + } + List columnGrants = listAllMSecurityPrincipalColumnGrant( + mRol.getRoleName(), true, false); + if (columnGrants.size() > 0) { + pm.deletePersistentAll(columnGrants); + } + // last remove the role + pm.deletePersistent(mRol); + } + success = commitTransaction(); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return success; + } + + private List listRoles(String userName, + List groupNames, String databaseName) { + List ret = new ArrayList(); + if(userName != null) { + ret.addAll(listRoles(userName, false, false, databaseName)); + } + if (groupNames != null) { + for (String groupName: groupNames) { + ret.addAll(listRoles(groupName, false, true, databaseName)); + } + } + return ret; + } + + @SuppressWarnings("unchecked") + public List listRoles(String principalName, + boolean isRole, boolean isGroup, String databaseName) { + boolean success = false; + List mRoleMember = null; + try { + openTransaction(); + LOG.debug("Executing listRoles"); + Query query = pm + .newQuery( + MSecurityUserRoleMap.class, + "principalName == t1 && isGroup == t2 && isRole == t3 && role.database.name == t4"); + query + .declareParameters("java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3, java.lang.String t4"); + query.setUnique(false); + mRoleMember = (List) query.executeWithArray( + principalName, isGroup, isRole, databaseName); + LOG.debug("Done executing query for listMSecurityUserRoleMap"); + pm.retrieveAll(mRoleMember); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityUserRoleMap"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mRoleMember; + } + + + @SuppressWarnings("unchecked") + private List listMSecurityPrincipalMembershipRole(final String roleName, + final boolean isRole, final boolean isGroup) { + boolean success = false; + List mRoleMemebership = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalMembershipRole"); + Query query = pm.newQuery(MSecurityUserRoleMap.class, + "principalName == t1 && isRole == t2 && isGroup == t3"); + query + .declareParameters("java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3"); + mRoleMemebership = (List) query.execute(roleName, + isRole, isGroup); + LOG + .debug("Done 
executing query for listMSecurityPrincipalMembershipRole"); + pm.retrieveAll(mRoleMemebership); + success = commitTransaction(); + LOG + .debug("Done retrieving all objects for listMSecurityPrincipalMembershipRole"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mRoleMemebership; + } + + public Role getRole(String roleName, String dbName) throws NoSuchObjectException { + MSecurityRoleEntity mRole = this.getMRole(roleName, dbName); + if (mRole == null) { + throw new NoSuchObjectException(roleName + " role can not be found."); + } + Role ret = new Role(mRole.getRoleName(), this.getDatabase(mRole + .getDatabase().getName()), mRole.getCreateTime(), mRole.getOwnerName()); + return ret; + } + + private MSecurityRoleEntity getMRole(String roleName, String dbName) { + MSecurityRoleEntity mrole = null; + boolean commited = false; + try { + openTransaction(); + Query query = pm.newQuery(MSecurityRoleEntity.class, "roleName == t1 && database.name == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + query.setUnique(true); + mrole = (MSecurityRoleEntity) query.execute(roleName, dbName); + pm.retrieve(mrole); + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return mrole; + } + + @Override + public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, + List groupNames) throws InvalidObjectException, MetaException { + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + try { + openTransaction(); + if (userName != null) { + List user = this.listMSecurityPrincipalUserGrant(userName, false, false); + if(user.size()>0) { + Map userPriv = new HashMap(); + String userPrivStr = user.get(0).getPrivileges(); + for (int i = 1; i < user.size(); i++) { + userPrivStr = userPrivStr + ";" + user.get(i).getPrivileges(); + } + userPriv.put(userName, userPrivStr); + ret.setUserPrivileges(userPriv); + } + } + if (groupNames != null && groupNames.size() > 0) { + Map groupPriv = new HashMap(); + for(String groupName: groupNames) { + List group = this.listMSecurityPrincipalUserGrant(groupName, false, true); + if(group.size()>0) { + String groupPrivStr = group.get(0).getPrivileges(); + for (int i = 1; i < group.size(); i++) { + groupPrivStr = groupPrivStr + ";" + group.get(i).getPrivileges(); + } + groupPriv.put(groupName, groupPrivStr); + } + } + ret.setGroupPrivileges(groupPriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, + String userName, List groupNames) throws InvalidObjectException, + MetaException { + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + try { + openTransaction(); + if (userName != null) { + Map userDbPriv = new HashMap(); + userDbPriv.put(userName, getDBPrivilege(dbName, userName, false, false)); + ret.setUserPrivileges(userDbPriv); + } + if (groupNames != null && groupNames.size() > 0) { + Map groupDbPriv = new HashMap(); + for (String groupName : groupNames) { + groupDbPriv.put(groupName, getDBPrivilege(dbName, groupName, false, + true)); + } + ret.setGroupPrivileges(groupDbPriv); + } + List roles = listRoles(userName, groupNames, dbName); + if (roles != null && roles.size() > 0) { + Map roleDbPriv = new HashMap(); + for (MSecurityUserRoleMap role : roles) { + String name = role.getRole().getRoleName(); + roleDbPriv.put(name, getDBPrivilege(dbName, name, true, 
false)); + } + ret.setRolePrivileges(roleDbPriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, + String tableName, String partition, String userName, + List groupNames) throws InvalidObjectException, MetaException { + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + try { + openTransaction(); + if (userName != null) { + Map userPartitionPriv = new HashMap(); + userPartitionPriv.put(userName, getPartitionPrivilege(dbName, + tableName, partition, userName, false, false)); + ret.setUserPrivileges(userPartitionPriv); + } + if (groupNames != null && groupNames.size() > 0) { + Map groupPartitionPriv = new HashMap(); + for (String groupName : groupNames) { + groupPartitionPriv.put(groupName, getPartitionPrivilege(dbName, tableName, + partition, groupName, false, true)); + } + ret.setGroupPrivileges(groupPartitionPriv); + } + List roles = listRoles(userName, groupNames, dbName); + if (roles != null && roles.size() > 0) { + Map rolePartPriv = new HashMap(); + for (MSecurityUserRoleMap role : roles) { + String roleName = role.getRole().getRoleName(); + rolePartPriv.put(roleName, getPartitionPrivilege(dbName, tableName, + partition, roleName, true, false)); + } + ret.setRolePrivileges(rolePartPriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, + String tableName, String userName, List groupNames) + throws InvalidObjectException, MetaException { + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + try { + openTransaction(); + if (userName != null) { + Map userPartitionPriv = new HashMap(); + userPartitionPriv.put(userName, getTablePrivilege(dbName, + tableName, userName, false, false)); + ret.setUserPrivileges(userPartitionPriv); + } + if (groupNames != null && groupNames.size() > 0) { + Map groupPartitionPriv = new HashMap(); + for (String groupName : groupNames) { + groupPartitionPriv.put(groupName, getTablePrivilege(dbName, tableName, + groupName, false, true)); + } + ret.setGroupPrivileges(groupPartitionPriv); + } + List roles = listRoles(userName, groupNames, dbName); + if (roles != null && roles.size() > 0) { + Map rolePartPriv = new HashMap(); + for (MSecurityUserRoleMap role : roles) { + String roleName = role.getRole().getRoleName(); + rolePartPriv.put(roleName, getTablePrivilege(dbName, tableName, + roleName, true, false)); + } + ret.setRolePrivileges(rolePartPriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, + String tableName, String partitionName, String columnName, + String userName, List groupNames) throws InvalidObjectException, + MetaException { + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + try { + openTransaction(); + if (userName != null) { + Map userPartitionPriv = new HashMap(); + userPartitionPriv.put(userName, getColumnPrivilege(dbName, tableName, + columnName, partitionName, userName, false, false)); + ret.setUserPrivileges(userPartitionPriv); + } + if (groupNames != null && groupNames.size() > 0) { + Map groupPartitionPriv = new HashMap(); + for (String groupName : groupNames) { + 
groupPartitionPriv.put(groupName, getColumnPrivilege(dbName, + tableName, columnName, partitionName, groupName, false, true)); + } + ret.setGroupPrivileges(groupPartitionPriv); + } + List roles = listRoles(userName, groupNames, dbName); + if (roles != null && roles.size() > 0) { + Map rolePartPriv = new HashMap(); + for (MSecurityUserRoleMap role : roles) { + String roleName = role.getRole().getRoleName(); + rolePartPriv.put(roleName, getColumnPrivilege(dbName, tableName, + columnName, partitionName, roleName, true, false)); + } + ret.setRolePrivileges(rolePartPriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public boolean grantPrivileges(String userName, boolean isRole, + boolean isGroup, PrivilegeBag privileges, String grantor) + throws InvalidObjectException, MetaException, NoSuchObjectException { + boolean committed = false; + int now = (int)(System.currentTimeMillis()/1000); + try { + openTransaction(); + List persistentObjs = new ArrayList(); + + String userPrivs = privileges.getUserPrivileges(); + if (userPrivs != null) { + MSecurityUser mDb = new MSecurityUser(userName, isRole, isGroup, + userPrivs, now, grantor); + persistentObjs.add(mDb); + } + + Map dbPrivs = privileges.getDbPrivileges(); + if (dbPrivs != null) { + for (Map.Entry db : dbPrivs.entrySet()) { + MDatabase dbObj = getMDatabase(db.getKey().getName()); + if (dbObj != null) { + MSecurityDB mDb = new MSecurityDB(userName, isRole, isGroup, dbObj, + db.getValue(), now, grantor); + persistentObjs.add(mDb); + } + } + } + + Map tablePriv = privileges.getTablePrivileges(); + if (tablePriv != null) { + for (Map.Entry table : tablePriv.entrySet()) { + MTable tblObj = getMTable(table.getKey().getDbName(), table.getKey() + .getTableName()); + if (tblObj != null) { + MSecurityTablePartition mTab = new MSecurityTablePartition( + userName, isRole, isGroup, tblObj, null, table.getValue(), now, + grantor); + persistentObjs.add(mTab); + } + } + } + + Map partitionPriv = privileges.getPartitionPrivileges(); + if (partitionPriv != null) { + for (Map.Entry part : partitionPriv.entrySet()) { + Partition partObj = part.getKey(); + MPartition tblObj = this.getMPartition(partObj.getDbName(), partObj + .getTableName(), partObj.getValues()); + if (tblObj != null) { + MSecurityTablePartition mTab = new MSecurityTablePartition( + userName, isRole, isGroup, tblObj.getTable(), tblObj, part + .getValue(), now, grantor); + persistentObjs.add(mTab); + } + } + } + + List columnPriv = privileges.getColumnPrivileges(); + if (columnPriv != null) { + for (ColumnPrivilegeBag col : columnPriv) { + Map columnMap = col.getColumnPrivileges(); + MTable tblObj = getMTable(col.getDbName(), col.getTableName()); + if (columnMap != null && tblObj != null) { + for (Map.Entry colPriv : columnMap.entrySet()) { + MSecurityColumn mCol = new MSecurityColumn(userName, isRole, + isGroup, tblObj, null, colPriv.getKey(), colPriv.getValue(), now, + grantor); + persistentObjs.add(mCol); + } + } + } + } + if (persistentObjs.size() > 0) { + pm.makePersistentAll(persistentObjs); + } + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + return committed; + } + + public boolean revokeAllPrivileges(String userName, boolean isRole, + boolean isGroup, boolean removeUserPriv, List dbs, + List
tables, List parts, + Map> columns) throws MetaException { + boolean committed = false; + try { + openTransaction(); + List persistentObjs = new ArrayList(); + if (removeUserPriv) { + List mSecUser = this.listMSecurityPrincipalUserGrant( + userName, isRole, isGroup); + if (mSecUser != null) { + persistentObjs.addAll(persistentObjs); + } + } + + if (dbs != null) { + for (Database db : dbs) { + List dbGrants = this.listMSecurityPrincipalDBGrant( + userName, isGroup, isRole, db.getName()); + persistentObjs.addAll(dbGrants); + } + } + + if(tables != null) { + for (Table tab : tables) { + List tabGrants = + this.listMSecurityPrincipalTableGrant(userName, isGroup, isRole, + tab.getDbName(), tab.getTableName()); + persistentObjs.addAll(tabGrants); + } + } + + if(parts != null) { + for (Partition part : parts) { + Table tabObj = this.getTable(part.getDbName(), part.getTableName()); + List partGrants = this + .listMSecurityPrincipalPartitionGrant(userName, isGroup, isRole, + part.getDbName(), part.getTableName(), + Warehouse.makePartName(tabObj.getPartitionKeys(), part.getValues())); + persistentObjs.addAll(partGrants); + } + } + + if(columns != null) { + for (Map.Entry> tableColMap : columns.entrySet()){ + Table table = tableColMap.getKey(); + List colList = tableColMap.getValue(); + for(String col : colList) { + List secCol = this.listMSecurityPrincipalTableColumnGrant(userName, isGroup, isRole, + table.getDbName(), table.getTableName(), col, false); + persistentObjs.addAll(secCol); + } + } + } + + if (persistentObjs.size() > 0) { + pm.deletePersistentAll(persistentObjs); + } + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + return committed; + } + + + @Override + public boolean revokePrivileges(String userName, boolean isRole, + boolean isGroup, PrivilegeBag privileges) throws InvalidObjectException, + MetaException { + boolean committed = false; + try { + openTransaction(); + List persistentObjs = new ArrayList(); + String userPriv = privileges.getUserPrivileges(); + if (userPriv != null && !userPriv.trim().equals("")) { + List mSecUser = this.listMSecurityPrincipalUserGrant( + userName, isRole, isGroup); + boolean found = false; + if (mSecUser != null) { + String[] userPrivArray = userPriv.split(","); + for (MSecurityUser userGrant : mSecUser) { + String[] userGrantArray = userGrant.getPrivileges().split(","); + if (stringArrayEqualsIgnoreOrder(userPrivArray, userGrantArray)) { + found = true; + persistentObjs.add(userGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException( + "No user grant found for privileges " + userPriv); + } + } + } + + Map dbPrivileges = privileges.getDbPrivileges(); + if (dbPrivileges != null) { + for (Map.Entry dbPriv : dbPrivileges.entrySet()) { + Database db = dbPriv.getKey(); + String dbPrivStr = dbPriv.getValue(); + boolean found = false; + String[] dbPrivArray = dbPrivStr.split(","); + List dbGrants = this.listMSecurityPrincipalDBGrant( + userName, isGroup, isRole, db.getName()); + for (MSecurityDB dbGrant : dbGrants) { + String[] privStrArray = dbGrant.getPrivileges().split(","); + if (stringArrayEqualsIgnoreOrder(dbPrivArray, privStrArray)) { + found = true; + persistentObjs.add(dbGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException( + "No database grant found for privileges " + dbPrivStr + + " on database " + db.getName()); + } + } + } + + Map tablePrivileges = privileges.getTablePrivileges(); + if (tablePrivileges != null) { + for (Map.Entry tabPriv : 
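
Partition-level grants are resolved by the partition name that Warehouse.makePartName() derives from the partition keys and values, which is how revokeAllPrivileges() above locates the rows to delete. A minimal, self-contained illustration; the ds/hr key names and the values are made up:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class PartNameExample {
  /** Builds the canonical partition name used to look up partition-level grants. */
  public static String partName() throws MetaException {
    List<FieldSchema> keys = Arrays.asList(
        new FieldSchema("ds", "string", null),
        new FieldSchema("hr", "string", null));
    // yields "ds=2010-11-20/hr=12"
    return Warehouse.makePartName(keys, Arrays.asList("2010-11-20", "12"));
  }
}
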
tablePrivileges.entrySet()) { + Table table = tabPriv.getKey(); + String tblPrivStr = tabPriv.getValue(); + boolean found = false; + String[] tablePrivArray = tblPrivStr.split(","); + List tableGrants = + this.listMSecurityPrincipalTableGrant(userName, isGroup, isRole, + table.getDbName(), table.getTableName()); + for (MSecurityTablePartition tabGrant : tableGrants) { + String[] privStrArray = tabGrant.getPrivileges().split(","); + if (stringArrayEqualsIgnoreOrder(privStrArray, tablePrivArray)) { + found = true; + persistentObjs.add(tabGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + tblPrivStr + + ") found " + " on table " + table.getTableName() + + ", database is " + table.getDbName()); + } + } + } + + Map partPrivileges = privileges + .getPartitionPrivileges(); + if (partPrivileges != null) { + for (Map.Entry partPriv : partPrivileges.entrySet()) { + Partition part = partPriv.getKey(); + String partPrivStr = partPriv.getValue(); + boolean found = false; + String[] partPrivArray = partPrivStr.split(","); + Table tabObj = this.getTable(part.getDbName(), part.getTableName()); + List partitionGrants = this + .listMSecurityPrincipalPartitionGrant(userName, isGroup, isRole, + part.getDbName(), part.getTableName(), + Warehouse.makePartName(tabObj.getPartitionKeys(), part + .getValues())); + for (MSecurityTablePartition tabGrant : partitionGrants) { + String[] privStrArray = tabGrant.getPrivileges().split(","); + if (stringArrayEqualsIgnoreOrder(privStrArray, partPrivArray)) { + found = true; + persistentObjs.add(tabGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + partPrivStr + + ") found " + " on table " + tabObj.getTableName() + + ", database is " + tabObj.getDbName()); + } + } + } + + List columnPrivileges = privileges + .getColumnPrivileges(); + if (columnPrivileges != null) { + for (ColumnPrivilegeBag colPriv : columnPrivileges) { + String dbName = colPriv.getDbName(); + String tabName = colPriv.getTableName(); + String partName = colPriv.getPartitionName(); + Map colPrivMap = colPriv.getColumnPrivileges(); + for (Map.Entry column : colPrivMap.entrySet()) { + List mSecCol = listMSecurityTabOrPartColumnGrant(userName, + isRole, isGroup, dbName, tabName, partName, column.getKey()); + boolean found = false; + if (mSecCol != null) { + String[] toBeMatched = column.getValue().split(","); + for (MSecurityColumn col : mSecCol) { + String[] candicate = col.getPrivileges().split(","); + if (stringArrayEqualsIgnoreOrder(candicate, toBeMatched)) { + found = true; + persistentObjs.add(col); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + column + + ") found " + " on column " + column.getKey() + " table " + + tabName + ", database is " + dbName); + } + } + } + } + } + if (persistentObjs.size() > 0) { + pm.deletePersistentAll(persistentObjs); + } + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + return committed; + } + + private boolean stringArrayEqualsIgnoreOrder(String[] o1, String[] o2) { + if (o1 == o2) { + return true; + } + if (o1 != null && o2 != null) { + for (int i = 0; i < o1.length; i++) { + boolean found = false; + for (int j = 0; j < o2.length; j++) { + if (o1[i].equalsIgnoreCase(o2[j])) { + found = true; + break; + } + } + + if (!found) { + return false; + } + } + return true; + } else { + return false; + } + } + + public String getDBPrivilege(String dbName, + String principalName, boolean isRole, boolean isGroup) + 
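
revokePrivileges() above only deletes a stored grant when the requested privilege list matches it through stringArrayEqualsIgnoreOrder(): every privilege named in the first array must have a case-insensitive match in the second, order does not matter, and no trimming is applied. A standalone restatement of that containment check, with the class name invented purely for illustration:

public class RevokeMatchExample {

  /** True when every privilege in 'requested' has a case-insensitive match in 'stored'. */
  static boolean matches(String[] requested, String[] stored) {
    for (String r : requested) {
      boolean found = false;
      for (String s : stored) {
        if (r.equalsIgnoreCase(s)) {
          found = true;
          break;
        }
      }
      if (!found) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // A grant stored as "Select,Update" is matched by a revoke of "UPDATE,select" ...
    System.out.println(matches("UPDATE,select".split(","), "Select,Update".split(","))); // true
    // ... but not by one that names a privilege that was never granted.
    System.out.println(matches("Select,Drop".split(","), "Select,Update".split(",")));   // false
  }
}
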
throws InvalidObjectException, MetaException { + String privileges = null; + if (principalName != null) { + List userNameDbPriv = this.listMSecurityPrincipalDBGrant( + principalName, isGroup, isRole, dbName); + if (userNameDbPriv != null && userNameDbPriv.size() > 0) { + privileges = userNameDbPriv.get(0).getPrivileges(); + for (int i = 1; i < userNameDbPriv.size(); i++) { + privileges = privileges + "," + + userNameDbPriv.get(i).getPrivileges(); + } + } + } + return privileges; + } + + private String getTablePrivilege(String dbName, String tableName, + String principalName, boolean isRole, boolean isGroup) { + String privileges = null; + if (principalName != null) { + List userNameTabPartPriv = this + .listMSecurityPrincipalTableGrant(principalName, isGroup, isRole, + dbName, tableName); + if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { + privileges = userNameTabPartPriv.get(0).getPrivileges(); + for (int i = 1; i < userNameTabPartPriv.size(); i++) { + privileges = privileges + "," + + userNameTabPartPriv.get(i).getPrivileges(); + } + } + } + return privileges; + } + + private String getPartitionPrivilege(String dbName, String tableName, + String partName, String principalName, boolean isRole, boolean isGroup) { + String privileges = null; + if (principalName != null) { + List userNameTabPartPriv = this + .listMSecurityPrincipalPartitionGrant(principalName, isGroup, isRole, + dbName, tableName, partName); + if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { + privileges = userNameTabPartPriv.get(0).getPrivileges(); + for (int i = 1; i < userNameTabPartPriv.size(); i++) { + privileges = privileges + "," + + userNameTabPartPriv.get(i).getPrivileges(); + } + } + } + return privileges; + } + + private String getColumnPrivilege(String dbName, String tableName, + String columnName, String partitionName, String principalName, boolean isRole, boolean isGroup) { + String privileges = null; + List userNameTabPartPriv = null; + if (partitionName == null) { + userNameTabPartPriv = this.listMSecurityPrincipalTableColumnGrant( + principalName, isGroup, isRole, dbName, tableName, columnName, true); + } else { + userNameTabPartPriv = this.listMSecurityPrincipalPartitionColumnGrant( + principalName, isGroup, isRole, dbName, tableName, partitionName, + columnName); + } + if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { + privileges = userNameTabPartPriv.get(0).getPrivileges(); + for (int i = 1; i < userNameTabPartPriv.size(); i++) { + privileges = privileges + "," + + userNameTabPartPriv.get(i).getPrivileges(); + } + } + return privileges; + } + + @SuppressWarnings("unchecked") + private List listMSecurityUserRoleMember( + MSecurityRoleEntity mRol) { + boolean success = false; + List mRoleMemeberList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityUserRoleMember"); + Query query = pm.newQuery(MSecurityUserRoleMap.class, + "role.roleName == t1"); + query.declareParameters("java.lang.String t1"); + query.setUnique(false); + mRoleMemeberList = (List) query.execute( + mRol.getRoleName()); + LOG.debug("Done executing query for listMSecurityUserRoleMember"); + pm.retrieveAll(mRoleMemeberList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityUserRoleMember"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mRoleMemeberList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityPrincipalUserGrant(String principlaName, + boolean isRole, boolean isGroup) 
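
getDBPrivilege(), getTablePrivilege(), getPartitionPrivilege() and getColumnPrivilege() above all merge multiple grant rows for the same principal into one comma-separated string, duplicates included. A hypothetical consumer therefore has to split that string again before testing for a specific privilege; for example:

public class PrivilegeStringExample {

  /** Checks whether a merged, comma-separated privilege string contains a privilege. */
  public static boolean hasPrivilege(String mergedPrivileges, String required) {
    if (mergedPrivileges == null) {
      return false;
    }
    for (String p : mergedPrivileges.split(",")) {
      if (p.trim().equalsIgnoreCase(required)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(hasPrivilege("Select,Update", "select")); // true
    System.out.println(hasPrivilege("Select,Update", "Drop"));   // false
  }
}
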
{ + boolean commited = false; + List userNameDbPriv = null; + try { + openTransaction(); + if (principlaName != null) { + Query query = pm.newQuery(MSecurityUser.class, + "principalName == t1 && isRole == t2 && isGroup== t3"); + query.declareParameters( + "java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3"); + userNameDbPriv = (List) query + .executeWithArray(principlaName, isRole, isGroup); + pm.retrieveAll(userNameDbPriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return userNameDbPriv; + } + + @SuppressWarnings("unchecked") + public List listMSecurityPrincipalDBGrant(String principalName, + boolean isGroup, boolean isRole, String dbName) { + boolean success = false; + List mSecurityDBList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalDBGrant"); + Query query = pm.newQuery(MSecurityDB.class, + "principalName == t1 && isGroup == t2 && isRole == t3 && database.name == t4"); + query + .declareParameters("java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3, java.lang.String t4"); + mSecurityDBList = (List) query.executeWithArray(principalName, + isGroup, isRole, dbName); + LOG.debug("Done executing query for listMSecurityPrincipalDBGrant"); + pm.retrieveAll(mSecurityDBList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityPrincipalDBGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityDBList; + } + + @SuppressWarnings("unchecked") + private List listAllMSecurityPrincipalDBGrant(String principalName, + boolean isGroup, boolean isRole) { + boolean success = false; + List mSecurityDBList = null; + try { + openTransaction(); + LOG.debug("Executing listAllMSecurityPrincipalDBGrant"); + Query query = pm.newQuery(MSecurityDB.class, + "principalName == t1 && isGroup == t2 && isRole == t3"); + query + .declareParameters("java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3"); + mSecurityDBList = (List) query.execute(principalName, + isGroup, isRole); + LOG.debug("Done executing query for listAllMSecurityPrincipalDBGrant"); + pm.retrieveAll(mSecurityDBList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listAllMSecurityPrincipalDBGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityDBList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityTablePart(String dbName, + String tableName, boolean tableOnly) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityTable"); + String queryStr = "table.tableName == t1 && table.database.name == t2"; + if (tableOnly) { + queryStr = queryStr + " && partition == null"; + } + Query query = pm.newQuery( + MSecurityTablePartition.class, queryStr); + query.declareParameters( + "java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = (List) query + .executeWithArray(tableName, dbName); + LOG.debug("Done executing query for listMSecurityTable"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG + .debug("Done retrieving all objects for listMSecurityTable"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityTablePartColumn(String dbName, + String tableName, boolean tableOnly) { + boolean success = false; + List mSecurityColList = null; + try { + 
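
The listMSecurity*Grant helpers in this file all follow one JDO idiom: a filter over the persistent model class, declared parameter types, positional binding through executeWithArray(), and a retrieveAll() so the rows stay usable after the transaction is closed. A stripped-down sketch modelled on listMSecurityPrincipalDBGrant(), with ObjectStore's openTransaction()/commitTransaction() bookkeeping omitted for brevity:

import java.util.List;

import javax.jdo.PersistenceManager;
import javax.jdo.Query;

import org.apache.hadoop.hive.metastore.model.MSecurityDB;

public class JdoLookupSketch {

  /** Fetch the database-level grant rows for one principal in one database. */
  @SuppressWarnings("unchecked")
  public static List<MSecurityDB> dbGrantsFor(PersistenceManager pm, String principal,
      boolean isGroup, boolean isRole, String dbName) {
    Query query = pm.newQuery(MSecurityDB.class,
        "principalName == t1 && isGroup == t2 && isRole == t3 && database.name == t4");
    query.declareParameters(
        "java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3, java.lang.String t4");
    List<MSecurityDB> grants =
        (List<MSecurityDB>) query.executeWithArray(principal, isGroup, isRole, dbName);
    pm.retrieveAll(grants); // force-load the fields before the caller commits
    return grants;
  }
}
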
openTransaction(); + LOG.debug("Executing listMSecurityTablePartColumn"); + String queryStr = "table.tableName == t1 && table.database.name == t2"; + if (tableOnly) { + queryStr = queryStr + " && partition == null"; + } + Query query = pm.newQuery(MSecurityColumn.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityColList = (List) query + .executeWithArray(tableName, dbName); + LOG.debug("Done executing query for listMSecurityTablePartColumn"); + pm.retrieveAll(mSecurityColList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityTablePartColumn"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + @SuppressWarnings("unchecked") + private List listMSecurityPartition(String dbName, String tableName, + String partName) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityTablePartition"); + Query query = pm.newQuery(MSecurityTablePartition.class, + "table.tableName == t1 && table.database.name == t2 && partition.partitionName == t3"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3"); + mSecurityTabPartList = (List) query + .executeWithArray(tableName, dbName, partName); + LOG.debug("Done executing query for listMSecurityTablePartition"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityTablePartition"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityPartitionColumn(String dbName, + String tableName, String partName) { + boolean success = false; + List mSecurityColList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPartitionColumn"); + Query query = pm.newQuery( + MSecurityColumn.class, + "table.tableName == t1 && table.database.name == t2 && partition.partitionName == t3"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3"); + mSecurityColList = (List) query + .executeWithArray(tableName, dbName, partName); + LOG.debug("Done executing query for listMSecurityPartitionColumn"); + pm.retrieveAll(mSecurityColList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityPartitionColumn"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityPrincipalTableGrant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalTableGrant"); + Query query = pm.newQuery( + MSecurityTablePartition.class, + "principalName == t1 && isGroup == t2 && isRole == t3 && table.tableName == t4 && table.database.name == t5 && partition == null"); + query.declareParameters( + "java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3, java.lang.String t4, java.lang.String t5"); + mSecurityTabPartList = (List) query + .executeWithArray(principalName, isGroup, isRole, tableName, dbName); + LOG.debug("Done executing query for listMSecurityPrincipalTableGrant"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG + .debug("Done retrieving all objects for 
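
listMSecurityTablePart() and listMSecurityTablePartColumn() above distinguish table-level from partition-level grants purely by the stored row's partition reference: a tableOnly lookup appends "partition == null" to the JDO filter. The convention in isolation, with the filter text copied from those methods:

public class GrantFilterSketch {
  /** Filter over MSecurityTablePartition/MSecurityColumn rows for one table. */
  public static String grantFilter(boolean tableOnly) {
    String filter = "table.tableName == t1 && table.database.name == t2";
    if (tableOnly) {
      filter += " && partition == null"; // exclude grants made on individual partitions
    }
    return filter;
  }
}
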
listMSecurityPrincipalTableGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityPrincipalPartitionGrant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName, String partName) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalPartitionGrant"); + Query query = pm.newQuery( + MSecurityTablePartition.class, + "principalName == t1 && isGroup == t2 && isRole == t3 && table.tableName == t4 " + + "&& table.database.name == t5 && partition.partitionName == t6"); + query.declareParameters( + "java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3, java.lang.String t4, java.lang.String t5, java.lang.String t6"); + mSecurityTabPartList = (List) query + .executeWithArray(principalName, isGroup, isRole, tableName, dbName, partName); + LOG.debug("Done executing query for listMSecurityPrincipalPartitionGrant"); + + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityPrincipalPartitionGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @SuppressWarnings("unchecked") + private List listAllMSecurityPrincipalTablePartGrant( + String principalName, boolean isGroup, boolean isRole) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listAllMSecurityPrincipalTablePartGrant"); + Query query = pm.newQuery(MSecurityTablePartition.class, + "principalName == t1 && isGroup == t2 && isRole == t3"); + query + .declareParameters("java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3"); + mSecurityTabPartList = (List) query.execute( + principalName, isGroup, isRole); + LOG.debug("Done executing query for listAllMSecurityPrincipalTablePartGrant"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listAllMSecurityPrincipalTablePartGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + public List listMSecurityTabOrPartColumnGrant(String userName, + boolean isRole, boolean isGroup, String dbName, String tabName, + String partName, String columnName) { + List mSecCol = null; + if (partName != null) { + mSecCol = this.listMSecurityPrincipalPartitionColumnGrant(userName, + isGroup, isRole, dbName, tabName, partName, columnName); + } else { + mSecCol = this.listMSecurityPrincipalTableColumnGrant(userName, isGroup, + isRole, dbName, tabName, columnName, true); + } + return mSecCol; + } + + @SuppressWarnings("unchecked") + public List listMSecurityPrincipalTableColumnGrant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName, String columnName, boolean tableOnly) { + boolean success = false; + List mSecurityColList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalColumnGrant"); + String queryStr = "principalName == t1 && isGroup == t2 && isRole == t3 && " + + "table.tableName == t4 && table.database.name == t5 && columnName == t6 "; + if (tableOnly) { + queryStr = queryStr + " && partition == null"; + } + Query query = pm.newQuery(MSecurityColumn.class, queryStr); + query + .declareParameters("java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3, java.lang.String t4, 
java.lang.String t5, java.lang.String t6"); + mSecurityColList = (List) query.executeWithArray( + principalName, isGroup, isRole, tableName, dbName, columnName); + LOG.debug("Done executing query for listMSecurityPrincipalColumnGrant"); + pm.retrieveAll(mSecurityColList); + success = commitTransaction(); + LOG + .debug("Done retrieving all objects for listMSecurityPrincipalColumnGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityPrincipalPartitionColumnGrant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName, String partitionName, String columnName) { + boolean success = false; + List mSecurityColList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalPartitionColumnGrant"); + Query query = pm.newQuery( + MSecurityColumn.class, + "principalName == t1 && isGroup == t2 && isRole == t3 && table.tableName == t4 && table.database.name == t5 && " + + "partition.partitionName == t6 && columnName == t7"); + query + .declareParameters("java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3, java.lang.String t4, java.lang.String t5, java.lang.String t6, java.lang.String t7"); + + mSecurityColList = (List) query.executeWithArray( + principalName, isGroup, isRole, tableName, dbName, partitionName, + columnName); + LOG.debug("Done executing query for listMSecurityPrincipalPartitionColumnGrant"); + pm.retrieveAll(mSecurityColList); + + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityPrincipalPartitionColumnGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + @SuppressWarnings("unchecked") + private List listAllMSecurityPrincipalColumnGrant( + String principalName, boolean isGroup, boolean isRole) { + boolean success = false; + List mSecurityColumnList = null; + try { + openTransaction(); + LOG.debug("Executing listAllMSecurityPrincipalColumnGrant"); + Query query = pm.newQuery(MSecurityColumn.class, + "principalName == t1 && isGroup == t2 && isRole == t3"); + query + .declareParameters("java.lang.String t1, java.lang.Boolean t2, java.lang.Boolean t3"); + mSecurityColumnList = (List) query.execute( + principalName, isGroup, isRole); + LOG.debug("Done executing query for listAllMSecurityPrincipalColumnGrant"); + pm.retrieveAll(mSecurityColumnList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listAllMSecurityPrincipalColumnGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColumnList; + } + } Index: metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (revision 1036686) +++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (working copy) @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hive.metastore.api.Database; @@ -27,8 +28,16 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import 
org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.model.MSecurityColumn; +import org.apache.hadoop.hive.metastore.model.MSecurityDB; +import org.apache.hadoop.hive.metastore.model.MSecurityTablePartition; +import org.apache.hadoop.hive.metastore.model.MSecurityUser; +import org.apache.hadoop.hive.metastore.model.MSecurityUserRoleMap; public interface RawStore extends Configurable { @@ -128,5 +137,74 @@ public abstract List getPartitionsByFilter( String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException; + + public abstract boolean addRole(String rowName, String ownerName, + String dbName) throws InvalidObjectException, MetaException, + NoSuchObjectException; + + public abstract boolean removeRole(String roleName, String dbName) throws MetaException, NoSuchObjectException; + + public abstract boolean addRoleMember(Role role, String userName, boolean isRole, boolean isGroup) + throws MetaException, NoSuchObjectException; + + public abstract boolean removeRoleMember(Role role, String userName, boolean isRole, boolean isGroup) + throws MetaException, NoSuchObjectException; + + public abstract PrincipalPrivilegeSet getUserPrivilegeSet(String userName, + List groupNames) throws InvalidObjectException, MetaException; + + public abstract PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName, + List groupNames) throws InvalidObjectException, MetaException; + + public abstract PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName, + String userName, List groupNames) throws InvalidObjectException, MetaException; + + public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName, + String partition, String userName, List groupNames) throws InvalidObjectException, MetaException; + + public abstract PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, + String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException; + + public abstract List listMSecurityPrincipalUserGrant(String principlaName, + boolean isRole, boolean isGroup); + + public abstract List listMSecurityPrincipalDBGrant(String principalName, + boolean isGroup, boolean isRole, String dbName); + + public abstract List listMSecurityPrincipalTableGrant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName); + + public abstract List listMSecurityPrincipalPartitionGrant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName, String partName); + + public List listMSecurityTabOrPartColumnGrant(String userName, + boolean isRole, boolean isGroup, String dbName, String tabName, + String partName, String column); + + public abstract List listMSecurityPrincipalTableColumnGrant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName, String columnName, boolean tableOnly); + + public abstract List listMSecurityPrincipalPartitionColumnGrant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName, String partName, String columnName); + + public abstract boolean grantPrivileges (String userName, boolean isRole, boolean isGroup, PrivilegeBag privileges, String grantor) + throws InvalidObjectException, MetaException, NoSuchObjectException; + + public abstract boolean revokePrivileges 
(String userName, boolean isRole, boolean isGroup, PrivilegeBag privileges) + throws InvalidObjectException, MetaException; + + public abstract org.apache.hadoop.hive.metastore.api.Role getRole(String roleName, String dbName) throws NoSuchObjectException; + + public List listRoles(String principalName, + boolean isRole, boolean isGroup, String databaseName); + + public boolean revokeAllPrivileges(String userName, boolean isRole, + boolean isGroup, boolean removeUserPriv, List dbs, + List
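
The role-management methods added to RawStore above (addRole, getRole, addRoleMember, removeRoleMember, listRoles) cover the whole role lifecycle. A hedged sketch of the common path: create a role in a database, read it back, and enroll a user. The role, owner, and user names are made up, and the checked exception types are collapsed to Exception for brevity.

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Role;

public class RoleLifecycleSketch {
  /** Create a role and add an individual user as a member. */
  public static void enrollUser(RawStore store) throws Exception {
    store.addRole("analysts", "admin", "default");
    Role analysts = store.getRole("analysts", "default");
    // isRole=false, isGroup=false: the new member is a plain user
    store.addRoleMember(analysts, "bob", false, false);
  }
}
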
tables, List parts, + Map> columns) throws MetaException; } Index: metastore/src/model/package.jdo =================================================================== --- metastore/src/model/package.jdo (revision 1036686) +++ metastore/src/model/package.jdo (working copy) @@ -250,7 +250,7 @@ - + @@ -345,5 +345,194 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityColumn.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityColumn.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityColumn.java (revision 0) @@ -0,0 +1,180 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.model; + +public class MSecurityColumn { + + private String principalName; + + private boolean isRole; + + private boolean isGroup; + + private MTable table; + + private MPartition partition; + + private String columnName; + + private String privileges; + + private int createTime; + + private String grantor; + + public MSecurityColumn() { + } + + /** + * @param principalName + * @param isRole + * @param isGroup + * @param table + * @param partition + * @param columnName + * @param privileges + * @param createTime + * @param grantor + */ + public MSecurityColumn(String principalName, boolean isRole, boolean isGroup, + MTable table, MPartition partition, String columnName, String privileges, int createTime, + String grantor) { + super(); + this.principalName = principalName; + this.isRole = isRole; + this.isGroup = isGroup; + this.table = table; + this.partition = partition; + this.columnName = columnName; + this.privileges = privileges; + this.createTime = createTime; + this.grantor = grantor; + } + + /** + * @return true if this user name is a role + */ + public boolean getIsRole() { + return isRole; + } + + /** + * @param isRole is this user name a role? 
+ */ + public void setIsRole(boolean isRole) { + this.isRole = isRole; + } + + /** + * @return true if this user name is a group, false else + */ + public boolean getIsGroup() { + return isGroup; + } + + /** + * @param isGroup true if is this user name a group + */ + public void setIsGroup(boolean isGroup) { + this.isGroup = isGroup; + } + + /** + * @return column name + */ + public String getColumnName() { + return columnName; + } + + /** + * @param columnName column name + */ + public void setColumnName(String columnName) { + this.columnName = columnName; + } + + /** + * @return a set of privileges this user/role/group has + */ + public String getPrivileges() { + return privileges; + } + + /** + * @param dbPrivileges a set of privileges this user/role/group has + */ + public void setPrivileges(String dbPrivileges) { + this.privileges = dbPrivileges; + } + + /** + * @return create time + */ + public int getCreateTime() { + return createTime; + } + + /** + * @param createTime create time + */ + public void setCreateTime(int createTime) { + this.createTime = createTime; + } + + public String getPrincipalName() { + return principalName; + } + + public void setPrincipalName(String principalName) { + this.principalName = principalName; + } + + public MTable getTable() { + return table; + } + + public void setTable(MTable table) { + this.table = table; + } + + public MPartition getPartition() { + return partition; + } + + public void setPartition(MPartition partition) { + this.partition = partition; + } + + public void setRole(boolean isRole) { + this.isRole = isRole; + } + + public void setGroup(boolean isGroup) { + this.isGroup = isGroup; + } + + public String getGrantor() { + return grantor; + } + + public void setGrantor(String grantor) { + this.grantor = grantor; + } + +} Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityDB.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityDB.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityDB.java (revision 0) @@ -0,0 +1,140 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.model; + +public class MSecurityDB { + + private String principalName; + + private boolean isRole; + + private boolean isGroup; + + private MDatabase database; + + private int createTime; + + private String privileges; + + private String grantor; + + public MSecurityDB() { + } + + public MSecurityDB(String principalName, boolean isRole, boolean isGroup, + MDatabase database, String dbPrivileges, int createTime, String grantor) { + super(); + this.principalName = principalName; + this.isRole = isRole; + this.isGroup = isGroup; + this.database = database; + this.privileges = dbPrivileges; + this.createTime = createTime; + this.grantor = grantor; + } + + /** + * @return user name, role name, or group name + */ + public String getPrincipalName() { + return principalName; + } + + /** + * @param userName user/role/group name + */ + public void setPrincipalName(String userName) { + this.principalName = userName; + } + + /** + * @return true if this user name is a role + */ + public boolean getIsRole() { + return isRole; + } + + /** + * @param isRole is this user name a role? + */ + public void setIsRole(boolean isRole) { + this.isRole = isRole; + } + + /** + * @return true if this user name is a group, false else + */ + public boolean getIsGroup() { + return isGroup; + } + + /** + * @param isGroup is this user name a group + */ + public void setIsGroup(boolean isGroup) { + this.isGroup = isGroup; + } + + /** + * @return a set of privileges this user/role/group has + */ + public String getPrivileges() { + return privileges; + } + + /** + * @param dbPrivileges a set of privileges this user/role/group has + */ + public void setPrivileges(String dbPrivileges) { + this.privileges = dbPrivileges; + } + + public MDatabase getDatabase() { + return database; + } + + public void setDatabase(MDatabase database) { + this.database = database; + } + + public void setRole(boolean isRole) { + this.isRole = isRole; + } + + public void setGroup(boolean isGroup) { + this.isGroup = isGroup; + } + + public int getCreateTime() { + return createTime; + } + + public void setCreateTime(int createTime) { + this.createTime = createTime; + } + + public String getGrantor() { + return grantor; + } + + public void setGrantor(String grantor) { + this.grantor = grantor; + } + +} Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityRoleEntity.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityRoleEntity.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityRoleEntity.java (revision 0) @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.model; + +public class MSecurityRoleEntity { + + private String roleName; + + private int createTime; + + private String ownerName; + + private MDatabase database; + + public MSecurityRoleEntity() { + } + + public MSecurityRoleEntity(String roleName, int createTime, String ownerName, + MDatabase database) { + super(); + this.roleName = roleName; + this.createTime = createTime; + this.ownerName = ownerName; + this.database = database; + } + + /** + * @return role name + */ + public String getRoleName() { + return roleName; + } + + /** + * @param roleName + */ + public void setRoleName(String roleName) { + this.roleName = roleName; + } + + /** + * @return create time + */ + public int getCreateTime() { + return createTime; + } + + /** + * @param createTime + * role create time + */ + public void setCreateTime(int createTime) { + this.createTime = createTime; + } + + /** + * @return the principal name who created this role + */ + public String getOwnerName() { + return ownerName; + } + + public void setOwnerName(String ownerName) { + this.ownerName = ownerName; + } + + /** + * @return the database this role belongs to + */ + public MDatabase getDatabase() { + return database; + } + + /** + * @param database + */ + public void setDatabase(MDatabase database) { + this.database = database; + } +} Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityTablePartition.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityTablePartition.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityTablePartition.java (revision 0) @@ -0,0 +1,158 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.model; + +public class MSecurityTablePartition { + + private String principalName; + + private boolean isRole; + + private boolean isGroup; + + private MTable table; + + private MPartition partition; + + private String privileges; + + private int createTime; + + private String grantor; + + public MSecurityTablePartition() { + } + + public MSecurityTablePartition(String principalName, boolean isRole, + boolean isGroup, MTable table, MPartition partition, String privileges, + int createTime, String grantor) { + super(); + this.principalName = principalName; + this.isRole = isRole; + this.isGroup = isGroup; + this.table = table; + this.partition = partition; + this.privileges = privileges; + this.createTime = createTime; + this.grantor = grantor; + } + + public String getPrincipalName() { + return principalName; + } + + public void setPrincipalName(String principalName) { + this.principalName = principalName; + } + + /** + * @return true if this user name is a role + */ + public boolean getIsRole() { + return isRole; + } + + /** + * @param isRole is this user name a role? + */ + public void setIsRole(boolean isRole) { + this.isRole = isRole; + } + + /** + * @return true if this user name is a group, false else + */ + public boolean getIsGroup() { + return isGroup; + } + + /** + * @param isGroup true if is this user name a group + */ + public void setIsGroup(boolean isGroup) { + this.isGroup = isGroup; + } + + /** + * @return a set of privileges this user/role/group has + */ + public String getPrivileges() { + return privileges; + } + + /** + * @param dbPrivileges a set of privileges this user/role/group has + */ + public void setPrivileges(String dbPrivileges) { + this.privileges = dbPrivileges; + } + + /** + * @return create time + */ + public int getCreateTime() { + return createTime; + } + + /** + * @param createTime create time + */ + public void setCreateTime(int createTime) { + this.createTime = createTime; + } + + /** + * @return + */ + public String getGrantor() { + return grantor; + } + + /** + * @param grantor + */ + public void setGrantor(String grantor) { + this.grantor = grantor; + } + + public void setRole(boolean isRole) { + this.isRole = isRole; + } + + public void setGroup(boolean isGroup) { + this.isGroup = isGroup; + } + + public MTable getTable() { + return table; + } + + public void setTable(MTable table) { + this.table = table; + } + + public MPartition getPartition() { + return partition; + } + + public void setPartition(MPartition partition) { + this.partition = partition; + } + +} Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityUser.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityUser.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityUser.java (revision 0) @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.model; + +/** + * User global level privileges + */ +public class MSecurityUser { + + //principal name, can be a user, group, or role + private String principalName; + + private boolean isRole; + + private boolean isGroup; + + private String privileges; + + private int createTime; + + private String grantor; + + public MSecurityUser() { + super(); + } + + public MSecurityUser(String userName, boolean isRole, boolean isGroup, + String dbPrivileges, int createTime, String grantor) { + super(); + this.principalName = userName; + this.isRole = isRole; + this.isGroup = isGroup; + this.privileges = dbPrivileges; + this.createTime = createTime; + this.grantor = grantor; + } + + /** + * @return is this user name a role name? + */ + public boolean getIsRole() { + return isRole; + } + + /** + * @param isRole this user name is a role name? + */ + public void setIsRole(boolean isRole) { + this.isRole = isRole; + } + + /** + * @return is this user name a group name? + */ + public boolean getIsGroup() { + return isGroup; + } + + /** + * @param isGroup this user name a group name + */ + public void setIsGroup(boolean isGroup) { + this.isGroup = isGroup; + } + + /** + * @return a set of global privileges granted to this user + */ + public String getPrivileges() { + return privileges; + } + + /** + * @param dbPrivileges set of global privileges to user + */ + public void setPrivileges(String dbPrivileges) { + this.privileges = dbPrivileges; + } + + public void setRole(boolean isRole) { + this.isRole = isRole; + } + + public void setGroup(boolean isGroup) { + this.isGroup = isGroup; + } + + public String getPrincipalName() { + return principalName; + } + + public void setPrincipalName(String principalName) { + this.principalName = principalName; + } + + public int getCreateTime() { + return createTime; + } + + public void setCreateTime(int createTime) { + this.createTime = createTime; + } + + public String getGrantor() { + return grantor; + } + + public void setGrantor(String grantor) { + this.grantor = grantor; + } + +} Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityUserRoleMap.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityUserRoleMap.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MSecurityUserRoleMap.java (revision 0) @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.model; + +public class MSecurityUserRoleMap { + + private String principalName; + + private boolean isRole; + + private boolean isGroup; + + private MSecurityRoleEntity role; + + private int addTime; + + public MSecurityUserRoleMap() { + } + + public MSecurityUserRoleMap(String principalName, boolean isRole, + boolean isGroup, MSecurityRoleEntity role, int addTime) { + super(); + this.principalName = principalName; + this.isRole = isRole; + this.isGroup = isGroup; + this.role = role; + this.addTime = addTime; + } + + /** + * @return principal name + */ + public String getPrincipalName() { + return principalName; + } + + /** + * @param userName principal name + */ + public void setPrincipalName(String userName) { + this.principalName = userName; + } + + /** + * @return is role + */ + public boolean isRole() { + return isRole; + } + + /** + * @param isRole + */ + public void setRole(boolean isRole) { + this.isRole = isRole; + } + + /** + * @return is group + */ + public boolean isGroup() { + return isGroup; + } + + /** + * @param isGroup + */ + public void setGroup(boolean isGroup) { + this.isGroup = isGroup; + } + + /** + * @return add time + */ + public int getAddTime() { + return addTime; + } + + /** + * @param addTime + */ + public void setAddTime(int addTime) { + this.addTime = addTime; + } + + public MSecurityRoleEntity getRole() { + return role; + } + + public void setRole(MSecurityRoleEntity role) { + this.role = role; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/Driver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java (working copy) @@ -49,6 +49,7 @@ import org.apache.hadoop.hive.ql.exec.ExecDriver; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.TaskResult; @@ -65,22 +66,29 @@ import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode; import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject; import org.apache.hadoop.hive.ql.lockmgr.LockException; +import org.apache.hadoop.hive.ql.metadata.AuthorizationException; import org.apache.hadoop.hive.ql.metadata.DummyPartition; +import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ErrorMsg; import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext; import 
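
All of the MSecurity* model classes above, MSecurityUserRoleMap included, identify their principal by a name plus the isRole/isGroup flag pair rather than an explicit type field. A hypothetical decoder makes the convention explicit; both flags false means an individual user:

public class PrincipalTypeSketch {

  /** The three principal kinds encoded by the isRole/isGroup pair. */
  public enum PrincipalType { USER, GROUP, ROLE }

  /** Decode the flag pair carried by every MSecurity* row. */
  public static PrincipalType principalType(boolean isRole, boolean isGroup) {
    if (isRole) {
      return PrincipalType.ROLE;
    }
    if (isGroup) {
      return PrincipalType.GROUP;
    }
    return PrincipalType.USER;
  }
}
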
org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContextImpl; +import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.ParseDriver; import org.apache.hadoop.hive.ql.parse.ParseException; import org.apache.hadoop.hive.ql.parse.ParseUtils; +import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; +import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.SemanticAnalyzerFactory; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.processors.CommandProcessor; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; @@ -369,8 +377,20 @@ if (plan.getFetchTask() != null) { plan.getFetchTask().initialize(conf, plan, null); } + + //do the authorization check + if (HiveConf.getBoolVar(conf, + HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) { + try { + doAuthorization(sem); + } catch (AuthorizationException authExp) { + console.printError("Authrization failed:" + authExp.getMessage() + + ". Use show grant to get more details."); + return 403; + } + } - return (0); + return 0; } catch (SemanticException e) { errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage(); SQLState = ErrorMsg.findSQLState(e.getMessage()); @@ -392,6 +412,141 @@ } } + private boolean doAuthorization(BaseSemanticAnalyzer sem) + throws HiveException, AuthorizationException { + HashSet inputs = sem.getInputs(); + HashSet outputs = sem.getOutputs(); + SessionState ss = SessionState.get(); + HiveOperation op = ss.getHiveOperation(); + Hive db = sem.getDb(); + boolean pass = true; + if (op != null) { + if (outputs != null && outputs.size() > 0) { + if (op.equals(HiveOperation.CREATETABLE_AS_SELECT)) { + pass = ss.getAuthorizer() + .authorize( + db.getDatabase(db.getCurrentDatabase()), + null, + HiveOperation.CREATETABLE_AS_SELECT + .getOutputRequiredPrivileges()); + } else { + for (WriteEntity write : outputs) { + + if (write.getType() == WriteEntity.Type.PARTITION) { + Partition part = db.getPartition(write.getTable(), write + .getPartition().getSpec(), false); + if (part != null) { + pass = pass + && ss.getAuthorizer().authorize(write.getPartition(), null, + op.getOutputRequiredPrivileges()); + continue; + } + } + + if (write.getTable() != null) { + pass = pass + && ss.getAuthorizer().authorize(write.getTable(), null, + op.getOutputRequiredPrivileges()); + } + + if (!pass) { + break; + } + } + + } + } + + if (pass && inputs != null && inputs.size() > 0) { + + Map> tab2Cols = new HashMap>(); + Map> part2Cols = new HashMap>(); + + for (ReadEntity read : inputs) { + boolean part = read.getPartition() != null; + if (part) { + part2Cols.put(read.getPartition(), new ArrayList()); + } else { + tab2Cols.put(read.getTable(), new ArrayList()); + } + } + + if (op.equals(HiveOperation.CREATETABLE_AS_SELECT) + || op.equals(HiveOperation.QUERY)) { + SemanticAnalyzer querySem = (SemanticAnalyzer) sem; + ParseContext parseCtx = querySem.getParseContext(); + Map tsoTopMap = parseCtx.getTopToTable(); + + for (Map.Entry> topOpMap : querySem + .getParseContext().getTopOps().entrySet()) { + Operator topOp = topOpMap.getValue(); + if (topOp instanceof TableScanOperator + && tsoTopMap.containsKey(topOp)) { + TableScanOperator tableScanOp = (TableScanOperator) topOp; + Table tbl = tsoTopMap.get(tableScanOp); + List neededColumnIds = tableScanOp.getNeededColumnIDs(); + List columns = tbl.getCols(); + List 
cols = new ArrayList(); + if (neededColumnIds != null && neededColumnIds.size() > 0) { + for (int i = 0; i < neededColumnIds.size(); i++) { + cols.add(columns.get(neededColumnIds.get(i)).getName()); + } + } else { + for (int i = 0; i < columns.size(); i++) { + cols.add(columns.get(i).getName()); + } + } + if (tbl.isPartitioned()) { + String alias_id = topOpMap.getKey(); + PrunedPartitionList partsList = PartitionPruner.prune(parseCtx + .getTopToTable().get(topOp), parseCtx.getOpToPartPruner() + .get(topOp), parseCtx.getConf(), alias_id, parseCtx + .getPrunedPartitions()); + Set parts = new HashSet(); + parts.addAll(partsList.getConfirmedPartns()); + parts.addAll(partsList.getUnknownPartns()); + for (Partition part : parts) { + part2Cols.put(part, cols); + } + } else { + tab2Cols.put(tbl, cols); + } + } + } + } + + for (ReadEntity read : inputs) { + if (read.getPartition() != null) { + List cols = part2Cols.get(read.getPartition()); + if (cols != null && cols.size() > 0) { + pass = pass + && ss.getAuthorizer().authorize( + read.getPartition().getTable(), read.getPartition(), + cols, op.getInputRequiredPrivileges(), null); + } else { + pass = pass + && ss.getAuthorizer().authorize(read.getPartition(), + op.getInputRequiredPrivileges(), null); + } + } else if (read.getTable() != null) { + List cols = tab2Cols.get(read.getTable()); + if (cols != null && cols.size() > 0) { + pass = pass + && ss.getAuthorizer().authorize(read.getTable(), null, cols, + op.getInputRequiredPrivileges(), null); + } else { + pass = pass + && ss.getAuthorizer().authorize(read.getTable(), + op.getInputRequiredPrivileges(), null); + } + } + } + } + + } + return pass; + } + /** * @return The current query plan associated with this Driver, if any. */ @@ -685,7 +840,7 @@ releaseLocks(ctx.getHiveLocks()); return new CommandProcessorResponse(ret, errorMessage, SQLState); } - + ret = acquireReadWriteLocks(); if (ret != 0) { releaseLocks(ctx.getHiveLocks()); @@ -752,8 +907,6 @@ boolean noName = StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HADOOPJOBNAME)); int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); - int curJobNo = 0; - String queryId = plan.getQueryId(); String queryStr = plan.getQueryStr(); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy) @@ -33,6 +33,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -56,6 +57,7 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.ColumnPrivilegeBag; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Index; @@ -63,6 +65,12 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SecurityColumn; +import org.apache.hadoop.hive.metastore.api.SecurityDB; 
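
The input-side loop in Driver.doAuthorization() above narrows column-level checks to the columns a TableScanOperator actually reads, and falls back to the full column list when no pruning information is available. The same step restated standalone with its type parameters spelled out; the class and method names are illustrative only:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;

public class NeededColumnsSketch {

  /** Map needed column ids back to column names, or take every column when unknown. */
  public static List<String> columnsToAuthorize(List<FieldSchema> tableColumns,
      List<Integer> neededColumnIds) {
    List<String> cols = new ArrayList<String>();
    if (neededColumnIds != null && !neededColumnIds.isEmpty()) {
      for (Integer id : neededColumnIds) {
        cols.add(tableColumns.get(id).getName()); // only the columns the scan reads
      }
    } else {
      for (FieldSchema fs : tableColumns) {
        cols.add(fs.getName()); // no pruning info: check every column
      }
    }
    return cols;
  }
}
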
+import org.apache.hadoop.hive.metastore.api.SecurityTablePartition; +import org.apache.hadoop.hive.metastore.api.SecurityUser; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.QueryPlan; @@ -95,10 +103,18 @@ import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropIndexDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.GrantDesc; +import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL; import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.MsckDesc; +import org.apache.hadoop.hive.ql.plan.PrincipalDesc; +import org.apache.hadoop.hive.ql.plan.PrivilegeDesc; +import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; +import org.apache.hadoop.hive.ql.plan.RevokeDesc; +import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; +import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; @@ -108,6 +124,7 @@ import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; import org.apache.hadoop.hive.ql.plan.api.StageType; +import org.apache.hadoop.hive.ql.security.authorization.Privilege; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; @@ -118,6 +135,7 @@ import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.hive.shims.HadoopShims; import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; /** * DDLTask implementation. 
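For readers following the column-level check in Driver.doAuthorization() above: the needed-column IDs recorded on each TableScanOperator are mapped back to column names before being passed to the authorizer, falling back to the full column list when no pruning information is available. The stand-alone snippet below is a simplified, hypothetical sketch of that mapping (plain strings instead of FieldSchema objects; not part of this patch).

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Hypothetical illustration of the column selection performed in Driver.doAuthorization();
// the real code resolves FieldSchema objects returned by tbl.getCols().
public class NeededColumnsSketch {
  static List<String> neededColumnNames(List<Integer> neededColumnIds, List<String> allColumns) {
    List<String> cols = new ArrayList<String>();
    if (neededColumnIds != null && !neededColumnIds.isEmpty()) {
      for (Integer id : neededColumnIds) {
        cols.add(allColumns.get(id));      // only the columns the scan actually reads
      }
    } else {
      cols.addAll(allColumns);             // no pruning info: authorize every column
    }
    return cols;
  }

  public static void main(String[] args) {
    List<String> schema = Arrays.asList("key", "value", "ds");
    System.out.println(neededColumnNames(Arrays.asList(0, 2), schema)); // [key, ds]
    System.out.println(neededColumnNames(null, schema));                // [key, value, ds]
  }
}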
@@ -282,7 +300,33 @@ if (showParts != null) { return showPartitions(db, showParts); } - + + RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc(); + if (roleDDLDesc != null) { + return roleDDL(roleDDLDesc); + } + + GrantDesc grantDesc = work.getGrantDesc(); + if (grantDesc != null) { + return grantOrRevokePrivileges(grantDesc.getPrincipals(), grantDesc.getPrivileges(), grantDesc.getPrivilegeSubjectDesc(), true); + } + + RevokeDesc revokeDesc = work.getRevokeDesc(); + if (revokeDesc != null) { + return grantOrRevokePrivileges(revokeDesc.getPrincipals(), revokeDesc + .getPrivileges(), revokeDesc.getPrivilegeSubjectDesc(), false); + } + + ShowGrantDesc showGrantDesc = work.getShowGrantDesc(); + if (showGrantDesc != null) { + return showGrants(showGrantDesc); + } + + GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL(); + if (grantOrRevokeRoleDDL != null) { + return grantOrRevokeRole(grantOrRevokeRoleDDL); + } + ShowIndexesDesc showIndexes = work.getShowIndexesDesc(); if (showIndexes != null) { return showIndexes(db, showIndexes); @@ -306,6 +350,385 @@ return 0; } + private int grantOrRevokeRole(GrantRevokeRoleDDL grantOrRevokeRoleDDL) + throws HiveException { + try { + boolean grantRole = grantOrRevokeRoleDDL.getGrant(); + List principals = grantOrRevokeRoleDDL.getPrincipalDesc(); + List roles = grantOrRevokeRoleDDL.getRoles(); + for (PrincipalDesc principal : principals) { + String userName = principal.getName(); + boolean isRole = principal.getType() == PrincipalDesc.PrincipalType.ROLE; + boolean isGroup = principal.getType() == PrincipalDesc.PrincipalType.GROUP; + for (String roleName : roles) { + if (grantRole) { + db.addRoleMember(roleName, userName, isRole, isGroup); + } else { + db.removeRoleMember(roleName, userName, isRole, isGroup); + } + } + } + } catch (Exception e) { + e.printStackTrace(); + throw new HiveException(e); + } + return 0; + } + + private int showGrants(ShowGrantDesc showGrantDesc) throws HiveException { + try { + Path resFile = new Path(showGrantDesc.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + DataOutput outStream = fs.create(resFile); + PrincipalDesc principalDesc = showGrantDesc.getPrincipalDesc(); + boolean isGroup = (principalDesc.getType()== PrincipalDesc.PrincipalType.GROUP); + boolean isRole = (principalDesc.getType()== PrincipalDesc.PrincipalType.ROLE); + PrivilegeObjectDesc hiveObjectDesc = showGrantDesc.getHiveObj(); + String principalName = principalDesc.getName(); + if (hiveObjectDesc == null) { + //show user level privileges + List users = db.showUserLevelGrant(principalName, + isGroup, isRole); + if (users != null && users.size() > 0) { + boolean first = true; + for(SecurityUser usr: users) { + if (!first) { + outStream.write(terminator); + } else { + first = false; + } + + writeGrantInfor(outStream, isGroup, isRole, + principalName, null, null, null, null, + usr.getPrivileges(), usr.getCreateTime(), usr.getGrantor()); + + } + } + } else { + String obj = hiveObjectDesc.getObject(); + boolean notFound = true; + String dbName = null; + String tableName = null; + Table tableObj = null; + Database dbObj = null; + + if (hiveObjectDesc.getTable()) { + String[] dbTab = obj.split("\\."); + if (dbTab.length == 2) { + dbName = dbTab[0]; + tableName = dbTab[1]; + } else { + dbName = db.getCurrentDatabase(); + tableName = obj; + } + dbObj = db.getDatabase(dbName); + tableObj = db.getTable(dbName, tableName); + notFound = (dbObj == null || tableObj == null); + } else { + dbName = hiveObjectDesc.getObject(); + dbObj = 
db.getDatabase(dbName); + notFound = (dbObj == null); + } + if (notFound) { + throw new HiveException(obj + " cannot be found"); + } + + String partName = null; + if (hiveObjectDesc.getPartSpec() != null) { + partName = Warehouse + .makePartName(hiveObjectDesc.getPartSpec(), false); + } + + if (!hiveObjectDesc.getTable()) { + // show database level privileges + List<SecurityDB> dbs = db.showDBLevelGrant(principalName, isGroup, + isRole, dbName); + if (dbs != null && dbs.size() > 0) { + boolean first = true; + for(SecurityDB db: dbs) { + if (!first) { + outStream.write(terminator); + } else { + first = false; + } + + writeGrantInfor(outStream, isGroup, isRole, + principalName, dbName, null, null, null, + db.getPrivileges(), db.getCreateTime(), db.getGrantor()); + + } + } + + } else { + if (showGrantDesc.getColumns() != null) { + // show column level privileges + for (String columnName : showGrantDesc.getColumns()) { + List<SecurityColumn> columnss = db.showColumnGrant(principalName, + isGroup, isRole, dbName, tableName, partName, columnName); + if (columnss != null && columnss.size() > 0) { + boolean first = true; + for (SecurityColumn col : columnss) { + if (!first) { + outStream.write(terminator); + } else { + first = false; + } + + writeGrantInfor(outStream, isGroup, isRole, + principalName, dbName, tableName, partName, columnName, + col.getPrivileges(), col.getCreateTime(), col.getGrantor()); + } + } + } + } else if (hiveObjectDesc.getPartSpec() != null) { + // show partition level privileges + List<SecurityTablePartition> parts = db.showPartitionGrant(principalName, + isGroup, isRole, dbName, tableName, partName); + if (parts != null && parts.size() > 0) { + boolean first = true; + for(SecurityTablePartition part: parts) { + if (!first) { + outStream.write(terminator); + } else { + first = false; + } + + writeGrantInfor(outStream, isGroup, isRole, + principalName, dbName, tableName, partName, null, + part.getPrivileges(), part.getCreateTime(), part.getGrantor()); + + } + } + } + else { + // show table level privileges + List<SecurityTablePartition> tbls = db.showTableLevelGrant( + principalName, isGroup, isRole, dbName, tableName); + if (tbls != null && tbls.size() > 0) { + boolean first = true; + for(SecurityTablePartition tbl: tbls) { + if (!first) { + outStream.write(terminator); + } else { + first = false; + } + + writeGrantInfor(outStream, isGroup, isRole, + principalName, dbName, tableName, null, null, + tbl.getPrivileges(), tbl.getCreateTime(), tbl.getGrantor()); + + } + } + } + } + } + ((FSDataOutputStream) outStream).close(); + } catch (FileNotFoundException e) { + LOG.info("show grants: " + stringifyException(e)); + return 1; + } catch (IOException e) { + LOG.info("show grants: " + stringifyException(e)); + return 1; + } catch (Exception e) { + e.printStackTrace(); + throw new HiveException(e); + } + return 0; + } + + private int grantOrRevokePrivileges(List<PrincipalDesc> principals, + List<PrivilegeDesc> privileges, PrivilegeObjectDesc privSubjectDesc, + boolean grant) { + if (privileges == null || privileges.size() == 0) { + console.printError("No privilege found."); + return 1; + } + + String dbName = null; + String tableName = null; + Table tableObj = null; + Database dbObj = null; + + try { + + if (privSubjectDesc != null) { + if (privSubjectDesc.getPartSpec() != null && grant) { + throw new HiveException("Grant does not support partition level."); + } + String obj = privSubjectDesc.getObject(); + boolean notFound = true; + if (privSubjectDesc.getTable()) { + String[] dbTab = obj.split("\\."); + if (dbTab.length == 2) { + dbName = dbTab[0]; + tableName = 
dbTab[1]; + } else { + dbName = db.getCurrentDatabase(); + tableName = obj; + } + dbObj = db.getDatabase(dbName); + tableObj = db.getTable(dbName, tableName); + notFound = (dbObj == null || tableObj == null); + } else { + dbName = privSubjectDesc.getObject(); + dbObj = db.getDatabase(dbName); + notFound = (dbObj == null); + } + if (notFound) { + throw new HiveException(obj + " cannot be found"); + } + } + + PrivilegeBag privBag = new PrivilegeBag(); + String userPrivs = ""; + if (privSubjectDesc == null) { + boolean first = true; + for (int idx = 0; idx < privileges.size(); idx++) { + Privilege priv = privileges.get(idx).getPrivilege(); + if (privileges.get(idx).getColumns() != null + && privileges.get(idx).getColumns().size() > 0) { + throw new HiveException( + "For user-level privileges, column sets should be null. columns=" + + privileges.get(idx).getColumns().toString()); + } + if (!first) { + userPrivs = userPrivs + StringUtils.COMMA; + } else { + first = false; + } + userPrivs = userPrivs + priv.getPriv(); + } + privBag.setUserPrivileges(userPrivs); + } else { + List<ColumnPrivilegeBag> columnPrivBags = new ArrayList<ColumnPrivilegeBag>(); + Map<Database, String> dbPrivs = new HashMap<Database, String>(); + Map<org.apache.hadoop.hive.metastore.api.Table, String> tabPrivs = new HashMap<org.apache.hadoop.hive.metastore.api.Table, String>(); + Map<org.apache.hadoop.hive.metastore.api.Partition, String> partPrivs = new HashMap<org.apache.hadoop.hive.metastore.api.Partition, String>(); + org.apache.hadoop.hive.metastore.api.Partition partObj = null; + + if ((!tableObj.isPartitioned()) + && privSubjectDesc.getPartSpec() != null) { + throw new HiveException( + "Table is not partitioned, but partition spec found. partSpec=" + + privSubjectDesc.getPartSpec().toString()); + } + + String partName = null; + if (privSubjectDesc.getPartSpec() != null) { + partObj = db.getPartition(tableObj, privSubjectDesc.getPartSpec(), false).getTPartition(); + partName = Warehouse.makePartName(tableObj.getPartCols(), + partObj.getValues()); + } + + for (PrivilegeDesc privDesc : privileges) { + List<String> columns = privDesc.getColumns(); + Privilege priv = privDesc.getPrivilege(); + if (columns != null && columns.size() > 0) { + if (!priv.supportColumnLevel()) { + throw new HiveException(priv.getPriv() + + " does not support column level."); + } + if (privSubjectDesc == null || tableName == null) { + throw new HiveException( + "For user-level/database-level privileges, column sets should be null. 
columns=" + + columns); + } + Map columnPrivileges = new HashMap(); + for (int i = 0; i < columns.size(); i++) { + columnPrivileges.put(columns.get(i), priv.getPriv()); + } + ColumnPrivilegeBag columnPrivBag = new ColumnPrivilegeBag(dbName, + tableName, partName, columnPrivileges); + columnPrivBags.add(columnPrivBag); + } else { + if (privSubjectDesc.getTable()) { + if (privSubjectDesc.getPartSpec() != null && !grant) { + partPrivs.put(partObj, priv.getPriv()); + } else { + tabPrivs.put(tableObj.getTTable(), priv.getPriv()); + } + } else { + dbPrivs.put(dbObj, priv.getPriv()); + } + } + } + + if (columnPrivBags.size() > 0) { + privBag.setColumnPrivileges(columnPrivBags); + } + if (tabPrivs.size() > 0) { + privBag.setTablePrivileges(tabPrivs); + } + + if (partPrivs != null && partPrivs.size() > 0) { + privBag.setPartitionPrivileges(partPrivs); + } + if (dbPrivs.size() > 0) { + privBag.setDbPrivileges(dbPrivs); + } + } + + for (PrincipalDesc principal : principals) { + boolean isRole = (principal.getType() == PrincipalDesc.PrincipalType.ROLE); + boolean isGroup = (principal.getType() == PrincipalDesc.PrincipalType.GROUP); + if (grant) { + db + .grantPrivileges(principal.getName(), isRole, isGroup, privBag, + ""); + } else { + db + .revokePrivileges(principal.getName(), isRole, isGroup, privBag, + ""); + } + } + } catch (Exception e) { + console.printError("Error: " + e.getMessage()); + return 1; + } + + return 0; + } + + private int roleDDL(RoleDDLDesc roleDDLDesc) { + RoleDDLDesc.RoleOperation operation = roleDDLDesc.getOperation(); + try { + if (operation.equals(RoleDDLDesc.RoleOperation.CREATE_ROLE)) { + db.createRole(roleDDLDesc.getName()); + } else if (operation.equals(RoleDDLDesc.RoleOperation.DROP_ROLE)) { + db.dropRole(roleDDLDesc.getName()); + } else if (operation.equals(RoleDDLDesc.RoleOperation.SHOW_ROLE_GRANT)) { + List roles = db.showRoleGrant(roleDDLDesc.getName(), + roleDDLDesc.getRole(), roleDDLDesc.getGroup()); + if (roles != null && roles.size() > 0) { + Path resFile = new Path(roleDDLDesc.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + DataOutput outStream = fs.create(resFile); + for (Role role : roles) { + outStream.writeBytes("role name:" + role.getRoleName()); + outStream.write(terminator); + outStream.writeBytes("database:" + role.getDatabase().getName()); + outStream.write(terminator); + } + ((FSDataOutputStream) outStream).close(); + } + } else { + throw new HiveException("Unkown role operation " + + operation.getOperationName()); + } + } catch (HiveException e) { + e.printStackTrace(); + console.printError("Error in role operation " + + operation.getOperationName() + " on role name " + + roleDDLDesc.getName() + ", error message " + e.getMessage()); + return 1; + } catch (IOException e) { + LOG.info("role ddl exception: " + stringifyException(e)); + return 1; + } + + return 0; + } + private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException { db.dropIndex(db.getCurrentDatabase(), dropIdx.getTableName(), dropIdx.getIndexName(), true); @@ -363,7 +786,7 @@ Partition part = db .getPartition(tbl, addPartitionDesc.getPartSpec(), false); - work.getOutputs().add(new WriteEntity(part)); + work.getOutputs().add(new WriteEntity(part, true)); return 0; } @@ -394,7 +817,7 @@ throw new HiveException("Uable to update table"); } work.getInputs().add(new ReadEntity(tbl)); - work.getOutputs().add(new WriteEntity(tbl)); + work.getOutputs().add(new WriteEntity(tbl, true)); } else { Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false); if 
(part == null) { @@ -406,7 +829,7 @@ throw new HiveException(e); } work.getInputs().add(new ReadEntity(part)); - work.getOutputs().add(new WriteEntity(part)); + work.getOutputs().add(new WriteEntity(part, true)); } return 0; } @@ -1765,6 +2188,41 @@ return 0; } + + public static void writeGrantInfor(DataOutput outStream, boolean isGroup, boolean isRole, + String principalName, String dbName, String tableName, String partName, + String columnName, String privileges, int createTime, String grantor) + throws IOException { + + if (dbName != null) { + writeKeyValuePair(outStream, "database", dbName); + } + if (tableName != null) { + writeKeyValuePair(outStream, "table", tableName); + } + if (partName != null) { + writeKeyValuePair(outStream, "partition", partName); + } + if (columnName != null) { + writeKeyValuePair(outStream, "columnName", columnName); + } + + writeKeyValuePair(outStream, "userName", principalName); + writeKeyValuePair(outStream, "isRole", "" + isRole); + writeKeyValuePair(outStream, "isGroup", "" + isGroup); + writeKeyValuePair(outStream, "privileges", privileges); + writeKeyValuePair(outStream, "grantTime", "" + createTime); + writeKeyValuePair(outStream, "grantor", grantor); + } + + private static void writeKeyValuePair(DataOutput outStream, String key, + String value) throws IOException { + outStream.write(terminator); + outStream.writeBytes(key); + outStream.write(separator); + outStream.writeBytes(value); + outStream.write(separator); + } private void writeFileSystemStats(DataOutput outStream, List locations, Path tabLoc, boolean partSpecified, int indent) throws IOException { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (working copy) @@ -174,7 +174,7 @@ .getTableName(), tbd.getReplace(), new Path(tbd.getTmpDir()), tbd.getHoldDDLTime()); if (work.getOutputs() != null) { - work.getOutputs().add(new WriteEntity(table)); + work.getOutputs().add(new WriteEntity(table, true)); } } else { LOG.info("Partition is: " + tbd.getPartitionSpec().toString()); @@ -210,7 +210,7 @@ for (LinkedHashMap partSpec: dp) { Partition partn = db.getPartition(table, partSpec, false); - WriteEntity enty = new WriteEntity(partn); + WriteEntity enty = new WriteEntity(partn, true); if (work.getOutputs() != null) { work.getOutputs().add(enty); } @@ -243,7 +243,7 @@ dc = new DataContainer(table.getTTable(), partn.getTPartition()); // add this partition to post-execution hook if (work.getOutputs() != null) { - work.getOutputs().add(new WriteEntity(partn)); + work.getOutputs().add(new WriteEntity(partn, true)); } } } Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/AuthorizationException.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/AuthorizationException.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/AuthorizationException.java (revision 0) @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.metadata; + +public class AuthorizationException extends RuntimeException { + /** + */ + private static final long serialVersionUID = 1L; + + public AuthorizationException() { + super(); + } + + /** + * Constructs an {@link AuthorizationException} with the specified detail + * message. + * + * @param s + * the detail message. + */ + public AuthorizationException(String message) { + super(message); + } + + /** + * Constructs an {@link AuthorizationException} with the specified cause. + * + * @param cause + * the cause + */ + public AuthorizationException(Throwable cause) { + super(cause); + } + + + public AuthorizationException(String message, Throwable cause) { + super(message, cause); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (working copy) @@ -58,11 +58,20 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SecurityColumn; +import org.apache.hadoop.hive.metastore.api.SecurityDB; +import org.apache.hadoop.hive.metastore.api.SecurityTablePartition; +import org.apache.hadoop.hive.metastore.api.SecurityUser; import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.model.MSecurityUser; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.index.HiveIndexHandler; import org.apache.hadoop.hive.serde2.Deserializer; @@ -737,7 +746,7 @@ } catch (NoSuchObjectException e) { if (throwException) { LOG.error(StringUtils.stringifyException(e)); - throw new InvalidTableException("Table not found ", tableName); + throw new InvalidTableException("Table " + tableName + " not found ", tableName); } return null; } catch (Exception e) { @@ -877,6 +886,52 @@ } } + public boolean grantPrivileges(String userName, boolean isRole, + boolean isGroup, PrivilegeBag privileges, String grantor) + throws HiveException { + try { + return getMSC().grant_privileges(userName, isRole, isGroup, privileges, + grantor); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param userName + * principal name + * @param isRole + * is the given principal name a role + * @param isGroup + * is the given principal name a group + * @param privileges + * a bag of privileges + * @return + * @throws HiveException + */ + 
public boolean revokePrivileges(String userName, boolean isRole, + boolean isGroup, PrivilegeBag privileges, String grantor) + throws HiveException { + try { + return getMSC().revoke_privileges(userName, isRole, isGroup, privileges); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param dbName + * @return + * @throws HiveException + */ + public Database getDatabase(String dbName) throws HiveException { + try { + return getMSC().getDatabase(dbName); + } catch (Exception e) { + throw new HiveException(e); + } + } + /** * Query metadata to see if a database with the given name already exists. * @@ -1364,6 +1419,218 @@ public void setCurrentDatabase(String currentDatabase) { this.currentDatabase = currentDatabase; } + + public void createRole(String roleName) throws HiveException { + try { + getMSC().create_role(roleName, "", this.getCurrentDatabase()); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void dropRole(String roleName) throws HiveException { + try { + getMSC().drop_role(roleName, this.getCurrentDatabase()); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List showRoleGrant(String principalName, boolean isRole, + boolean isGroup) throws HiveException { + try { + return getMSC().list_roles(principalName, isRole, isGroup, this.getCurrentDatabase()); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public boolean addRoleMember(String roleName, String userName, + boolean isRole, boolean isGroup) throws HiveException { + try { + return getMSC().add_role_member(roleName, userName, isRole, isGroup, + this.getCurrentDatabase()); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public boolean removeRoleMember(String roleName, String userName, + boolean isRole, boolean isGroup) throws HiveException { + try { + return getMSC().remove_role_member(roleName, userName, isRole, isGroup, + this.getCurrentDatabase()); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List listRoles(String userName, boolean isRole, boolean isGroup) + throws HiveException { + try { + return getMSC().list_roles(userName, isRole, isGroup, this.getCurrentDatabase()); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List showUserLevelGrant(String principalName, + boolean isRole, boolean isGroup) throws HiveException { + try { + return getMSC().list_security_user_grant(principalName, isRole, isGroup); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param user_name + * user name + * @param group_names + * group names + * @return + */ + public PrincipalPrivilegeSet get_user_privilege_set(String user_name, + List group_names) throws HiveException { + try { + return getMSC().get_user_privilege_set(user_name, group_names); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param db_name + * database name + * @param user_name + * user name + * @param group_names + * group names + * @return + */ + public PrincipalPrivilegeSet get_db_privilege_set(String db_name, + String user_name, List group_names) throws HiveException { + try { + return getMSC().get_db_privilege_set(db_name, user_name, group_names); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param db_name + * db name + * @param table_name + * table name + * @param user_name + * user name + * @param group_names + * group names + * @return + */ + public PrincipalPrivilegeSet get_table_privilege_set(String db_name, + String 
table_name, String user_name, List group_names) + throws HiveException{ + try { + return getMSC().get_table_privilege_set(db_name, table_name, user_name, + group_names); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param db_name + * db name + * @param table_name + * table name + * @param part_name + * partition name + * @param user_name + * user name + * @param group_names + * group names + * @return + */ + public PrincipalPrivilegeSet get_partition_privilege_set(String db_name, + String table_name, String part_name, String user_name, + List group_names) throws HiveException { + try { + return getMSC().get_partition_privilege_set(db_name, table_name, part_name, user_name, + group_names); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param db_name + * database name + * @param table_name + * table name + * @param part_name + * partition name + * @param column_name + * column name + * @param user_name + * user name + * @param group_names + * group names + * @return + */ + public PrincipalPrivilegeSet get_column_privilege_set(String db_name, + String table_name, String part_name, String column_name, + String user_name, List group_names) throws HiveException { + try { + return getMSC().get_column_privilege_set(db_name, table_name, part_name, + column_name, user_name, group_names); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List showDBLevelGrant(String principalName, + boolean isGroup, boolean isRole, String dbName) throws HiveException { + try { + return getMSC().list_security_db_grant(principalName, isGroup, isRole, + dbName); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List showTableLevelGrant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName) throws HiveException { + try { + return getMSC().list_security_table_grant(principalName, isGroup, + isRole, dbName, tableName); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List showPartitionGrant( + String principalName, boolean isGroup, boolean isRole, String dbName, + String tableName, String partName) throws HiveException { + try { + return getMSC().list_security_partition_grant(principalName, isGroup, + isRole, dbName, tableName, partName); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List showColumnGrant(String principal_name, + boolean isGroup, boolean isRole, String dbName, String tableName, + String partName, String columnName) throws HiveException { + try { + return getMSC().list_security_column_grant(principal_name, isGroup, + isRole, dbName, tableName, partName, columnName); + } catch (Exception e) { + throw new HiveException(e); + } + } static private void checkPaths(FileSystem fs, FileStatus[] srcs, Path destf, boolean replace) throws HiveException { Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java (working copy) @@ -28,7 +28,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; -import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.Task; import 
org.apache.hadoop.hive.ql.exec.UnionOperator; @@ -36,6 +35,7 @@ import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.parse.ParseContext; +import org.apache.hadoop.hive.ql.plan.HiveQueryReadWrite; import org.apache.hadoop.hive.ql.plan.MapJoinDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; @@ -242,6 +242,7 @@ * to the hooks. */ private Set outputs; + private HiveQueryReadWrite hiveInAndOut; public GenMRProcContext() { } @@ -265,6 +266,7 @@ * the set of input tables/partitions generated by the walk * @param outputs * the set of destinations generated by the walk + * @param hiveInAndOut */ public GenMRProcContext( HiveConf conf, @@ -273,7 +275,7 @@ List> mvTask, List> rootTasks, LinkedHashMap, GenMapRedCtx> mapCurrCtx, - Set inputs, Set outputs) { + Set inputs, Set outputs, HiveQueryReadWrite hiveInAndOut) { this.conf = conf; this.opTaskMap = opTaskMap; this.seenOps = seenOps; @@ -292,6 +294,7 @@ rootOps.addAll(parseCtx.getTopOps().values()); unionTaskMap = new HashMap(); mapJoinTaskMap = new HashMap, GenMRMapJoinCtx>(); + this.hiveInAndOut = hiveInAndOut; } /** @@ -493,11 +496,13 @@ unionTaskMap.put(op, uTask); } - public GenMRMapJoinCtx getMapJoinCtx(AbstractMapJoinOperator op) { + public GenMRMapJoinCtx getMapJoinCtx( + AbstractMapJoinOperator op) { return mapJoinTaskMap.get(op); } - public void setMapJoinCtx(AbstractMapJoinOperator op, GenMRMapJoinCtx mjCtx) { + public void setMapJoinCtx(AbstractMapJoinOperator op, + GenMRMapJoinCtx mjCtx) { mapJoinTaskMap.put(op, mjCtx); } @@ -529,4 +534,12 @@ public void setConf(HiveConf conf) { this.conf = conf; } + + public HiveQueryReadWrite getHiveInAndOut() { + return hiveInAndOut; + } + + public void setHiveInAndOut(HiveQueryReadWrite hiveInAndOut) { + this.hiveInAndOut = hiveInAndOut; + } } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (working copy) @@ -50,6 +50,7 @@ import org.apache.hadoop.hive.ql.metadata.InvalidTableException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.plan.HiveQueryReadWrite; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; @@ -71,6 +72,8 @@ protected Context ctx; protected HashMap idToTableNameMap; + + protected HiveQueryReadWrite hiveInAndOut; public static int HIVE_COLUMN_ORDER_ASC = 1; public static int HIVE_COLUMN_ORDER_DESC = 0; @@ -222,6 +225,7 @@ idToTableNameMap = new HashMap(); inputs = new LinkedHashSet(); outputs = new LinkedHashSet(); + hiveInAndOut = new HiveQueryReadWrite(); } catch (Exception e) { throw new SemanticException(e); } @@ -719,4 +723,8 @@ } return partSpec; } + + public Hive getDb() { + return db; + } } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (working copy) @@ -68,6 +68,13 @@ import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; import 
org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; +import org.apache.hadoop.hive.ql.plan.GrantDesc; +import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL; +import org.apache.hadoop.hive.ql.plan.PrincipalDesc; +import org.apache.hadoop.hive.ql.plan.PrivilegeDesc; +import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; +import org.apache.hadoop.hive.ql.plan.RevokeDesc; +import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; import org.apache.hadoop.hive.ql.plan.DescTableDesc; @@ -79,6 +86,7 @@ import org.apache.hadoop.hive.ql.plan.MsckDesc; import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; +import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; @@ -88,6 +96,8 @@ import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.plan.AlterTableDesc.AlterTableTypes; +import org.apache.hadoop.hive.ql.security.authorization.Privilege; +import org.apache.hadoop.hive.ql.security.authorization.PrivilegeRegistry; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.mapred.TextInputFormat; @@ -246,11 +256,232 @@ analyzeDropDatabase(ast); } else if (ast.getToken().getType() == TOK_SWITCHDATABASE) { analyzeSwitchDatabase(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_CREATEROLE) { + analyzeCreateRole(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_DROPROLE) { + analyzeDropRole(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_SHOW_ROLE_GRANT) { + ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); + analyzeShowRoleGrant(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_GRANT_ROLE) { + anaylzeGrantRevokeRole(true, ast); + } else if (ast.getToken().getType() == HiveParser.TOK_REVOKE_ROLE) { + anaylzeGrantRevokeRole(false, ast); + } else if (ast.getToken().getType() == HiveParser.TOK_GRANT) { + analyzeGrant(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_SHOW_GRANT) { + ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); + analyzeShowGrant(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_REVOKE) { + analyzeRevoke(ast); } else { throw new SemanticException("Unsupported command."); } } + private void anaylzeGrantRevokeRole(boolean grant, ASTNode ast) { + List principalDesc = analyzePrinciplaListDef( + (ASTNode) ast.getChild(0)); + List roles = new ArrayList(); + for (int i = 1; i < ast.getChildCount(); i++) { + roles.add(unescapeIdentifier(ast.getChild(i).getText())); + } + + GrantRevokeRoleDDL grantRevokeRoleDDL = new GrantRevokeRoleDDL(grant, roles, principalDesc); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + grantRevokeRoleDDL), conf)); + } + + private void analyzeShowGrant(ASTNode ast) throws SemanticException { + PrivilegeObjectDesc privHiveObj = null; + + ASTNode principal = (ASTNode) ast.getChild(0); + PrincipalDesc.PrincipalType type = null; + switch (principal.getType()) { + case HiveParser.TOK_USER: + type = PrincipalDesc.PrincipalType.USER; + break; + case HiveParser.TOK_GROUP: + type = PrincipalDesc.PrincipalType.GROUP; + break; + case HiveParser.TOK_ROLE: + 
type = PrincipalDesc.PrincipalType.ROLE; + break; + } + String principlaName = unescapeIdentifier(principal.getChild(0).getText()); + PrincipalDesc principalDesc = new PrincipalDesc(principlaName, type); + List cols = null; + if (ast.getChildCount() > 1) { + ASTNode child = (ASTNode) ast.getChild(1); + if (child.getToken().getType() == HiveParser.TOK_PRIV_OBJECT_COL) { + privHiveObj = new PrivilegeObjectDesc(); + privHiveObj.setTable(child.getChild(0) != null); + privHiveObj.setObject(unescapeIdentifier(child.getChild(1).getText())); + if (child.getChildCount() > 2) { + for (int i = 0; i < child.getChildCount(); i++) { + ASTNode grandChild = (ASTNode) child.getChild(i); + if (grandChild.getToken().getType() == HiveParser.TOK_PARTSPEC) { + privHiveObj.setPartSpec(DDLSemanticAnalyzer.getPartSpec(grandChild)); + } else if (grandChild.getToken().getType() == HiveParser.TOK_TABCOLNAME) { + cols = getColumnNames((ASTNode) grandChild); + } + } + } + } + } + + if (privHiveObj == null && cols != null) { + throw new SemanticException( + "For user-level privielges, column sets should be null. columns=" + + cols.toString()); + } + + ShowGrantDesc showGrant = new ShowGrantDesc(ctx.getResFile().toString(), + principalDesc, privHiveObj, cols); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showGrant), conf)); + } + + private void analyzeGrant(ASTNode ast) throws SemanticException { + List privilegeDesc = analyzePrivilegeListDef( + (ASTNode) ast.getChild(0)); + List principalDesc = analyzePrinciplaListDef( + (ASTNode) ast.getChild(1)); + boolean grantOption = false; + PrivilegeObjectDesc subjectObj = null; + + if (ast.getChildCount() > 2) { + for (int i = 2; i < ast.getChildCount(); i++) { + ASTNode astChild = (ASTNode) ast.getChild(i); + if (astChild.getType() == HiveParser.TOK_GRANT_WITH_OPTION) { + grantOption = true; + } else if (astChild.getType() == HiveParser.TOK_PRIV_OBJECT) { + subjectObj = analyzePrivilegeObject(astChild); + } + } + } + + GrantDesc grantDesc = new GrantDesc(subjectObj, privilegeDesc, principalDesc, grantOption); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + grantDesc), conf)); + } + + private void analyzeRevoke(ASTNode ast) throws SemanticException { + List privilegeDesc = analyzePrivilegeListDef( + (ASTNode) ast.getChild(0)); + List principalDesc = analyzePrinciplaListDef( + (ASTNode) ast.getChild(1)); + PrivilegeObjectDesc hiveObj = null; + if (ast.getChildCount() > 2) { + ASTNode astChild = (ASTNode) ast.getChild(2); + hiveObj = analyzePrivilegeObject(astChild); + } + + RevokeDesc revokeDesc = new RevokeDesc(privilegeDesc, principalDesc, hiveObj); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + revokeDesc), conf)); + } + + + private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast) + throws SemanticException { + PrivilegeObjectDesc subject = new PrivilegeObjectDesc(); + subject.setTable(ast.getChild(0) != null); + subject.setObject(unescapeIdentifier(ast.getChild(1).getText())); + if (ast.getChildCount() > 2) { + ASTNode astChild = (ASTNode) ast.getChild(2); + if (astChild.getToken().getType() == HiveParser.TOK_PARTSPEC) { + subject.setPartSpec(DDLSemanticAnalyzer.getPartSpec(astChild)); + } + } + return subject; + } + + private List analyzePrinciplaListDef(ASTNode node) { + List principalList = new ArrayList(); + + for (int i = 0; i < node.getChildCount(); i++) { + ASTNode child = (ASTNode) node.getChild(i); + PrincipalDesc.PrincipalType type = null; + switch (child.getType()) { + case 
HiveParser.TOK_USER: + type = PrincipalDesc.PrincipalType.USER; + break; + case HiveParser.TOK_GROUP: + type = PrincipalDesc.PrincipalType.GROUP; + break; + case HiveParser.TOK_ROLE: + type = PrincipalDesc.PrincipalType.ROLE; + break; + } + String principlaName = unescapeIdentifier(child.getChild(0).getText()); + PrincipalDesc principalDesc = new PrincipalDesc(principlaName, type); + principalList.add(principalDesc); + } + + return principalList; + } + + private List analyzePrivilegeListDef(ASTNode node) + throws SemanticException { + List ret = new ArrayList(); + for (int i = 0; i < node.getChildCount(); i++) { + ASTNode privilegeDef = (ASTNode) node.getChild(i); + + String privilegeStr = unescapeIdentifier(privilegeDef.getChild(0) + .getText()); + Privilege privObj = PrivilegeRegistry.getPrivilege(privilegeStr); + if (privObj == null) { + throw new SemanticException("undefined privilege " + privilegeStr); + } + List cols = null; + if (privilegeDef.getChildCount() > 1) { + cols = getColumnNames((ASTNode) privilegeDef.getChild(1)); + } + PrivilegeDesc privilegeDesc = new PrivilegeDesc(privObj, cols); + ret.add(privilegeDesc); + } + return ret; + } + + private void analyzeCreateRole(ASTNode ast) { + String roleName = unescapeIdentifier(ast.getChild(0).getText()); + RoleDDLDesc createRoleDesc = new RoleDDLDesc(roleName, + RoleDDLDesc.RoleOperation.CREATE_ROLE); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + createRoleDesc), conf)); + } + + private void analyzeDropRole(ASTNode ast) { + String roleName = unescapeIdentifier(ast.getChild(0).getText()); + RoleDDLDesc createRoleDesc = new RoleDDLDesc(roleName, + RoleDDLDesc.RoleOperation.DROP_ROLE); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + createRoleDesc), conf)); + } + + private void analyzeShowRoleGrant(ASTNode ast) { + ASTNode child = (ASTNode) ast.getChild(0); + boolean isRole = false; + boolean isGroup = false; + switch (child.getType()) { + case HiveParser.TOK_USER: + break; + case HiveParser.TOK_GROUP: + isGroup = true; + break; + case HiveParser.TOK_ROLE: + isRole = true; + break; + } + String principalName = unescapeIdentifier(child.getChild(0).getText()); + RoleDDLDesc createRoleDesc = new RoleDDLDesc(principalName, isRole, isGroup, + RoleDDLDesc.RoleOperation.SHOW_ROLE_GRANT); + createRoleDesc.setResFile(ctx.getResFile().toString()); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + createRoleDesc), conf)); + } + private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { String dbName = unescapeIdentifier(ast.getChild(0).getText()); boolean ifNotExists = false; Index: ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (working copy) @@ -188,6 +188,23 @@ TOK_LATERAL_VIEW; TOK_TABALIAS; TOK_ANALYZE; +TOK_CREATEROLE; +TOK_DROPROLE; +TOK_GRANT; +TOK_REVOKE; +TOK_SHOW_GRANT; +TOK_PRIVILEGE_LIST; +TOK_PRIVILEGE; +TOK_PRINCIPAL_NAME; +TOK_USER; +TOK_GROUP; +TOK_ROLE; +TOK_GRANT_WITH_OPTION; +TOK_PRIV_OBJECT; +TOK_PRIV_OBJECT_COL; +TOK_GRANT_ROLE; +TOK_REVOKE_ROLE; +TOK_SHOW_ROLE_GRANT; TOK_SHOWINDEXES; TOK_INDEXCOMMENT; } @@ -260,6 +277,14 @@ | analyzeStatement | lockStatement | unlockStatement + | createRoleStatement + | dropRoleStatement + | grantPrivileges + | revokePrivileges + | showGrants + | showRoleGrants + | grantRole + | revokeRole ; ifExists @@ 
-694,6 +719,114 @@ : KW_UNLOCK KW_TABLE Identifier partitionSpec? -> ^(TOK_UNLOCKTABLE Identifier partitionSpec?) ; +createRoleStatement +@init { msgs.push("create role"); } +@after { msgs.pop(); } + : KW_CREATE KW_ROLE roleName=Identifier + -> ^(TOK_CREATEROLE $roleName) + ; + +dropRoleStatement +@init {msgs.push("drop role");} +@after {msgs.pop();} + : KW_DROP KW_ROLE roleName=Identifier + -> ^(TOK_DROPROLE $roleName) + ; + +grantPrivileges +@init {msgs.push("grant privileges");} +@after {msgs.pop();} + : KW_GRANT privList=privilegeList + privilegeObject? + KW_TO principalSpecification + (KW_WITH withOption)? + -> ^(TOK_GRANT $privList principalSpecification privilegeObject? withOption?) + ; + +revokePrivileges +@init {msgs.push("revoke privileges");} +@after {msgs.pop();} + : KW_REVOKE privilegeList privilegeObject? KW_FROM principalSpecification + -> ^(TOK_REVOKE privilegeList principalSpecification privilegeObject?) + ; + +grantRole +@init {msgs.push("grant role");} +@after {msgs.pop();} + : KW_GRANT KW_ROLE Identifier (COMMA Identifier)* KW_TO principalSpecification + -> ^(TOK_GRANT_ROLE principalSpecification Identifier+) + ; + +revokeRole +@init {msgs.push("revoke role");} +@after {msgs.pop();} + : KW_REVOKE KW_ROLE Identifier (COMMA Identifier)* KW_FROM principalSpecification + -> ^(TOK_REVOKE_ROLE principalSpecification Identifier+) + ; + +showRoleGrants +@init {msgs.push("show grants");} +@after {msgs.pop();} + : KW_SHOW KW_ROLE KW_GRANT principalName + -> ^(TOK_SHOW_ROLE_GRANT principalName) + ; + +showGrants +@init {msgs.push("show grants");} +@after {msgs.pop();} + : KW_SHOW KW_GRANT principalName privilegeIncludeColObject? + -> ^(TOK_SHOW_GRANT principalName privilegeIncludeColObject?) + ; + +privilegeIncludeColObject +@init {msgs.push("privilege object including columns");} +@after {msgs.pop();} + : KW_ON (table=KW_TABLE|KW_DATABASE) Identifier (LPAREN cols=columnNameList RPAREN)? partitionSpec? + -> ^(TOK_PRIV_OBJECT_COL $table Identifier $cols? partitionSpec?) + ; + +privilegeObject +@init {msgs.push("privilege subject");} +@after {msgs.pop();} + : KW_ON (table=KW_TABLE|KW_DATABASE) Identifier partitionSpec? + -> ^(TOK_PRIV_OBJECT $table Identifier partitionSpec?) + ; + +privilegeList +@init {msgs.push("grant privilege list");} +@after {msgs.pop();} + : privilegeDef (COMMA privilegeDef)* + -> ^(TOK_PRIVILEGE_LIST privilegeDef+) + ; + +privilegeDef +@init {msgs.push("grant privilege");} +@after {msgs.pop();} + : Identifier (LPAREN cols=columnNameList RPAREN)? + -> ^(TOK_PRIVILEGE Identifier $cols?) + ; + +principalSpecification +@init { msgs.push("user/group/role name list"); } +@after { msgs.pop(); } + : principalName (COMMA principalName)* -> ^(TOK_PRINCIPAL_NAME principalName+) + ; + +principalName +@init {msgs.push("user|group|role name");} +@after {msgs.pop();} + : KW_USER Identifier -> ^(TOK_USER Identifier) + | KW_GROUP Identifier -> ^(TOK_GROUP Identifier) + | KW_ROLE Identifier -> ^(TOK_ROLE Identifier) + ; + +withOption +@init {msgs.push("grant with option");} +@after {msgs.pop();} + : KW_GRANT KW_OPTION + -> ^(TOK_GRANT_WITH_OPTION) + ; + metastoreCheck @init { msgs.push("metastore check statement"); } @after { msgs.pop(); } @@ -1937,6 +2070,10 @@ KW_COMPUTE: 'COMPUTE'; KW_STATISTICS: 'STATISTICS'; KW_USE: 'USE'; +KW_USER: 'USER'; +KW_ROLE: 'ROLE'; +KW_OPTION: 'OPTION'; + // Operators // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work. 
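For reference, the statements the Hive.g rules above are intended to accept can be smoke-tested through the existing ParseDriver. The snippet below is a hypothetical check, not part of the patch: the privilege, role, user, and group names are made up, and only parsing (not semantic analysis or authorization) is exercised.

import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.ParseException;

// Hypothetical smoke test for the grammar additions; assumes the standard
// ParseDriver.parse(String) entry point already used by Driver.
public class GrantSyntaxSmokeTest {
  public static void main(String[] args) throws ParseException {
    String[] statements = {
        "CREATE ROLE role1",
        "GRANT ROLE role1 TO USER hive_test_user",
        "GRANT Select ON TABLE src TO USER hive_test_user",
        "GRANT Select(key) ON TABLE src TO GROUP hive_test_group WITH GRANT OPTION",
        "REVOKE Select ON TABLE src FROM USER hive_test_user",
        "SHOW GRANT USER hive_test_user ON TABLE src",
        "SHOW ROLE GRANT USER hive_test_user",
        "DROP ROLE role1"
    };
    ParseDriver pd = new ParseDriver();
    for (String stmt : statements) {
      // toStringTree() prints the resulting AST, e.g. (TOK_CREATEROLE role1)
      System.out.println(stmt + " -> " + pd.parse(stmt).toStringTree());
    }
  }
}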
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy) @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.ql.parse; -import static org.apache.hadoop.util.StringUtils.stringifyException; - import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; @@ -125,6 +123,7 @@ import org.apache.hadoop.hive.ql.plan.FilterDesc; import org.apache.hadoop.hive.ql.plan.ForwardDesc; import org.apache.hadoop.hive.ql.plan.GroupByDesc; +import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.plan.JoinCondDesc; import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.ql.plan.LateralViewForwardDesc; @@ -888,7 +887,7 @@ } catch (HiveException e) { // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils - LOG.error(stringifyException(e)); + LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); throw new SemanticException(e.getMessage(), e); } } @@ -924,7 +923,7 @@ // an old SQL construct which has been eliminated in a later Hive // version, so we need to provide full debugging info to help // with fixing the view definition. - LOG.error(stringifyException(e)); + LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); StringBuilder sb = new StringBuilder(); sb.append(e.getMessage()); ErrorMsg.renderOrigin(sb, viewOrigin); @@ -5867,7 +5866,7 @@ tsDesc.setStatsAggPrefix(k); // set up WritenEntity for replication - outputs.add(new WriteEntity(tab)); + outputs.add(new WriteEntity(tab, true)); // add WriteEntity for each matching partition if (tab.isPartitioned()) { @@ -5878,7 +5877,7 @@ if (partitions != null) { for (Partition partn : partitions) { // inputs.add(new ReadEntity(partn)); // is this needed at all? - outputs.add(new WriteEntity(partn)); + outputs.add(new WriteEntity(partn, true)); } } } @@ -6135,7 +6134,7 @@ } catch (HiveException e) { // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils - LOG.error(stringifyException(e)); + LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); throw new SemanticException(e.getMessage(), e); } @@ -6235,7 +6234,7 @@ new ArrayList>(), getParseContext(), mvTask, rootTasks, new LinkedHashMap, GenMapRedCtx>(), - inputs, outputs); + inputs, outputs, hiveInAndOut); // create a walker which walks the tree in a DFS manner while maintaining // the operator stack. @@ -6491,6 +6490,9 @@ ASTNode child = ast; LOG.info("Starting Semantic Analysis"); + + //overwrite this if needed. + SessionState.get().setCommandType(HiveOperation.QUERY); // analyze create table command if (ast.getToken().getType() == HiveParser.TOK_CREATETABLE) { @@ -6503,12 +6505,13 @@ // analyze create view command if (ast.getToken().getType() == HiveParser.TOK_CREATEVIEW) { child = analyzeCreateView(ast, qb); + SessionState.get().setCommandType(HiveOperation.CREATEVIEW); if (child == null) { return; } viewSelect = child; } - + // continue analyzing from the child ASTNode. 
doPhase1(child, qb, initPhase1Ctx()); LOG.info("Completed phase 1 of Semantic Analysis"); @@ -7051,6 +7054,9 @@ storageFormat.storageHandler, shared.serdeProps, tblProps, ifNotExists); validateCreateTable(crtTblDesc); + // outputs is empty, which means this create table happens in the current + // database. + SessionState.get().setCommandType(HiveOperation.CREATETABLE); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblDesc), conf)); break; @@ -7058,6 +7064,7 @@ case CTLT: // create table like CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(tableName, isExt, location, ifNotExists, likeTableName); + SessionState.get().setCommandType(HiveOperation.CREATETABLE); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblLikeDesc), conf)); break; @@ -7082,6 +7089,8 @@ tblProps, ifNotExists); qb.setTableDesc(crtTblDesc); + SessionState.get().setCommandType(HiveOperation.CREATETABLE_AS_SELECT); + return selectStmt; default: throw new SemanticException("Unrecognized command."); Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (working copy) @@ -21,6 +21,7 @@ import java.util.HashMap; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.session.SessionState; /** @@ -29,62 +30,74 @@ */ public final class SemanticAnalyzerFactory { - static HashMap commandType = new HashMap(); - static HashMap tablePartitionCommandType = new HashMap(); + static HashMap commandType = new HashMap(); + static HashMap tablePartitionCommandType = new HashMap(); static { - commandType.put(HiveParser.TOK_EXPLAIN, "EXPLAIN"); - commandType.put(HiveParser.TOK_LOAD, "LOAD"); - commandType.put(HiveParser.TOK_CREATEDATABASE, "CREATEDATABASE"); - commandType.put(HiveParser.TOK_DROPDATABASE, "DROPDATABASE"); - commandType.put(HiveParser.TOK_SWITCHDATABASE, "SWITCHDATABASE"); - commandType.put(HiveParser.TOK_CREATETABLE, "CREATETABLE"); - commandType.put(HiveParser.TOK_DROPTABLE, "DROPTABLE"); - commandType.put(HiveParser.TOK_DESCTABLE, "DESCTABLE"); - commandType.put(HiveParser.TOK_DESCFUNCTION, "DESCFUNCTION"); - commandType.put(HiveParser.TOK_MSCK, "MSCK"); - commandType.put(HiveParser.TOK_ALTERTABLE_ADDCOLS, "ALTERTABLE_ADDCOLS"); - commandType.put(HiveParser.TOK_ALTERTABLE_REPLACECOLS, "ALTERTABLE_REPLACECOLS"); - commandType.put(HiveParser.TOK_ALTERTABLE_RENAMECOL, "ALTERTABLE_RENAMECOL"); - commandType.put(HiveParser.TOK_ALTERTABLE_RENAME, "ALTERTABLE_RENAME"); - commandType.put(HiveParser.TOK_ALTERTABLE_DROPPARTS, "ALTERTABLE_DROPPARTS"); - commandType.put(HiveParser.TOK_ALTERTABLE_ADDPARTS, "ALTERTABLE_ADDPARTS"); - commandType.put(HiveParser.TOK_ALTERTABLE_TOUCH, "ALTERTABLE_TOUCH"); - commandType.put(HiveParser.TOK_ALTERTABLE_ARCHIVE, "ALTERTABLE_ARCHIVE"); - commandType.put(HiveParser.TOK_ALTERTABLE_UNARCHIVE, "ALTERTABLE_UNARCHIVE"); - commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, "ALTERTABLE_PROPERTIES"); - commandType.put(HiveParser.TOK_ALTERTABLE_SERIALIZER, "ALTERTABLE_SERIALIZER"); - commandType.put(HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES, "ALTERTABLE_SERDEPROPERTIES"); - commandType.put(HiveParser.TOK_SHOWDATABASES, "SHOWDATABASES"); - commandType.put(HiveParser.TOK_SHOWTABLES, "SHOWTABLES"); - 
commandType.put(HiveParser.TOK_SHOW_TABLESTATUS, "SHOW_TABLESTATUS"); - commandType.put(HiveParser.TOK_SHOWFUNCTIONS, "SHOWFUNCTIONS"); - commandType.put(HiveParser.TOK_SHOWPARTITIONS, "SHOWPARTITIONS"); - commandType.put(HiveParser.TOK_SHOWINDEXES, "SHOWINDEXES"); - commandType.put(HiveParser.TOK_SHOWLOCKS, "SHOWLOCKS"); - commandType.put(HiveParser.TOK_CREATEFUNCTION, "CREATEFUNCTION"); - commandType.put(HiveParser.TOK_DROPFUNCTION, "DROPFUNCTION"); - commandType.put(HiveParser.TOK_CREATEVIEW, "CREATEVIEW"); - commandType.put(HiveParser.TOK_DROPVIEW, "DROPVIEW"); - commandType.put(HiveParser.TOK_CREATEINDEX, "CREATEINDEX"); - commandType.put(HiveParser.TOK_DROPINDEX, "DROPINDEX"); - commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, "ALTERINDEX_REBUILD"); - commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, "ALTERVIEW_PROPERTIES"); - commandType.put(HiveParser.TOK_QUERY, "QUERY"); - commandType.put(HiveParser.TOK_LOCKTABLE, "LOCKTABLE"); - commandType.put(HiveParser.TOK_UNLOCKTABLE, "UNLOCKTABLE"); + commandType.put(HiveParser.TOK_EXPLAIN, HiveOperation.EXPLAIN); + commandType.put(HiveParser.TOK_LOAD, HiveOperation.LOAD); + commandType.put(HiveParser.TOK_CREATEDATABASE, HiveOperation.CREATEDATABASE); + commandType.put(HiveParser.TOK_DROPDATABASE, HiveOperation.DROPDATABASE); + commandType.put(HiveParser.TOK_SWITCHDATABASE, HiveOperation.SWITCHDATABASE); + commandType.put(HiveParser.TOK_CREATETABLE, HiveOperation.CREATETABLE); + commandType.put(HiveParser.TOK_DROPTABLE, HiveOperation.DROPTABLE); + commandType.put(HiveParser.TOK_DESCTABLE, HiveOperation.DESCTABLE); + commandType.put(HiveParser.TOK_DESCFUNCTION, HiveOperation.DESCFUNCTION); + commandType.put(HiveParser.TOK_MSCK, HiveOperation.MSCK); + commandType.put(HiveParser.TOK_ALTERTABLE_ADDCOLS, HiveOperation.ALTERTABLE_ADDCOLS); + commandType.put(HiveParser.TOK_ALTERTABLE_REPLACECOLS, HiveOperation.ALTERTABLE_REPLACECOLS); + commandType.put(HiveParser.TOK_ALTERTABLE_RENAMECOL, HiveOperation.ALTERTABLE_RENAMECOL); + commandType.put(HiveParser.TOK_ALTERTABLE_RENAME, HiveOperation.ALTERTABLE_RENAME); + commandType.put(HiveParser.TOK_ALTERTABLE_DROPPARTS, HiveOperation.ALTERTABLE_DROPPARTS); + commandType.put(HiveParser.TOK_ALTERTABLE_ADDPARTS, HiveOperation.ALTERTABLE_ADDPARTS); + commandType.put(HiveParser.TOK_ALTERTABLE_TOUCH, HiveOperation.ALTERTABLE_TOUCH); + commandType.put(HiveParser.TOK_ALTERTABLE_ARCHIVE, HiveOperation.ALTERTABLE_ARCHIVE); + commandType.put(HiveParser.TOK_ALTERTABLE_UNARCHIVE, HiveOperation.ALTERTABLE_UNARCHIVE); + commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES); + commandType.put(HiveParser.TOK_ALTERTABLE_SERIALIZER, HiveOperation.ALTERTABLE_SERIALIZER); + commandType.put(HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES, HiveOperation.ALTERTABLE_SERDEPROPERTIES); + commandType.put(HiveParser.TOK_ALTERTABLE_CLUSTER_SORT, HiveOperation.ALTERTABLE_CLUSTER_SORT); + commandType.put(HiveParser.TOK_SHOWDATABASES, HiveOperation.SHOWDATABASES); + commandType.put(HiveParser.TOK_SHOWTABLES, HiveOperation.SHOWTABLES); + commandType.put(HiveParser.TOK_SHOW_TABLESTATUS, HiveOperation.SHOW_TABLESTATUS); + commandType.put(HiveParser.TOK_SHOWFUNCTIONS, HiveOperation.SHOWFUNCTIONS); + commandType.put(HiveParser.TOK_SHOWINDEXES, HiveOperation.SHOWINDEXES); + commandType.put(HiveParser.TOK_SHOWPARTITIONS, HiveOperation.SHOWPARTITIONS); + commandType.put(HiveParser.TOK_SHOWLOCKS, HiveOperation.SHOWLOCKS); + commandType.put(HiveParser.TOK_CREATEFUNCTION, HiveOperation.CREATEFUNCTION); + 
commandType.put(HiveParser.TOK_DROPFUNCTION, HiveOperation.DROPFUNCTION); + commandType.put(HiveParser.TOK_CREATEVIEW, HiveOperation.CREATEVIEW); + commandType.put(HiveParser.TOK_DROPVIEW, HiveOperation.DROPVIEW); + commandType.put(HiveParser.TOK_CREATEINDEX, HiveOperation.CREATEINDEX); + commandType.put(HiveParser.TOK_DROPINDEX, HiveOperation.DROPINDEX); + commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, HiveOperation.ALTERINDEX_REBUILD); + commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES); + commandType.put(HiveParser.TOK_QUERY, HiveOperation.QUERY); + commandType.put(HiveParser.TOK_LOCKTABLE, HiveOperation.LOCKTABLE); + commandType.put(HiveParser.TOK_UNLOCKTABLE, HiveOperation.UNLOCKTABLE); + commandType.put(HiveParser.TOK_CREATEROLE, HiveOperation.CREATEROLE); + commandType.put(HiveParser.TOK_DROPROLE, HiveOperation.DROPROLE); + commandType.put(HiveParser.TOK_GRANT, HiveOperation.GRANT_PRIVILEGE); + commandType.put(HiveParser.TOK_REVOKE, HiveOperation.REVOKE_PRIVILEGE); + commandType.put(HiveParser.TOK_SHOW_GRANT, HiveOperation.SHOW_GRANT); + commandType.put(HiveParser.TOK_GRANT_ROLE, HiveOperation.GRANT_ROLE); + commandType.put(HiveParser.TOK_REVOKE_ROLE, HiveOperation.REVOKE_ROLE); + commandType.put(HiveParser.TOK_SHOW_ROLE_GRANT, HiveOperation.SHOW_ROLE_GRANT); } static { - tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE, - new String[] { "ALTERTABLE_PROTECTMODE", "ALTERPARTITION_PROTECTMODE" }); + tablePartitionCommandType.put( + HiveParser.TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE, + new HiveOperation[] { HiveOperation.ALTERTABLE_PROTECTMODE, + HiveOperation.ALTERPARTITION_PROTECTMODE }); tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_FILEFORMAT, - new String[] { "ALTERTABLE_FILEFORMAT", "ALTERPARTITION_FILEFORMAT" }); + new HiveOperation[] { HiveOperation.ALTERTABLE_FILEFORMAT, + HiveOperation.ALTERPARTITION_FILEFORMAT }); tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_LOCATION, - new String[] { "ALTERTABLE_LOCATION", "ALTERPARTITION_LOCATION" }); + new HiveOperation[] { HiveOperation.ALTERTABLE_LOCATION, + HiveOperation.ALTERPARTITION_LOCATION }); } - public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) throws SemanticException { if (tree.getToken() == null) { @@ -131,9 +144,17 @@ case HiveParser.TOK_ALTERTABLE_UNARCHIVE: case HiveParser.TOK_LOCKTABLE: case HiveParser.TOK_UNLOCKTABLE: + case HiveParser.TOK_CREATEROLE: + case HiveParser.TOK_DROPROLE: + case HiveParser.TOK_GRANT: + case HiveParser.TOK_REVOKE: + case HiveParser.TOK_SHOW_GRANT: + case HiveParser.TOK_GRANT_ROLE: + case HiveParser.TOK_REVOKE_ROLE: + case HiveParser.TOK_SHOW_ROLE_GRANT: return new DDLSemanticAnalyzer(conf); case HiveParser.TOK_ALTERTABLE_PARTITION: - String commandType = null; + HiveOperation commandType = null; Integer type = ((ASTNode) tree.getChild(1)).getToken().getType(); if (tree.getChild(0).getChildCount() > 1) { commandType = tablePartitionCommandType.get(type)[1]; @@ -151,7 +172,7 @@ } } - private static void setSessionCommandType(String commandType) { + private static void setSessionCommandType(HiveOperation commandType) { if (SessionState.get() != null) { SessionState.get().setCommandType(commandType); } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (working copy) @@ -55,6 
+55,12 @@ private ShowTableStatusDesc showTblStatusDesc; private ShowIndexesDesc showIndexesDesc; + private RoleDDLDesc roleDDLDesc; + private GrantDesc grantDesc; + private ShowGrantDesc showGrantDesc; + private RevokeDesc revokeDesc; + private GrantRevokeRoleDDL grantRevokeRoleDDL; + /** * ReadEntitites that are passed to the hooks. */ @@ -297,6 +303,36 @@ } public DDLWork(HashSet inputs, HashSet outputs, + RoleDDLDesc roleDDLDesc) { + this(inputs, outputs); + this.roleDDLDesc = roleDDLDesc; + } + + public DDLWork(HashSet inputs, HashSet outputs, + GrantDesc grantDesc) { + this(inputs, outputs); + this.grantDesc = grantDesc; + } + + public DDLWork(HashSet inputs, HashSet outputs, + ShowGrantDesc showGrant) { + this(inputs, outputs); + this.showGrantDesc = showGrant; + } + + public DDLWork(HashSet inputs, HashSet outputs, + RevokeDesc revokeDesc) { + this(inputs, outputs); + this.revokeDesc = revokeDesc; + } + + public DDLWork(HashSet inputs, HashSet outputs, + GrantRevokeRoleDDL grantRevokeRoleDDL) { + this(inputs, outputs); + this.grantRevokeRoleDDL = grantRevokeRoleDDL; + } + + public DDLWork(HashSet inputs, HashSet outputs, ShowIndexesDesc showIndexesDesc) { this(inputs, outputs); this.showIndexesDesc = showIndexesDesc; @@ -687,4 +723,68 @@ this.dropIdxDesc = dropIdxDesc; } + /** + * @return role ddl desc + */ + public RoleDDLDesc getRoleDDLDesc() { + return roleDDLDesc; + } + + /** + * @param roleDDLDesc role ddl desc + */ + public void setRoleDDLDesc(RoleDDLDesc roleDDLDesc) { + this.roleDDLDesc = roleDDLDesc; + } + + /** + * @return grant desc + */ + public GrantDesc getGrantDesc() { + return grantDesc; + } + + /** + * @param grantDesc grant desc + */ + public void setGrantDesc(GrantDesc grantDesc) { + this.grantDesc = grantDesc; + } + + /** + * @return show grant desc + */ + public ShowGrantDesc getShowGrantDesc() { + return showGrantDesc; + } + + /** + * @param showGrantDesc + */ + public void setShowGrantDesc(ShowGrantDesc showGrantDesc) { + this.showGrantDesc = showGrantDesc; + } + + public RevokeDesc getRevokeDesc() { + return revokeDesc; + } + + public void setRevokeDesc(RevokeDesc revokeDesc) { + this.revokeDesc = revokeDesc; + } + + /** + * @return + */ + public GrantRevokeRoleDDL getGrantRevokeRoleDDL() { + return grantRevokeRoleDDL; + } + + /** + * @param grantRevokeRoleDDL + */ + public void setGrantRevokeRoleDDL(GrantRevokeRoleDDL grantRevokeRoleDDL) { + this.grantRevokeRoleDDL = grantRevokeRoleDDL; + } + } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/GrantDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/GrantDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/GrantDesc.java (revision 0) @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
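Illustrative sketch (not part of the patch): the constructor overloads above let DDLSemanticAnalyzer wrap any of the new authorization descriptors into a DDL task exactly as it already does for the existing descriptors. The stripped set types are assumed to be HashSet<ReadEntity> and HashSet<WriteEntity>, and RoleDDLDesc is added later in this patch:

    HashSet<ReadEntity> inputs = new HashSet<ReadEntity>();
    HashSet<WriteEntity> outputs = new HashSet<WriteEntity>();
    RoleDDLDesc createRole =
        new RoleDDLDesc("src_role", RoleDDLDesc.RoleOperation.CREATE_ROLE);
    DDLWork roleWork = new DDLWork(inputs, outputs, createRole);
    // Inside the analyzer this would then become something like:
    // rootTasks.add(TaskFactory.get(roleWork, conf));
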
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; + +@Explain(displayName = "Grant") +public class GrantDesc extends DDLDesc implements Serializable, Cloneable { + + private static final long serialVersionUID = 1L; + + private List privileges; + + private List principals; + + private boolean grantOption; + + private PrivilegeObjectDesc privilegeSubjectDesc; + + public GrantDesc(PrivilegeObjectDesc privilegeSubject, + List privilegeDesc, List principalDesc, + boolean grantOption) { + super(); + this.privilegeSubjectDesc = privilegeSubject; + this.privileges = privilegeDesc; + this.principals = principalDesc; + this.grantOption = grantOption; + } + + /** + * @return privileges + */ + @Explain(displayName = "Privileges") + public List getPrivileges() { + return privileges; + } + + /** + * @param privileges + */ + public void setPrivileges(List privileges) { + this.privileges = privileges; + } + + /** + * @return principals + */ + @Explain(displayName = "Principals") + public List getPrincipals() { + return principals; + } + + /** + * @param principals + */ + public void setPrincipals(List principals) { + this.principals = principals; + } + + /** + * @return grant option + */ + @Explain(displayName = "grant option") + public boolean isGrantOption() { + return grantOption; + } + + /** + * @param grantOption + */ + public void setGrantOption(boolean grantOption) { + this.grantOption = grantOption; + } + + /** + * @return privilege subject + */ + @Explain(displayName="privilege subject") + public PrivilegeObjectDesc getPrivilegeSubjectDesc() { + return privilegeSubjectDesc; + } + + /** + * @param privilegeSubjectDesc + */ + public void setPrivilegeSubjectDesc(PrivilegeObjectDesc privilegeSubjectDesc) { + this.privilegeSubjectDesc = privilegeSubjectDesc; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/GrantRevokeRoleDDL.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/GrantRevokeRoleDDL.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/GrantRevokeRoleDDL.java (revision 0) @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
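Illustrative sketch (not part of the patch): the plan-side shape of a statement such as grant `select` on table src_autho_test to user hive_test_user. The stripped list types are assumed to be List<PrivilegeDesc> and List<PrincipalDesc>; PrincipalDesc, PrivilegeDesc and PrivilegeObjectDesc are all added later in this patch:

    PrivilegeObjectDesc subject =
        new PrivilegeObjectDesc(true, "src_autho_test", null);      // table, no partition spec
    List<PrivilegeDesc> privileges =
        Arrays.asList(new PrivilegeDesc(Privilege.SELECT, null));   // null = all columns
    List<PrincipalDesc> principals = Arrays.asList(
        new PrincipalDesc("hive_test_user", PrincipalDesc.PrincipalType.USER));
    GrantDesc grantDesc = new GrantDesc(subject, privileges, principals,
        false /* no GRANT OPTION */);
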
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.util.List; + +@Explain(displayName="grant or revoke roles") +public class GrantRevokeRoleDDL { + + private boolean grant; + + private List principalDesc; + + private List roles; + + public GrantRevokeRoleDDL() { + } + + public GrantRevokeRoleDDL(boolean grant, List roles, List principalDesc) { + super(); + this.grant = grant; + this.principalDesc = principalDesc; + this.roles = roles; + } + + /** + * @return grant or revoke privileges + */ + @Explain(displayName="grant (or revoke)") + public boolean getGrant() { + return grant; + } + + public void setGrant(boolean grant) { + this.grant = grant; + } + + /** + * @return a list of principals + */ + @Explain(displayName="principals") + public List getPrincipalDesc() { + return principalDesc; + } + + public void setPrincipalDesc(List principalDesc) { + this.principalDesc = principalDesc; + } + + /** + * @return a list of roles + */ + @Explain(displayName="roles") + public List getRoles() { + return roles; + } + + public void setRoles(List roles) { + this.roles = roles; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (revision 0) @@ -0,0 +1,182 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
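Illustrative sketch (not part of the patch): GrantRevokeRoleDDL models both GRANT ROLE and REVOKE ROLE, distinguished only by the boolean flag. For grant role src_role to user hive_test_user, assuming the stripped list types are List<String> for roles and List<PrincipalDesc> for principals:

    List<String> roles = Arrays.asList("src_role");
    List<PrincipalDesc> principals = Arrays.asList(
        new PrincipalDesc("hive_test_user", PrincipalDesc.PrincipalType.USER));
    GrantRevokeRoleDDL grantRole = new GrantRevokeRoleDDL(true, roles, principals);
    // The same descriptor with grant == false expresses REVOKE ROLE.
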
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import org.apache.hadoop.hive.ql.security.authorization.Privilege; + +public enum HiveOperation { + + EXPLAIN("EXPLAIN", null, null), + LOAD("LOAD", null, new Privilege[]{Privilege.ALTER_DATA}), + CREATEDATABASE("CREATEDATABASE", null, null), + DROPDATABASE("DROPDATABASE", null, null), + SWITCHDATABASE("SWITCHDATABASE", null, null), + DROPTABLE ("DROPTABLE", null, new Privilege[]{Privilege.DROP}), + DESCTABLE("DESCTABLE", null, null), + DESCFUNCTION("DESCFUNCTION", null, null), + MSCK("MSCK", null, null), + ALTERTABLE_ADDCOLS("ALTERTABLE_ADDCOLS", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_REPLACECOLS("ALTERTABLE_REPLACECOLS", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_RENAMECOL("ALTERTABLE_RENAMECOL", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_RENAME("ALTERTABLE_RENAME", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_DROPPARTS("ALTERTABLE_DROPPARTS", new Privilege[]{Privilege.DROP}, null), + ALTERTABLE_ADDPARTS("ALTERTABLE_ADDPARTS", new Privilege[]{Privilege.CREATE}, null), + ALTERTABLE_TOUCH("ALTERTABLE_TOUCH", null, null), + ALTERTABLE_ARCHIVE("ALTERTABLE_ARCHIVE", new Privilege[]{Privilege.ALTER_DATA}, null), + ALTERTABLE_UNARCHIVE("ALTERTABLE_UNARCHIVE", new Privilege[]{Privilege.ALTER_DATA}, null), + ALTERTABLE_PROPERTIES("ALTERTABLE_PROPERTIES", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_SERIALIZER("ALTERTABLE_SERIALIZER", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_SERDEPROPERTIES("ALTERTABLE_SERDEPROPERTIES", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_CLUSTER_SORT("ALTERTABLE_CLUSTER_SORT", new Privilege[]{Privilege.ALTER_METADATA}, null), + SHOWDATABASES("SHOWDATABASES", new Privilege[]{Privilege.SHOW_DATABASE}, null), + SHOWTABLES("SHOWTABLES", null, null), + SHOW_TABLESTATUS("SHOW_TABLESTATUS", null, null), + SHOWFUNCTIONS("SHOWFUNCTIONS", null, null), + SHOWINDEXES("SHOWINDEXES", null, null), + SHOWPARTITIONS("SHOWPARTITIONS", null, null), + SHOWLOCKS("SHOWLOCKS", null, null), + CREATEFUNCTION("CREATEFUNCTION", null, null), + DROPFUNCTION("DROPFUNCTION", null, null), + CREATEVIEW("CREATEVIEW", null, null), + DROPVIEW("DROPVIEW", null, null), + CREATEINDEX("CREATEINDEX", null, null), + DROPINDEX("DROPINDEX", null, null), + ALTERINDEX_REBUILD("ALTERINDEX_REBUILD", null, null), + ALTERVIEW_PROPERTIES("ALTERVIEW_PROPERTIES", null, null), + LOCKTABLE("LOCKTABLE", new Privilege[]{Privilege.LOCK}, null), + UNLOCKTABLE("UNLOCKTABLE", new Privilege[]{Privilege.LOCK}, null), + CREATEROLE("CREATEROLE", null, null), + DROPROLE("DROPROLE", null, null), + GRANT_PRIVILEGE("GRANT_PRIVILEGE", null, null), + REVOKE_PRIVILEGE("REVOKE_PRIVILEGE", null, null), + SHOW_GRANT("SHOW_GRANT", null, null), + GRANT_ROLE("GRANT_ROLE", null, null), + REVOKE_ROLE("REVOKE_ROLE", null, null), + SHOW_ROLE_GRANT("SHOW_ROLE_GRANT", null, null), + ALTERTABLE_PROTECTMODE("ALTERTABLE_PROTECTMODE", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERPARTITION_PROTECTMODE("ALTERPARTITION_PROTECTMODE", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_FILEFORMAT("ALTERTABLE_FILEFORMAT", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERPARTITION_FILEFORMAT("ALTERPARTITION_FILEFORMAT", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_LOCATION("ALTERTABLE_LOCATION", new Privilege[]{Privilege.ALTER_DATA}, null), + ALTERPARTITION_LOCATION("ALTERPARTITION_LOCATION", new 
Privilege[]{Privilege.ALTER_DATA}, null), + CREATETABLE("CREATETABLE", null, new Privilege[]{Privilege.CREATE}), + CREATETABLE_AS_SELECT("CREATETABLE_AS_SELECT", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.CREATE}), + QUERY("QUERY", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA}), + ; + + private String operationName; + + private Privilege[] inputRequiredPrivileges; + + private Privilege[] outputRequiredPrivileges; + + public Privilege[] getInputRequiredPrivileges() { + return inputRequiredPrivileges; + } + + public Privilege[] getOutputRequiredPrivileges() { + return outputRequiredPrivileges; + } + + public String getOperationName() { + return operationName; + } + + private HiveOperation(String operationName, + Privilege[] inputRequiredPrivileges, Privilege[] outputRequiredPrivileges) { + this.operationName = operationName; + this.inputRequiredPrivileges = inputRequiredPrivileges; + this.outputRequiredPrivileges = outputRequiredPrivileges; + } + + public static class PrivilegeAgreement { + + private Privilege[] inputUserLevelRequiredPriv; + private Privilege[] inputDBLevelRequiredPriv; + private Privilege[] inputTableLevelRequiredPriv; + private Privilege[] inputColumnLevelRequiredPriv; + private Privilege[] outputUserLevelRequiredPriv; + private Privilege[] outputDBLevelRequiredPriv; + private Privilege[] outputTableLevelRequiredPriv; + private Privilege[] outputColumnLevelRequiredPriv; + + public PrivilegeAgreement putUserLevelRequiredPriv( + Privilege[] inputUserLevelRequiredPriv, + Privilege[] outputUserLevelRequiredPriv) { + this.inputUserLevelRequiredPriv = inputUserLevelRequiredPriv; + this.outputUserLevelRequiredPriv = outputUserLevelRequiredPriv; + return this; + } + + public PrivilegeAgreement putDBLevelRequiredPriv( + Privilege[] inputDBLevelRequiredPriv, + Privilege[] outputDBLevelRequiredPriv) { + this.inputDBLevelRequiredPriv = inputDBLevelRequiredPriv; + this.outputDBLevelRequiredPriv = outputDBLevelRequiredPriv; + return this; + } + + public PrivilegeAgreement putTableLevelRequiredPriv( + Privilege[] inputTableLevelRequiredPriv, + Privilege[] outputTableLevelRequiredPriv) { + this.inputTableLevelRequiredPriv = inputTableLevelRequiredPriv; + this.outputTableLevelRequiredPriv = outputTableLevelRequiredPriv; + return this; + } + + public PrivilegeAgreement putColumnLevelRequiredPriv( + Privilege[] inputColumnLevelPriv, Privilege[] outputColumnLevelPriv) { + this.inputColumnLevelRequiredPriv = inputColumnLevelPriv; + this.outputColumnLevelRequiredPriv = outputColumnLevelPriv; + return this; + } + + public Privilege[] getInputUserLevelRequiredPriv() { + return inputUserLevelRequiredPriv; + } + + public Privilege[] getInputDBLevelRequiredPriv() { + return inputDBLevelRequiredPriv; + } + + public Privilege[] getInputTableLevelRequiredPriv() { + return inputTableLevelRequiredPriv; + } + + public Privilege[] getInputColumnLevelRequiredPriv() { + return inputColumnLevelRequiredPriv; + } + + public Privilege[] getOutputUserLevelRequiredPriv() { + return outputUserLevelRequiredPriv; + } + + public Privilege[] getOutputDBLevelRequiredPriv() { + return outputDBLevelRequiredPriv; + } + + public Privilege[] getOutputTableLevelRequiredPriv() { + return outputTableLevelRequiredPriv; + } + + public Privilege[] getOutputColumnLevelRequiredPriv() { + return outputColumnLevelRequiredPriv; + } + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/HiveQueryReadWrite.java =================================================================== --- 
ql/src/java/org/apache/hadoop/hive/ql/plan/HiveQueryReadWrite.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveQueryReadWrite.java (revision 0) @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; + +public class HiveQueryReadWrite { + + Map<HiveOperation, List<ReadEntity>> opToInputList = new HashMap<HiveOperation, List<ReadEntity>>(); + Map<HiveOperation, List<WriteEntity>> opToOutputList = new HashMap<HiveOperation, List<WriteEntity>>(); + + public void putInputForOperation(ReadEntity input, HiveOperation op) { + if (this.getOpToInputList().get(op) == null) { + this.getOpToInputList().put(op, new ArrayList<ReadEntity>()); + } + this.getOpToInputList().get(op).add(input); + } + + public void putOutputForOperation(WriteEntity output, HiveOperation op) { + if (this.getOpToOutputList().get(op) == null) { + this.getOpToOutputList().put(op, new ArrayList<WriteEntity>()); + } + this.getOpToOutputList().get(op).add(output); + } + + public Map<HiveOperation, List<ReadEntity>> getOpToInputList() { + return opToInputList; + } + + public Map<HiveOperation, List<WriteEntity>> getOpToOutputList() { + return opToOutputList; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PrincipalDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/PrincipalDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PrincipalDesc.java (revision 0) @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
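Illustrative sketch (not part of the patch): HiveOperation pairs every command with the privileges its inputs and outputs require, and HiveQueryReadWrite buckets a query's ReadEntity and WriteEntity objects per operation so an authorization provider can check them together. Here read and write are assumed to be entities already collected during semantic analysis:

    HiveOperation op = HiveOperation.QUERY;
    Privilege[] neededOnInputs = op.getInputRequiredPrivileges();   // Privilege.SELECT
    Privilege[] neededOnOutputs = op.getOutputRequiredPrivileges(); // Privilege.ALTER_DATA

    HiveQueryReadWrite readWrite = new HiveQueryReadWrite();
    readWrite.putInputForOperation(read, op);    // read: a ReadEntity (assumed)
    readWrite.putOutputForOperation(write, op);  // write: a WriteEntity (assumed)
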
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "Principal") +public class PrincipalDesc implements Serializable, Cloneable { + + private static final long serialVersionUID = 1L; + + public static enum PrincipalType { + USER, GROUP, ROLE; + } + + private String name; + + private PrincipalType type; + + public PrincipalDesc(String name, PrincipalType type) { + super(); + this.name = name; + this.type = type; + } + + public PrincipalDesc() { + super(); + } + + @Explain(displayName="name") + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Explain(displayName="type") + public PrincipalType getType() { + return type; + } + + public void setType(PrincipalType type) { + this.type = type; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeDesc.java (revision 0) @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; + +import org.apache.hadoop.hive.ql.security.authorization.Privilege; + +@Explain(displayName = "Privilege") +public class PrivilegeDesc implements Serializable, Cloneable { + private static final long serialVersionUID = 1L; + + private Privilege privilege; + + private List columns; + + public PrivilegeDesc(Privilege privilege, List columns) { + super(); + this.privilege = privilege; + this.columns = columns; + } + + public PrivilegeDesc() { + super(); + } + + /** + * @return privilege definition + */ + @Explain(displayName = "privilege") + public Privilege getPrivilege() { + return privilege; + } + + /** + * @param privilege + */ + public void setPrivilege(Privilege privilege) { + this.privilege = privilege; + } + + /** + * @return columns on which the given privilege take affect. + */ + @Explain(displayName = "columns") + public List getColumns() { + return columns; + } + + /** + * @param columns + */ + public void setColumns(List columns) { + this.columns = columns; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java (revision 0) @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
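Illustrative sketch (not part of the patch): a column-level grant pairs a PrivilegeDesc naming the columns with the principal that receives it, which is what a test statement like grant `select`(key) on table ... to group hive_test_group1 (added later in this patch) compiles down to. List<String> is assumed for the stripped column-list type:

    PrivilegeDesc selectOnKey =
        new PrivilegeDesc(Privilege.SELECT, Arrays.asList("key"));
    PrincipalDesc group = new PrincipalDesc("hive_test_group1",
        PrincipalDesc.PrincipalType.GROUP);
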
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.util.HashMap; + +@Explain(displayName="privilege subject") +public class PrivilegeObjectDesc { + + private boolean table; + + private String object; + + private HashMap partSpec; + + public PrivilegeObjectDesc(boolean isTable, String object, + HashMap partSpec) { + super(); + this.table = isTable; + this.object = object; + this.partSpec = partSpec; + } + + public PrivilegeObjectDesc() { + } + + @Explain(displayName="is table") + public boolean getTable() { + return table; + } + + public void setTable(boolean isTable) { + this.table = isTable; + } + + @Explain(displayName="object") + public String getObject() { + return object; + } + + public void setObject(String object) { + this.object = object; + } + + @Explain(displayName="partition spec") + public HashMap getPartSpec() { + return partSpec; + } + + public void setPartSpec(HashMap partSpec) { + this.partSpec = partSpec; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/RevokeDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/RevokeDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/RevokeDesc.java (revision 0) @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
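Illustrative sketch (not part of the patch): PrivilegeObjectDesc names the object a privilege applies to; for the partition-level statements in the test queries later in this patch (for example revoke `select` ... partition (ds='2010')), the partition spec travels with it. HashMap<String, String> is assumed for the stripped map type:

    HashMap<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("ds", "2010");
    PrivilegeObjectDesc partitionSubject =
        new PrivilegeObjectDesc(true /* a table, not a database */,
            "authorization_part", partSpec);
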
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; + +@Explain(displayName="Revoke") +public class RevokeDesc extends DDLDesc implements Serializable, Cloneable { + + private static final long serialVersionUID = 1L; + + private List privileges; + + private List principals; + + private PrivilegeObjectDesc privilegeSubjectDesc; + + public RevokeDesc(){ + } + + public RevokeDesc(List privileges, + List principals, PrivilegeObjectDesc privilegeSubjectDesc) { + super(); + this.privileges = privileges; + this.principals = principals; + this.privilegeSubjectDesc = privilegeSubjectDesc; + } + + public List getPrivileges() { + return privileges; + } + + public void setPrivileges(List privileges) { + this.privileges = privileges; + } + + public List getPrincipals() { + return principals; + } + + public void setPrincipals(List principals) { + this.principals = principals; + } + + public PrivilegeObjectDesc getPrivilegeSubjectDesc() { + return privilegeSubjectDesc; + } + + public void setPrivilegeSubjectDesc(PrivilegeObjectDesc privilegeSubjectDesc) { + this.privilegeSubjectDesc = privilegeSubjectDesc; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java (revision 0) @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "Create Role") +public class RoleDDLDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + + private String name; + + private boolean role; + + private boolean group; + + private RoleOperation operation; + + private String resFile; + + public static enum RoleOperation { + DROP_ROLE("drop_role"), CREATE_ROLE("create_role"), SHOW_ROLE_GRANT("show_roles"); + private String operationName; + + private RoleOperation() { + } + + private RoleOperation(String operationName) { + this.operationName = operationName; + } + + public String getOperationName() { + return operationName; + } + + public String toString () { + return this.operationName; + } + } + + public RoleDDLDesc(){ + } + + public RoleDDLDesc(String roleName, RoleOperation operation) { + this(roleName, false, false, operation); + } + + public RoleDDLDesc(String principalName, boolean isRole, boolean isGroup, + RoleOperation operation) { + this.name = principalName; + this.role = isRole; + this.group = isGroup; + this.operation = operation; + } + + @Explain(displayName = "name") + public String getName() { + return name; + } + + public void setName(String roleName) { + this.name = roleName; + } + + @Explain(displayName = "role operation") + public RoleOperation getOperation() { + return operation; + } + + public void setOperation(RoleOperation operation) { + this.operation = operation; + } + + public boolean getRole() { + return role; + } + + public void setRole(boolean role) { + this.role = role; + } + + public boolean getGroup() { + return group; + } + + public void setGroup(boolean group) { + this.group = group; + } + + public String getResFile() { + return resFile; + } + + public void setResFile(String resFile) { + this.resFile = resFile; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java (revision 0) @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
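Illustrative sketch (not part of the patch): RoleDDLDesc carries CREATE ROLE, DROP ROLE and SHOW ROLE GRANT through its RoleOperation enum, with SHOW ROLE GRANT writing its result to resFile. For show role grant user hive_test_user, with a purely hypothetical result path:

    RoleDDLDesc showRoleGrant = new RoleDDLDesc("hive_test_user",
        false /* principal is not a role */, false /* nor a group */,
        RoleDDLDesc.RoleOperation.SHOW_ROLE_GRANT);
    showRoleGrant.setResFile("/tmp/hive_show_role_grant.txt");  // hypothetical path
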
+ */ +package org.apache.hadoop.hive.ql.plan; + +import java.util.List; + +@Explain(displayName="show grant desc") +public class ShowGrantDesc { + + private PrincipalDesc principalDesc; + + private PrivilegeObjectDesc hiveObj; + + private List columns; + + private String resFile; + + public ShowGrantDesc(){ + } + + public ShowGrantDesc(String resFile, PrincipalDesc principalDesc, + PrivilegeObjectDesc subjectObj, List columns) { + this.resFile = resFile; + this.principalDesc = principalDesc; + this.hiveObj = subjectObj; + this.columns = columns; + } + + @Explain(displayName="principal desc") + public PrincipalDesc getPrincipalDesc() { + return principalDesc; + } + + public void setPrincipalDesc(PrincipalDesc principalDesc) { + this.principalDesc = principalDesc; + } + + @Explain(displayName="object") + public PrivilegeObjectDesc getHiveObj() { + return hiveObj; + } + + public void setHiveObj(PrivilegeObjectDesc subjectObj) { + this.hiveObj = subjectObj; + } + + public String getResFile() { + return resFile; + } + + public void setResFile(String resFile) { + this.resFile = resFile; + } + + public List getColumns() { + return columns; + } + + public void setColumns(List columns) { + this.columns = columns; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/Authenticator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/Authenticator.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/Authenticator.java (revision 0) @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.security; + +import java.util.List; + +import org.apache.hadoop.conf.Configuration; + +public interface Authenticator { + + public String getUserName(); + + public List getGroupNames(); + + public boolean detroy(); + + public void init(Configuration conf); + +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/AuthenticatorFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/AuthenticatorFactory.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/AuthenticatorFactory.java (revision 0) @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
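Illustrative sketch (not part of the patch): the Authenticator interface above is the plug-in point selected by hive.security.authenticator.manager. A minimal, hypothetical fixed-identity implementation might look like the following; the patch's own test implementation (DummpyAuthenticator, later in this diff) follows the same shape:

    package org.apache.hadoop.hive.ql.security;  // hypothetical placement

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;

    public class StaticAuthenticator implements Authenticator {

      @Override
      public void init(Configuration conf) {
        // nothing to configure for a fixed identity
      }

      @Override
      public String getUserName() {
        return "service_user";          // hypothetical user
      }

      @Override
      public List<String> getGroupNames() {
        return Arrays.asList("service_group");  // hypothetical group
      }

      @Override
      public boolean detroy() {         // note: spelled "detroy" in the interface above
        return true;
      }
    }

Switching Hive to such a class is then only a matter of setting hive.security.authenticator.manager to its fully qualified name.
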
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.security; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +public class AuthenticatorFactory { + + @SuppressWarnings("unchecked") + public static Authenticator getAuthenticator(Configuration conf) + throws HiveException { + + String clsStr = HiveConf.getVar(conf, + HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER); + + Authenticator ret = null; + try { + Class cls = null; + if (clsStr == null || clsStr.trim().equals("")) { + cls = HadoopDefaultAuthenticator.class; + } else { + cls = (Class) Class + .forName(clsStr); + } + if (cls != null) { + ret = cls.newInstance(); + ret.init(conf); + } + } catch (Exception e) { + throw new HiveException(e); + } + + return ret; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/HadoopDefaultAuthenticator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/HadoopDefaultAuthenticator.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/HadoopDefaultAuthenticator.java (revision 0) @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
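Illustrative sketch (not part of the patch): AuthenticatorFactory resolves hive.security.authenticator.manager and falls back to HadoopDefaultAuthenticator when the property is blank, so callers only program against the interface. Assuming conf is an existing HiveConf and with HiveException handling omitted:

    // Explicitly selecting the default implementation; leaving the property
    // unset has the same effect.
    HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
        "org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator");
    Authenticator authenticator = AuthenticatorFactory.getAuthenticator(conf);
    String userName = authenticator.getUserName();
    List<String> groupNames = authenticator.getGroupNames();
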
+ */ + +package org.apache.hadoop.hive.ql.security; + +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.security.UserGroupInformation; + +public class HadoopDefaultAuthenticator implements Authenticator { + + private String userName; + private List groupNames; + + @Override + public List getGroupNames() { + return groupNames; + } + + @Override + public String getUserName() { + return userName; + } + + @Override + public void init(Configuration conf) { + UserGroupInformation ugi = null; + try { + ugi = ShimLoader.getHadoopShims().getUGIForConf(conf); + } catch (Exception e) { + throw new RuntimeException(e); + } + + if (ugi == null) { + throw new RuntimeException( + "Can not initialize HadoopDefaultAuthenticator."); + } + + this.userName = ugi.getUserName(); + if (ugi.getGroupNames() != null) { + this.groupNames = Arrays.asList(ugi.getGroupNames()); + } + + System.out.println("User Name is :" + this.getUserName()); + System.out.println("Group Names are :" + this.getGroupNames()); + } + + @Override + public boolean detroy() { + return true; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (revision 1036686) +++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (working copy) @@ -40,6 +40,12 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.history.HiveHistory; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.HiveOperation; +import org.apache.hadoop.hive.ql.security.Authenticator; +import org.apache.hadoop.hive.ql.security.AuthenticatorFactory; +import org.apache.hadoop.hive.ql.security.authorization.AuthorizationManagerFactory; +import org.apache.hadoop.hive.ql.security.authorization.AuthorizationProviderManager; import org.apache.hadoop.hive.ql.util.DosToUnix; import org.apache.log4j.LogManager; import org.apache.log4j.PropertyConfigurator; @@ -77,7 +83,11 @@ /** * type of the command. */ - private String commandType; + private HiveOperation commandType; + + private AuthorizationProviderManager authorizer; + + private Authenticator authenticator; /** * Lineage state. @@ -150,11 +160,15 @@ /** * start a new session and set it to current session. + * @throws HiveException */ - public static SessionState start(HiveConf conf) { + public static SessionState start(HiveConf conf) throws HiveException { SessionState ss = new SessionState(conf); ss.getConf().setVar(HiveConf.ConfVars.HIVESESSIONID, makeSessionId()); ss.hiveHist = new HiveHistory(ss); + ss.authenticator = AuthenticatorFactory.getAuthenticator(conf); + ss.authorizer = AuthorizationManagerFactory.getAuthorizeProviderManager( + conf, ss.authenticator); tss.set(ss); return (ss); } @@ -163,6 +177,7 @@ * set current session to existing session object if a thread is running * multiple sessions - it must call this method with the new session object * when switching from one session to another. 
+ * @throws HiveException */ public static SessionState start(SessionState startSs) { @@ -176,6 +191,16 @@ if (startSs.hiveHist == null) { startSs.hiveHist = new HiveHistory(startSs); } + + try { + startSs.authenticator = AuthenticatorFactory.getAuthenticator(startSs + .getConf()); + startSs.authorizer = AuthorizationManagerFactory + .getAuthorizeProviderManager(startSs.getConf(), startSs.authenticator); + } catch (HiveException e) { + throw new RuntimeException(e); + } + return startSs; } @@ -539,10 +564,30 @@ } public String getCommandType() { + return commandType.getOperationName(); + } + + public HiveOperation getHiveOperation() { return commandType; } - public void setCommandType(String commandType) { + public void setCommandType(HiveOperation commandType) { this.commandType = commandType; } + + public AuthorizationProviderManager getAuthorizer() { + return authorizer; + } + + public void setAuthorizer(AuthorizationProviderManager authorizer) { + this.authorizer = authorizer; + } + + public Authenticator getAuthenticator() { + return authenticator; + } + + public void setAuthenticator(Authenticator authenticator) { + this.authenticator = authenticator; + } } Index: ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java (revision 1036686) +++ ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java (working copy) @@ -24,10 +24,12 @@ import java.io.DataInputStream; import java.io.File; import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.FileWriter; import java.io.PrintStream; import java.io.Serializable; +import java.io.UnsupportedEncodingException; import java.util.ArrayList; import java.util.Arrays; import java.util.Deque; @@ -389,6 +391,9 @@ } public void createSources() throws Exception { + + startSessionState(); + // Create a bunch of tables with columns key and value LinkedList cols = new LinkedList(); cols.add("key"); @@ -490,7 +495,8 @@ testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE); // conf.logVars(System.out); // System.out.flush(); - + + SessionState.start(conf); db = Hive.get(conf); fs = FileSystem.get(conf); drv = new Driver(conf); @@ -541,6 +547,8 @@ createSources(); } + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER, + "org.apache.hadoop.hive.ql.security.DummpyAuthenticator"); CliSessionState ss = new CliSessionState(conf); assert ss != null; ss.in = System.in; @@ -554,7 +562,7 @@ ss.err = ss.out; ss.setIsSilent(true); SessionState oldSs = SessionState.get(); - if (oldSs != null) { + if (oldSs != null && oldSs.out != null && oldSs.out != System.out) { oldSs.out.close(); } SessionState.start(ss); @@ -566,6 +574,19 @@ cliDriver.processInitFiles(ss); } + private CliSessionState startSessionState() + throws FileNotFoundException, UnsupportedEncodingException { + + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER, + "org.apache.hadoop.hive.ql.security.DummpyAuthenticator"); + + CliSessionState ss = new CliSessionState(conf); + assert ss != null; + + SessionState.start(ss); + return ss; + } + public int executeOne(String tname) { String q = qMap.get(tname); @@ -898,6 +919,7 @@ "-I", "at junit", "-I", "Caused by:", "-I", "QUERYID_LOCK:", + "-I", "grantTime", "-I", "[.][.][.] 
[0-9]* more", (new File(logDir, tname + ".out")).getPath(), outFileName }; Index: ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java (revision 1036686) +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java (working copy) @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.session.SessionState; public class TestSemanticAnalyzerHookLoading extends TestCase { @@ -35,6 +36,7 @@ HiveConf conf = new HiveConf(this.getClass()); conf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, DummySemanticAnalyzerHook.class.getName()); conf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + SessionState.start(conf); Driver driver = new Driver(conf); driver.run("drop table testDL"); Index: ql/src/test/org/apache/hadoop/hive/ql/security/DummpyAuthenticator.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/security/DummpyAuthenticator.java (revision 0) +++ ql/src/test/org/apache/hadoop/hive/ql/security/DummpyAuthenticator.java (revision 0) @@ -0,0 +1,39 @@ +package org.apache.hadoop.hive.ql.security; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; + +public class DummpyAuthenticator implements Authenticator { + + private List groupNames; + private String userName; + + public DummpyAuthenticator() { + this.groupNames = new ArrayList(); + groupNames.add("hive_test_group1"); + groupNames.add("hive_test_group2"); + userName = "hive_test_user"; + } + + @Override + public boolean detroy() { + return true; + } + + @Override + public List getGroupNames() { + return groupNames; + } + + @Override + public String getUserName() { + return userName; + } + + @Override + public void init(Configuration conf) { + } + +} Index: ql/src/test/queries/clientnegative/authorization_part.q =================================================================== --- ql/src/test/queries/clientnegative/authorization_part.q (revision 0) +++ ql/src/test/queries/clientnegative/authorization_part.q (revision 0) @@ -0,0 +1,32 @@ +create table authorization_part_fail (key int, value string) partitioned by (ds string); +set hive.security.authorization.enabled=true; + +grant `Create` on table authorization_part_fail to user hive_test_user; +grant `Overwrite` on table authorization_part_fail to user hive_test_user; +grant `Drop` on table authorization_part_fail to user hive_test_user; +grant `select` on table src to user hive_test_user; + +-- column grant to group + +grant `select`(key) on table authorization_part_fail to group hive_test_group1; +grant `select` on table authorization_part_fail to group hive_test_group1; + +show grant group hive_test_group1 on table authorization_part_fail; + +insert overwrite table authorization_part_fail partition (ds='2010') select key, value from src; +show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2010'); +show grant group hive_test_group1 on table authorization_part_fail partition (ds='2010'); +select key, value from authorization_part_fail where ds='2010' order by key limit 20; + +insert overwrite table authorization_part_fail partition (ds='2011') select key, value from 
src; +show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2011'); +show grant group hive_test_group1 on table authorization_part_fail partition (ds='2011'); +select key, value from authorization_part_fail where ds='2011' order by key limit 20; + +select key,value, ds from authorization_part_fail where ds>='2010' order by key, ds limit 20; + +revoke `select` on table authorization_part_fail partition (ds='2010') from group hive_test_group1; + +select key,value, ds from authorization_part_fail where ds>='2010' order by key, ds limit 20; + +drop table authorization_part_fail; \ No newline at end of file Index: ql/src/test/queries/clientpositive/authorization_1.q =================================================================== --- ql/src/test/queries/clientpositive/authorization_1.q (revision 0) +++ ql/src/test/queries/clientpositive/authorization_1.q (revision 0) @@ -0,0 +1,88 @@ +create table src_autho_test as select * from src; +set hive.security.authorization.enabled=true; +grant `drop` on table src_autho_test to user hive_test_user; + +--table grant to user + +grant `select` on table src_autho_test to user hive_test_user; + +show grant user hive_test_user on table src_autho_test; +show grant user hive_test_user on table src_autho_test(key); + +select key from src_autho_test order by key limit 20; + +revoke `select` on table src_autho_test from user hive_test_user; +show grant user hive_test_user on table src_autho_test; +show grant user hive_test_user on table src_autho_test(key); + +--column grant to user + +grant `select`(key) on table src_autho_test to user hive_test_user; + +show grant user hive_test_user on table src_autho_test; +show grant user hive_test_user on table src_autho_test(key); + +select key from src_autho_test order by key limit 20; + +revoke `select`(key) on table src_autho_test from user hive_test_user; +show grant user hive_test_user on table src_autho_test; +show grant user hive_test_user on table src_autho_test(key); + + +--table grant to group + +grant `select` on table src_autho_test to group hive_test_group1; + +show grant group hive_test_group1 on table src_autho_test; +show grant group hive_test_group1 on table src_autho_test(key); + +select key from src_autho_test order by key limit 20; + +revoke `select` on table src_autho_test from group hive_test_group1; +show grant group hive_test_group1 on table src_autho_test; +show grant group hive_test_group1 on table src_autho_test(key); + +--column grant to group + +grant `select`(key) on table src_autho_test to group hive_test_group1; + +show grant group hive_test_group1 on table src_autho_test; +show grant group hive_test_group1 on table src_autho_test(key); + +select key from src_autho_test order by key limit 20; + +revoke `select`(key) on table src_autho_test from group hive_test_group1; +show grant group hive_test_group1 on table src_autho_test; +show grant group hive_test_group1 on table src_autho_test(key); + +--role +create role src_role; +grant role src_role to user hive_test_user; +show role grant user hive_test_user; + +--column grant to role + +grant `select`(key) on table src_autho_test to role src_role; + +show grant role src_role on table src_autho_test; +show grant role src_role on table src_autho_test(key); + +select key from src_autho_test order by key limit 20; + +revoke `select`(key) on table src_autho_test from role src_role; + +--table grant to role + +grant `select` on table src_autho_test to role src_role; + +select key from src_autho_test order by key 
limit 20; + +show grant role src_role on table src_autho_test; +show grant role src_role on table src_autho_test(key); +revoke `select` on table src_autho_test from role src_role; + +-- drop role +drop role src_role; + +set hive.security.authorization.enabled=false; +drop table src_autho_test; \ No newline at end of file Index: ql/src/test/queries/clientpositive/authorization_2.q =================================================================== --- ql/src/test/queries/clientpositive/authorization_2.q (revision 0) +++ ql/src/test/queries/clientpositive/authorization_2.q (revision 0) @@ -0,0 +1,107 @@ +create table authorization_part (key int, value string) partitioned by (ds string); +set hive.security.authorization.enabled=true; + +-- column grant to user +grant `Create` on table authorization_part to user hive_test_user; +grant `Overwrite` on table authorization_part to user hive_test_user; +grant `Drop` on table authorization_part to user hive_test_user; +grant `select` on table src to user hive_test_user; + +show grant user hive_test_user on table authorization_part; + +alter table authorization_part add partition (ds='2010'); +show grant user hive_test_user on table authorization_part partition (ds='2010'); + +grant `select`(key) on table authorization_part to user hive_test_user; +alter table authorization_part drop partition (ds='2010'); +insert overwrite table authorization_part partition (ds='2010') select key, value from src; +show grant user hive_test_user on table authorization_part(key) partition (ds='2010'); +show grant user hive_test_user on table authorization_part(key); +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select`(key) on table authorization_part from user hive_test_user; +show grant user hive_test_user on table authorization_part(key); +show grant user hive_test_user on table authorization_part(key) partition (ds='2010'); + +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select`(key) on table authorization_part partition (ds='2010') from user hive_test_user; +show grant user hive_test_user on table authorization_part(key) partition (ds='2010'); + +alter table authorization_part drop partition (ds='2010'); + +-- table grant to user +show grant user hive_test_user on table authorization_part; + +alter table authorization_part add partition (ds='2010'); +show grant user hive_test_user on table authorization_part partition (ds='2010'); + +grant `select` on table authorization_part to user hive_test_user; +alter table authorization_part drop partition (ds='2010'); +insert overwrite table authorization_part partition (ds='2010') select key, value from src; +show grant user hive_test_user on table authorization_part partition (ds='2010'); +show grant user hive_test_user on table authorization_part; +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select` on table authorization_part from user hive_test_user; +show grant user hive_test_user on table authorization_part; +show grant user hive_test_user on table authorization_part partition (ds='2010'); + +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select` on table authorization_part partition (ds='2010') from user hive_test_user; +show grant user hive_test_user on table authorization_part partition (ds='2010'); + +alter table authorization_part drop partition (ds='2010'); + +-- column grant to group + +show grant group hive_test_group1 on table authorization_part; + 
+alter table authorization_part add partition (ds='2010'); +show grant group hive_test_group1 on table authorization_part partition (ds='2010'); + +grant `select`(key) on table authorization_part to group hive_test_group1; +alter table authorization_part drop partition (ds='2010'); +insert overwrite table authorization_part partition (ds='2010') select key, value from src; +show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010'); +show grant group hive_test_group1 on table authorization_part(key); +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select`(key) on table authorization_part from group hive_test_group1; +show grant group hive_test_group1 on table authorization_part(key); +show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010'); + +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select`(key) on table authorization_part partition (ds='2010') from group hive_test_group1; +show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010'); + +alter table authorization_part drop partition (ds='2010'); + +-- table grant to group +show grant group hive_test_group1 on table authorization_part; + +alter table authorization_part add partition (ds='2010'); +show grant group hive_test_group1 on table authorization_part partition (ds='2010'); + +grant `select` on table authorization_part to group hive_test_group1; +alter table authorization_part drop partition (ds='2010'); +insert overwrite table authorization_part partition (ds='2010') select key, value from src; +show grant group hive_test_group1 on table authorization_part partition (ds='2010'); +show grant group hive_test_group1 on table authorization_part; +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select` on table authorization_part from group hive_test_group1; +show grant group hive_test_group1 on table authorization_part; +show grant group hive_test_group1 on table authorization_part partition (ds='2010'); + +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select` on table authorization_part partition (ds='2010') from group hive_test_group1; +show grant group hive_test_group1 on table authorization_part partition (ds='2010'); + + +revoke `select` on table src from user hive_test_user; +set hive.security.authorization.enabled=false; +drop table authorization_part; \ No newline at end of file Index: ql/src/test/queries/clientpositive/input19.q =================================================================== --- ql/src/test/queries/clientpositive/input19.q (revision 1036686) +++ ql/src/test/queries/clientpositive/input19.q (working copy) @@ -1,5 +1,5 @@ -create table apachelog(ipaddress STRING,identd STRING,user STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE; +create table apachelog(ipaddress STRING,identd STRING,user_name STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= 
'("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE; LOAD DATA LOCAL INPATH '../data/files/apache.access.log' INTO TABLE apachelog; SELECT a.* FROM apachelog a; Index: ql/src/test/queries/clientpositive/show_indexes_edge_cases.q =================================================================== --- ql/src/test/queries/clientpositive/show_indexes_edge_cases.q (revision 1036686) +++ ql/src/test/queries/clientpositive/show_indexes_edge_cases.q (working copy) @@ -21,5 +21,10 @@ EXPLAIN SHOW INDEXES ON show_idx_empty; SHOW INDEXES ON show_idx_empty; +DROP INDEX idx_comment on show_idx_empty; +DROP INDEX idx_compound on show_idx_empty; DROP TABLE show_idx_empty; + +DROP INDEX idx_1 on show_idx_full; +DROP INDEX idx_2 on show_idx_full; DROP TABLE show_idx_full; \ No newline at end of file Index: ql/src/test/results/clientnegative/authorization_part.q.out =================================================================== --- ql/src/test/results/clientnegative/authorization_part.q.out (revision 0) +++ ql/src/test/results/clientnegative/authorization_part.q.out (revision 0) @@ -0,0 +1,221 @@ +PREHOOK: query: create table authorization_part_fail (key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_part_fail (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_part_fail +PREHOOK: query: grant `Create` on table authorization_part_fail to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Create` on table authorization_part_fail to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `Overwrite` on table authorization_part_fail to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Overwrite` on table authorization_part_fail to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `Drop` on table authorization_part_fail to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Drop` on table authorization_part_fail to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `select` on table src to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table src to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: -- column grant to group + +grant `select`(key) on table authorization_part_fail to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: -- column grant to group + +grant `select`(key) on table authorization_part_fail to group hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `select` on table authorization_part_fail to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table authorization_part_fail to group hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant group hive_test_group1 on table authorization_part_fail +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part_fail +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_part_fail +userName hive_test_group1 +isRole false +isGroup true +privileges Select +grantTime 1290044904 +grantor +PREHOOK: query: insert overwrite table authorization_part_fail partition (ds='2010') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part_fail@ds=2010 
+POSTHOOK: query: insert overwrite table authorization_part_fail partition (ds='2010') select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@authorization_part_fail@ds=2010 +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part_fail partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part_fail partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part_fail +partition ds=2010 +userName hive_test_group1 +isRole false +isGroup true +privileges Select +grantTime 1290044912 +grantor +PREHOOK: query: select key, value from authorization_part_fail where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part_fail@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-48-33_128_903153717003879345/-mr-10000 +POSTHOOK: query: select key, value from authorization_part_fail where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part_fail@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-48-33_128_903153717003879345/-mr-10000 +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 +0 val_0 +0 val_0 +2 val_2 +4 val_4 +5 val_5 +5 val_5 +5 val_5 +8 val_8 +9 val_9 +10 val_10 +11 val_11 +12 val_12 +12 val_12 +15 val_15 +15 val_15 +17 val_17 +18 val_18 +18 val_18 +19 val_19 +PREHOOK: query: insert overwrite table authorization_part_fail partition (ds='2011') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part_fail@ds=2011 +POSTHOOK: query: insert overwrite table authorization_part_fail partition (ds='2011') select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@authorization_part_fail@ds=2011 +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: 
authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2011') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2011') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part_fail partition (ds='2011') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part_fail partition (ds='2011') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part_fail +partition ds=2011 +userName hive_test_group1 +isRole false +isGroup true +privileges Select +grantTime 1290044926 +grantor +PREHOOK: query: select key, value from authorization_part_fail where ds='2011' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part_fail@ds=2011 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-48-46_778_1863758157097193316/-mr-10000 +POSTHOOK: query: select key, value from authorization_part_fail where ds='2011' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part_fail@ds=2011 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-48-46_778_1863758157097193316/-mr-10000 +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 +0 val_0 +0 val_0 +2 val_2 +4 val_4 +5 val_5 
+5 val_5 +5 val_5 +8 val_8 +9 val_9 +10 val_10 +11 val_11 +12 val_12 +12 val_12 +15 val_15 +15 val_15 +17 val_17 +18 val_18 +18 val_18 +19 val_19 +PREHOOK: query: select key,value, ds from authorization_part_fail where ds>='2010' order by key, ds limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part_fail@ds=2010 +PREHOOK: Input: default@authorization_part_fail@ds=2011 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-48-52_286_1335577389098462206/-mr-10000 +POSTHOOK: query: select key,value, ds from authorization_part_fail where ds>='2010' order by key, ds limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part_fail@ds=2010 +POSTHOOK: Input: default@authorization_part_fail@ds=2011 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-48-52_286_1335577389098462206/-mr-10000 +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 2010 +0 val_0 2010 +0 val_0 2010 +0 val_0 2011 +0 val_0 2011 +0 val_0 2011 +2 val_2 2010 +2 val_2 2011 +4 val_4 2010 +4 val_4 2011 +5 val_5 2010 +5 val_5 2010 +5 val_5 2010 +5 val_5 2011 +5 val_5 2011 +5 val_5 2011 +8 val_8 2010 +8 val_8 2011 +9 val_9 2010 +9 val_9 2011 +PREHOOK: query: revoke `select` on table authorization_part_fail partition (ds='2010') from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table authorization_part_fail partition (ds='2010') from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +Authrization failed:No privilege 'Select' found for inputs { database:default, table:authorization_part_fail, partitionName:ds=2010, columnName:value}. Use show grant to get more details. 
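The expected failure above is driven by the partition-level revoke: judging from this output, a partition created after a table-level Select grant carries that privilege, and once `select` is revoked on partition ds='2010' the group no longer has any privilege covering that partition's columns, so the scan over ds>='2010' is rejected for ds=2010 while ds=2011 still passes. A minimal sketch of the same sequence, using a hypothetical table name (authz_demo) and only statements already exercised by these .q files; this is an illustration, not part of the patch:

-- sketch only; assumes the hive_test_user / hive_test_group1 test principals
-- and the standard src table used throughout these tests
create table authz_demo (key int, value string) partitioned by (ds string);
set hive.security.authorization.enabled=true;
grant `Create` on table authz_demo to user hive_test_user;
grant `Overwrite` on table authz_demo to user hive_test_user;
grant `select` on table src to user hive_test_user;
grant `select` on table authz_demo to group hive_test_group1;
insert overwrite table authz_demo partition (ds='2010') select key, value from src;
-- the new partition carries the table-level Select grant, so this succeeds
select key, value from authz_demo where ds='2010' order by key limit 20;
revoke `select` on table authz_demo partition (ds='2010') from group hive_test_group1;
-- the partition-level privilege is now gone, so the same query is rejected
-- with a "No privilege 'Select' found" error for this partition
select key, value from authz_demo where ds='2010' order by key limit 20;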
Index: ql/src/test/results/clientpositive/alter4.q.out =================================================================== --- ql/src/test/results/clientpositive/alter4.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/alter4.q.out (working copy) @@ -10,13 +10,13 @@ key int value string -Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:thiruvel, createTime:1286800231, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1286800231}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:heyongqiang, createTime:1290068480, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290068480}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@set_bucketing_test PREHOOK: Output: default@set_bucketing_test POSTHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@set_bucketing_test POSTHOOK: Output: default@set_bucketing_test PREHOOK: query: DESCRIBE EXTENDED set_bucketing_test @@ -26,7 +26,7 @@ key int value string -Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:thiruvel, createTime:1286800231, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=thiruvel, last_modified_time=1286800231, transient_lastDdlTime=1286800231}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, 
owner:heyongqiang, createTime:1290068480, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=heyongqiang, last_modified_time=1290068480, transient_lastDdlTime=1290068480}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: -- Cleanup DROP TABLE set_bucketing_test PREHOOK: type: DROPTABLE @@ -77,13 +77,13 @@ key int value string -Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:thiruvel, createTime:1286800232, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1286800232}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:heyongqiang, createTime:1290068481, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290068481}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: alter4_db@set_bucketing_test PREHOOK: Output: alter4_db@set_bucketing_test POSTHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: alter4_db@set_bucketing_test POSTHOOK: Output: alter4_db@set_bucketing_test PREHOOK: query: DESCRIBE EXTENDED set_bucketing_test @@ -93,7 +93,7 @@ key int value string -Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:thiruvel, createTime:1286800232, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], 
location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=thiruvel, last_modified_time=1286800232, transient_lastDdlTime=1286800232}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:heyongqiang, createTime:1290068481, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=heyongqiang, last_modified_time=1290068481, transient_lastDdlTime=1290068481}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: DROP TABLE set_bucketing_test PREHOOK: type: DROPTABLE PREHOOK: Input: alter4_db@set_bucketing_test Index: ql/src/test/results/clientpositive/authorization_1.q.out =================================================================== --- ql/src/test/results/clientpositive/authorization_1.q.out (revision 0) +++ ql/src/test/results/clientpositive/authorization_1.q.out (revision 0) @@ -0,0 +1,435 @@ +PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create table src_autho_test as select * from src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_autho_test +PREHOOK: query: grant `drop` on table src_autho_test to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `drop` on table src_autho_test to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: --table grant to user + +grant `select` on table src_autho_test to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --table grant to user + +grant `select` on table src_autho_test to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290054119 +grantor + +database default +table src_autho_test +userName hive_test_user +isRole false +isGroup false +privileges Select +grantTime 1290054119 +grantor +PREHOOK: query: show grant user hive_test_user on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: select key from 
src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-21-59_301_6646767296108941712/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-21-59_301_6646767296108941712/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: revoke `select` on table src_autho_test from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table src_autho_test from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290054119 +grantor +PREHOOK: query: show grant user hive_test_user on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: --column grant to user + +grant `select`(key) on table src_autho_test to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --column grant to user + +grant `select`(key) on table src_autho_test to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290054119 +grantor +PREHOOK: query: show grant user hive_test_user on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +columnName key +userName hive_test_user +isRole false +isGroup false +privileges Select +grantTime 1290054125 +grantor +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-22-05_277_8944836447004950426/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-22-05_277_8944836447004950426/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: revoke `select`(key) on table src_autho_test from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table src_autho_test from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +userName 
hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290054119 +grantor +PREHOOK: query: show grant user hive_test_user on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: --table grant to group + +grant `select` on table src_autho_test to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --table grant to group + +grant `select` on table src_autho_test to group hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +userName hive_test_group1 +isRole false +isGroup true +privileges Select +grantTime 1290054131 +grantor +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-22-11_230_4053213311608085494/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-22-11_230_4053213311608085494/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: revoke `select` on table src_autho_test from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table src_autho_test from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: --column grant to group + +grant `select`(key) on table src_autho_test to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --column grant to group + +grant `select`(key) on table src_autho_test to group hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-22-17_512_4648862264183836693/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY 
+POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-22-17_512_4648862264183836693/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: revoke `select`(key) on table src_autho_test from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table src_autho_test from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: --role +create role src_role +PREHOOK: type: CREATEROLE +POSTHOOK: query: --role +create role src_role +POSTHOOK: type: CREATEROLE +PREHOOK: query: grant role src_role to user hive_test_user +PREHOOK: type: GRANT_ROLE +POSTHOOK: query: grant role src_role to user hive_test_user +POSTHOOK: type: GRANT_ROLE +PREHOOK: query: show role grant user hive_test_user +PREHOOK: type: SHOW_ROLE_GRANT +POSTHOOK: query: show role grant user hive_test_user +POSTHOOK: type: SHOW_ROLE_GRANT +role name:src_role +database:default +database:default +PREHOOK: query: --column grant to role + +grant `select`(key) on table src_autho_test to role src_role +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --column grant to role + +grant `select`(key) on table src_autho_test to role src_role +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant role src_role on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant role src_role on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: show grant role src_role on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant role src_role on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-22-23_448_4304254819185136892/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-22-23_448_4304254819185136892/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: revoke `select`(key) on table src_autho_test from role src_role +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table src_autho_test from role src_role +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: --table grant to role + +grant `select` on table src_autho_test to role src_role +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --table grant to role + +grant `select` on table src_autho_test to role src_role +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: 
file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-22-28_949_4896291117896236466/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_20-22-28_949_4896291117896236466/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: show grant role src_role on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant role src_role on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +userName src_role +isRole true +isGroup false +privileges Select +grantTime 1290054148 +grantor +PREHOOK: query: show grant role src_role on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant role src_role on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: revoke `select` on table src_autho_test from role src_role +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table src_autho_test from role src_role +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: -- drop role +drop role src_role +PREHOOK: type: DROPROLE +POSTHOOK: query: -- drop role +drop role src_role +POSTHOOK: type: DROPROLE +PREHOOK: query: drop table src_autho_test +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: default@src_autho_test +POSTHOOK: query: drop table src_autho_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: default@src_autho_test Index: ql/src/test/results/clientpositive/authorization_2.q.out =================================================================== --- ql/src/test/results/clientpositive/authorization_2.q.out (revision 0) +++ ql/src/test/results/clientpositive/authorization_2.q.out (revision 0) @@ -0,0 +1,1179 @@ +PREHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_part +PREHOOK: query: -- column grant to user +grant `Create` on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: -- column grant to user +grant `Create` on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `Overwrite` on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Overwrite` on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `Drop` on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Drop` on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `select` on table src to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table src to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup 
false +privileges Create +grantTime 1290043818 +grantor + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290043818 +grantor + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges Overwrite +grantTime 1290043818 +grantor +PREHOOK: query: alter table authorization_part add partition (ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@authorization_part +POSTHOOK: query: alter table authorization_part add partition (ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Create +grantTime 1290043819 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290043819 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Overwrite +grantTime 1290043819 +grantor +PREHOOK: query: grant `select`(key) on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select`(key) on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +columnName key +userName hive_test_user +isRole false +isGroup false +privileges Select +grantTime 
1290043830 +grantor +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +columnName key +userName hive_test_user +isRole false +isGroup false +privileges Select +grantTime 1290043819 +grantor +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-30-31_155_2709803620287718404/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-30-31_155_2709803620287718404/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select`(key) on table authorization_part from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table authorization_part from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +columnName key +userName hive_test_user +isRole false +isGroup false +privileges Select +grantTime 1290043830 +grantor +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-30-37_334_1717795772174468760/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-30-37_334_1717795772174468760/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select`(key) on table authorization_part partition (ds='2010') from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table authorization_part partition (ds='2010') from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- table grant to user +show grant user hive_test_user on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: -- table grant to user +show grant user hive_test_user on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges Create +grantTime 1290043818 +grantor + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290043818 +grantor + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges 
Overwrite +grantTime 1290043818 +grantor +PREHOOK: query: alter table authorization_part add partition (ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@authorization_part +POSTHOOK: query: alter table authorization_part add partition (ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Create +grantTime 1290043843 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290043843 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Overwrite +grantTime 1290043843 +grantor +PREHOOK: query: grant `select` on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value 
SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Create +grantTime 1290043853 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290043853 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Overwrite +grantTime 1290043853 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Select +grantTime 1290043853 +grantor +PREHOOK: query: show grant user hive_test_user on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges Create +grantTime 1290043818 +grantor + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290043818 +grantor + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges Overwrite +grantTime 1290043818 +grantor + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges Select +grantTime 1290043843 +grantor +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-30-53_901_3485759327509197039/-mr-10000 +POSTHOOK: query: select key from authorization_part 
where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-30-53_901_3485759327509197039/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select` on table authorization_part from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table authorization_part from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges Create +grantTime 1290043818 +grantor + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290043818 +grantor + +database default +table authorization_part +userName hive_test_user +isRole false +isGroup false +privileges Overwrite +grantTime 1290043818 +grantor +PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] 
+POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Create +grantTime 1290043853 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290043853 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Overwrite +grantTime 1290043853 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Select +grantTime 1290043853 +grantor +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-31-00_744_7580912878961416272/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-31-00_744_7580912878961416272/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select` on table authorization_part partition (ds='2010') from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table authorization_part partition (ds='2010') from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Create +grantTime 1290043853 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Drop +grantTime 1290043853 +grantor + +database default +table authorization_part +partition ds=2010 +userName hive_test_user +isRole false +isGroup false +privileges Overwrite +grantTime 1290043853 +grantor +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- column grant to group + +show grant group hive_test_group1 on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: -- column grant to group + +show grant group hive_test_group1 on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part add partition (ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@authorization_part +POSTHOOK: query: alter table authorization_part add partition (ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group 
hive_test_group1 on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: grant `select`(key) on table authorization_part to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select`(key) on table authorization_part to group hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: 
Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-31-17_728_8464515309351798612/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-31-17_728_8464515309351798612/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, 
type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select`(key) on table authorization_part from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table authorization_part from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select key from authorization_part where ds='2010' order by key 
limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-31-27_756_1143117991430950607/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-31-27_756_1143117991430950607/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select`(key) on table authorization_part partition (ds='2010') from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table authorization_part partition (ds='2010') from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: 
authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- table grant to group +show grant group hive_test_group1 on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: -- table grant to group +show grant group hive_test_group1 on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part add partition (ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@authorization_part +POSTHOOK: query: alter table authorization_part add partition (ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part 
PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: grant `select` on table authorization_part to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table authorization_part to group hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table 
authorization_part partition (ds='2010') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +userName hive_test_group1 +isRole false +isGroup true +privileges Select +grantTime 1290043907 +grantor +PREHOOK: query: show grant group hive_test_group1 on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part 
PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +userName hive_test_group1 +isRole false +isGroup true +privileges Select +grantTime 1290043898 +grantor +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-31-48_083_526417722386692409/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-31-48_083_526417722386692409/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select` on table authorization_part from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table authorization_part from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: 
authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +userName hive_test_group1 +isRole false +isGroup true +privileges Select +grantTime 1290043907 +grantor +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-31-57_336_2449808865013005476/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-17_17-31-57_336_2449808865013005476/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select` on table authorization_part partition (ds='2010') from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table authorization_part partition (ds='2010') from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: revoke `select` on table src from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table src from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: drop table authorization_part +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part +POSTHOOK: query: drop table authorization_part +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/bucket_groupby.q.out =================================================================== --- ql/src/test/results/clientpositive/bucket_groupby.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/bucket_groupby.q.out (working copy) @@ -11,7 +11,7 @@ value string ds string -Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:sdong, createTime:1288389460, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/data/users/sdong/www/hive-trunk/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{transient_lastDdlTime=1288389460}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:heyongqiang, createTime:1290108934, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{transient_lastDdlTime=1290108934}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: insert overwrite table clustergroupby partition (ds='100') select key, value from src sort by key PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -107,11 +107,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='100' group by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=100 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-57-45_369_7380463323239974897/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-35-47_282_4113045751057786056/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='100' group by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=100 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-57-45_369_7380463323239974897/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-35-47_282_4113045751057786056/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] 0 3 @@ -125,11 
+125,11 @@ 113 2 114 1 PREHOOK: query: alter table clustergroupby clustered by (key) into 1 buckets -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@clustergroupby PREHOOK: Output: default@clustergroupby POSTHOOK: query: alter table clustergroupby clustered by (key) into 1 buckets -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@clustergroupby POSTHOOK: Output: default@clustergroupby POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -144,7 +144,7 @@ value string ds string -Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:sdong, createTime:1288389460, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/data/users/sdong/www/hive-trunk/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, last_modified_by=sdong, last_modified_time=1288389468, transient_lastDdlTime=1288389468, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:heyongqiang, createTime:1290108934, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, last_modified_by=heyongqiang, last_modified_time=1290108955, transient_lastDdlTime=1290108955, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: insert overwrite table clustergroupby partition (ds='101') select key, value from src distribute by key PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -246,11 +246,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='101' group by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-57-53_750_4694546524307257085/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-06_455_5652519335128156482/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='101' group by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: 
file:/tmp/sdong/hive_2010-10-29_14-57-53_750_4694546524307257085/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-06_455_5652519335128156482/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -354,11 +354,11 @@ PREHOOK: query: select length(key), count(1) from clustergroupby where ds='101' group by length(key) limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-57-57_183_3467407082757519286/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-13_777_4853958758753332533/-mr-10000 POSTHOOK: query: select length(key), count(1) from clustergroupby where ds='101' group by length(key) limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-57-57_183_3467407082757519286/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-13_777_4853958758753332533/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -453,11 +453,11 @@ PREHOOK: query: select abs(length(key)), count(1) from clustergroupby where ds='101' group by abs(length(key)) limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-00_556_1225036933831928400/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-22_353_1148410217887255773/-mr-10000 POSTHOOK: query: select abs(length(key)), count(1) from clustergroupby where ds='101' group by abs(length(key)) limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-00_556_1225036933831928400/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-22_353_1148410217887255773/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -562,11 +562,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='101' group by key,3 limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-03_994_2628288731314011109/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-29_836_2726844280049537094/-mr-10000 POSTHOOK: query: 
select key, count(1) from clustergroupby where ds='101' group by key,3 limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-03_994_2628288731314011109/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-29_836_2726844280049537094/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -675,11 +675,11 @@ PREHOOK: query: select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-07_367_3684028022557451628/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-36_869_6506961229190926826/-mr-10000 POSTHOOK: query: select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-07_367_3684028022557451628/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-36_869_6506961229190926826/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -777,12 +777,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=100 PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-10_796_7851925785141685773/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-45_158_7554312247658259364/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby group by key POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=100 POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-10_796_7851925785141685773/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-45_158_7554312247658259364/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1185,12 +1185,12 @@ PREHOOK: query: --sort columns-- alter table clustergroupby clustered by (value) sorted by (key, value) into 1 buckets -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@clustergroupby PREHOOK: Output: default@clustergroupby 
POSTHOOK: query: --sort columns-- alter table clustergroupby clustered by (value) sorted by (key, value) into 1 buckets -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@clustergroupby POSTHOOK: Output: default@clustergroupby POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1209,7 +1209,7 @@ value string ds string -Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:sdong, createTime:1288389460, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/data/users/sdong/www/hive-trunk/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[value], sortCols:[Order(col:key, order:1), Order(col:value, order:1)], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, last_modified_by=sdong, last_modified_time=1288389494, transient_lastDdlTime=1288389494, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:heyongqiang, createTime:1290108934, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[value], sortCols:[Order(col:key, order:1), Order(col:value, order:1)], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, last_modified_by=heyongqiang, last_modified_time=1290109014, transient_lastDdlTime=1290109014, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: insert overwrite table clustergroupby partition (ds='102') select key, value from src distribute by value sort by key, value PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -1313,11 +1313,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='102' group by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=102 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-19_617_397682497046947245/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-04_173_5662986070533466247/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='102' group by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=102 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-19_617_397682497046947245/-mr-10000 
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-04_173_5662986070533466247/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1423,11 +1423,11 @@ PREHOOK: query: select value, count(1) from clustergroupby where ds='102' group by value limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=102 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-23_046_1077430162048304187/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-11_984_9094618456601035974/-mr-10000 POSTHOOK: query: select value, count(1) from clustergroupby where ds='102' group by value limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=102 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-23_046_1077430162048304187/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-11_984_9094618456601035974/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1543,11 +1543,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='102' group by key, value limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=102 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-28_476_3643193095660435074/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-18_617_5093814006573436611/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='102' group by key, value limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=102 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-28_476_3643193095660435074/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-18_617_5093814006573436611/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1565,11 +1565,11 @@ 113 2 114 1 PREHOOK: query: alter table clustergroupby clustered by (value, key) sorted by (key) into 1 buckets -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@clustergroupby PREHOOK: Output: default@clustergroupby POSTHOOK: query: alter table clustergroupby clustered by (value, key) sorted by (key) into 1 buckets -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@clustergroupby POSTHOOK: Output: default@clustergroupby POSTHOOK: Lineage: clustergroupby 
PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1592,7 +1592,7 @@ value string ds string -Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:sdong, createTime:1288389460, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/data/users/sdong/www/hive-trunk/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[value, key], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=3, numFiles=3, last_modified_by=sdong, last_modified_time=1288389511, transient_lastDdlTime=1288389511, numRows=1500, totalSize=17436}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:heyongqiang, createTime:1290108934, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[value, key], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=3, numFiles=3, last_modified_by=heyongqiang, last_modified_time=1290109047, transient_lastDdlTime=1290109047, numRows=1500, totalSize=17436}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: insert overwrite table clustergroupby partition (ds='103') select key, value from src distribute by value, key sort by key PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -1700,11 +1700,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='103' group by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=103 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-36_974_789554075807114106/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-38_066_2359801468488363119/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='103' group by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=103 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-36_974_789554075807114106/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-38_066_2359801468488363119/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1824,11 +1824,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='103' group by value, key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=103 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-40_621_2070392858793462231/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-45_975_5803139738369049064/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='103' group by value, key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=103 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-40_621_2070392858793462231/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-45_975_5803139738369049064/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/create_default_prop.q.out =================================================================== --- ql/src/test/results/clientpositive/create_default_prop.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/create_default_prop.q.out (working copy) @@ -9,7 +9,7 @@ POSTHOOK: type: DESCTABLE a string -Detailed Table Information Table(tableName:table_p1, dbName:default, owner:thiruvel, createTime:1286825949, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/table_p1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{p1=v1, transient_lastDdlTime=1286825949, P2=v21=v22=v23}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:table_p1, dbName:default, owner:heyongqiang, createTime:1290111690, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/table_p1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{p1=v1, transient_lastDdlTime=1290111690, P2=v21=v22=v23}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: CREATE TABLE table_p2 LIKE table_p1 PREHOOK: type: CREATETABLE POSTHOOK: query: CREATE TABLE table_p2 LIKE table_p1 @@ -21,12 
+21,12 @@ POSTHOOK: type: DESCTABLE a string -Detailed Table Information Table(tableName:table_p2, dbName:default, owner:thiruvel, createTime:1286825949, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/table_p2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, p1=v1, transient_lastDdlTime=1286825949, P2=v21=v22=v23}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:table_p2, dbName:default, owner:heyongqiang, createTime:1290111690, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/table_p2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290111690}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: CREATE TABLE table_p3 AS SELECT * FROM table_p1 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@table_p1 POSTHOOK: query: CREATE TABLE table_p3 AS SELECT * FROM table_p1 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@table_p1 POSTHOOK: Output: default@table_p3 PREHOOK: query: DESC EXTENDED table_p3 @@ -35,4 +35,4 @@ POSTHOOK: type: DESCTABLE a string -Detailed Table Information Table(tableName:table_p3, dbName:default, owner:thiruvel, createTime:1286825953, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/table_p3, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{p3=v3, transient_lastDdlTime=1286825953}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:table_p3, dbName:default, owner:heyongqiang, createTime:1290111696, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/table_p3, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), 
bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{p3=v3, transient_lastDdlTime=1290111696}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/ctas.q.out =================================================================== --- ql/src/test/results/clientpositive/ctas.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/ctas.q.out (working copy) @@ -6,15 +6,15 @@ PREHOOK: query: select * from nzhang_Tmp PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_tmp -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-08-50_551_5274699533452501897/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-38_152_6607893365576187007/-mr-10000 POSTHOOK: query: select * from nzhang_Tmp POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_tmp -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-08-50_551_5274699533452501897/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-38_152_6607893365576187007/-mr-10000 PREHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT ABSTRACT SYNTAX TREE: (TOK_CREATETABLE nzhang_CTAS1 TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key) k) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) @@ -64,7 +64,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/nzhang/hive_2010-09-14_16-08-50_746_7813189274106710723/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-38_294_2534653608830924093/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -92,7 +92,7 @@ Move Operator files: hdfs directory: true - destination: pfile:///data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/nzhang_ctas1 + destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/nzhang_ctas1 Stage: Stage-3 Create Table Operator: @@ -107,20 +107,20 @@ PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@nzhang_CTAS1 PREHOOK: query: select * from nzhang_CTAS1 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_ctas1 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-08-57_963_8224718269640555492/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-54_890_340708639379092873/-mr-10000 POSTHOOK: query: select * from nzhang_CTAS1 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_ctas1 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-08-57_963_8224718269640555492/-mr-10000 +POSTHOOK: Output: 
file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-54_890_340708639379092873/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -132,9 +132,9 @@ 104 val_104 104 val_104 PREHOOK: query: explain create table nzhang_ctas2 as select * from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain create table nzhang_ctas2 as select * from src sort by key, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT ABSTRACT SYNTAX TREE: (TOK_CREATETABLE nzhang_ctas2 TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) @@ -184,7 +184,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/nzhang/hive_2010-09-14_16-08-58_369_8941154562114122989/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-55_113_2975775360233211417/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -212,7 +212,7 @@ Move Operator files: hdfs directory: true - destination: pfile:///data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/nzhang_ctas2 + destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/nzhang_ctas2 Stage: Stage-3 Create Table Operator: @@ -227,20 +227,20 @@ PREHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@nzhang_ctas2 PREHOOK: query: select * from nzhang_ctas2 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_ctas2 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-06_188_7275645807830573530/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-10_092_5599426394139957980/-mr-10000 POSTHOOK: query: select * from nzhang_ctas2 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_ctas2 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-06_188_7275645807830573530/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-10_092_5599426394139957980/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -252,9 +252,9 @@ 104 val_104 104 val_104 PREHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT ABSTRACT SYNTAX TREE: (TOK_CREATETABLE nzhang_ctas3 TOK_LIKETABLE (TOK_TABLESERIALIZER (TOK_SERDENAME "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe")) TOK_TBLRCFILE (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) 
(TOK_SELECT (TOK_SELEXPR (/ (TOK_TABLE_OR_COL key) 2) half_key) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_TABLE_OR_COL value) "_con") conb)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL half_key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL conb))) (TOK_LIMIT 10)))) @@ -304,7 +304,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/nzhang/hive_2010-09-14_16-09-06_591_311725779652798393/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-10_269_5024522055775704015/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -332,7 +332,7 @@ Move Operator files: hdfs directory: true - destination: pfile:///data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/nzhang_ctas3 + destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/nzhang_ctas3 Stage: Stage-3 Create Table Operator: @@ -348,20 +348,20 @@ PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@nzhang_ctas3 PREHOOK: query: select * from nzhang_ctas3 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_ctas3 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-14_409_3860619873030897976/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-26_253_5950487784027845463/-mr-10000 POSTHOOK: query: select * from nzhang_ctas3 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_ctas3 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-14_409_3860619873030897976/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-26_253_5950487784027845463/-mr-10000 0.0 val_0_con 0.0 val_0_con 0.0 val_0_con @@ -390,11 +390,11 @@ PREHOOK: query: select * from nzhang_ctas3 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_ctas3 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-15_055_2704821239289766796/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-26_587_8435204750824479169/-mr-10000 POSTHOOK: query: select * from nzhang_ctas3 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_ctas3 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-15_055_2704821239289766796/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-26_587_8435204750824479169/-mr-10000 0.0 val_0_con 0.0 val_0_con 0.0 val_0_con @@ -406,9 +406,9 @@ 4.0 val_8_con 4.5 val_9_con PREHOOK: query: explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 
-POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT ABSTRACT SYNTAX TREE: (TOK_CREATETABLE nzhang_ctas4 TOK_LIKETABLE (TOK_TABLEROWFORMAT (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD ','))) TOK_TBLTEXTFILE (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) @@ -458,7 +458,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/nzhang/hive_2010-09-14_16-09-15_571_515417720676742183/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-26_831_6665219081002226800/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -486,7 +486,7 @@ Move Operator files: hdfs directory: true - destination: pfile:///data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/nzhang_ctas4 + destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/nzhang_ctas4 Stage: Stage-3 Create Table Operator: @@ -502,20 +502,20 @@ PREHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@nzhang_ctas4 PREHOOK: query: select * from nzhang_ctas4 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_ctas4 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-22_874_9020816893750253212/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_599_6843607487274708798/-mr-10000 POSTHOOK: query: select * from nzhang_ctas4 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_ctas4 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-22_874_9020816893750253212/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_599_6843607487274708798/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -527,9 +527,9 @@ 104 val_104 104 val_104 PREHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT ABSTRACT SYNTAX TREE: (TOK_CREATETABLE nzhang_ctas5 TOK_LIKETABLE (TOK_TABLEROWFORMAT (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD ',') (TOK_TABLEROWFORMATLINES '\012'))) TOK_TBLTEXTFILE (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) @@ 
-569,9 +569,9 @@ type: string Needs Tagging: false Path -> Alias: - pfile:/data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/src [src] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/src [src] Path -> Partition: - pfile:/data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/src + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/src Partition base file name: src input format: org.apache.hadoop.mapred.TextInputFormat @@ -582,12 +582,12 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/src + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/src name src serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1284504429 + transient_lastDdlTime 1290111684 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -598,12 +598,12 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/src + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/src name src serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1284504429 + transient_lastDdlTime 1290111684 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: src name: src @@ -613,7 +613,7 @@ File Output Operator compressed: false GlobalTableId: 0 - directory: file:/tmp/nzhang/hive_2010-09-14_16-09-23_273_3246979497224742620/-mr-10002 + directory: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_827_2917364249718384116/-mr-10002 NumFilesPerFileSink: 1 table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat @@ -629,7 +629,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/nzhang/hive_2010-09-14_16-09-23_273_3246979497224742620/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_827_2917364249718384116/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -645,9 +645,9 @@ type: string Needs Tagging: false Path -> Alias: - file:/tmp/nzhang/hive_2010-09-14_16-09-23_273_3246979497224742620/-mr-10002 [file:/tmp/nzhang/hive_2010-09-14_16-09-23_273_3246979497224742620/-mr-10002] + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_827_2917364249718384116/-mr-10002 [file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_827_2917364249718384116/-mr-10002] Path -> Partition: - file:/tmp/nzhang/hive_2010-09-14_16-09-23_273_3246979497224742620/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_827_2917364249718384116/-mr-10002 Partition base file name: -mr-10002 input format: org.apache.hadoop.mapred.SequenceFileInputFormat @@ -669,9 +669,9 @@ File Output Operator compressed: false GlobalTableId: 1 - directory: 
pfile:/data/users/nzhang/work/784/apache-hive/build/ql/scratchdir/hive_2010-09-14_16-09-23_273_3246979497224742620/-ext-10001 + directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-22-42_827_2917364249718384116/-ext-10001 NumFilesPerFileSink: 1 - Stats Publishing Key Prefix: pfile:/data/users/nzhang/work/784/apache-hive/build/ql/scratchdir/hive_2010-09-14_16-09-23_273_3246979497224742620/-ext-10001/ + Stats Publishing Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-22-42_827_2917364249718384116/-ext-10001/ table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -690,8 +690,8 @@ Move Operator files: hdfs directory: true - source: pfile:/data/users/nzhang/work/784/apache-hive/build/ql/scratchdir/hive_2010-09-14_16-09-23_273_3246979497224742620/-ext-10001 - destination: pfile:///data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/nzhang_ctas5 + source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-22-42_827_2917364249718384116/-ext-10001 + destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/nzhang_ctas5 Stage: Stage-3 Create Table Operator: @@ -709,10 +709,10 @@ PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@nzhang_ctas5 PREHOOK: query: create table nzhang_ctas6 (key string, `to` string) @@ -731,10 +731,10 @@ POSTHOOK: Lineage: nzhang_ctas6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: nzhang_ctas6.to SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@nzhang_ctas6 POSTHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@nzhang_ctas6 POSTHOOK: Output: default@nzhang_ctas7 POSTHOOK: Lineage: nzhang_ctas6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/input19.q.out =================================================================== --- ql/src/test/results/clientpositive/input19.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/input19.q.out (working copy) @@ -1,6 +1,6 @@ -PREHOOK: query: create table apachelog(ipaddress STRING,identd STRING,user STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE +PREHOOK: query: create table 
apachelog(ipaddress STRING,identd STRING,user_name STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE PREHOOK: type: CREATETABLE -POSTHOOK: query: create table apachelog(ipaddress STRING,identd STRING,user STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE +POSTHOOK: query: create table apachelog(ipaddress STRING,identd STRING,user_name STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: default@apachelog PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/apache.access.log' INTO TABLE apachelog @@ -11,9 +11,9 @@ PREHOOK: query: SELECT a.* FROM apachelog a PREHOOK: type: QUERY PREHOOK: Input: default@apachelog -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-26-57_493_8424717254986801325/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_13-03-09_967_1761875918055572728/-mr-10000 POSTHOOK: query: SELECT a.* FROM apachelog a POSTHOOK: type: QUERY POSTHOOK: Input: default@apachelog -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-26-57_493_8424717254986801325/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_13-03-09_967_1761875918055572728/-mr-10000 127.0.0.1 NULL frank 10/Oct/2000:13:55:36 -0700 GET /apache_pb.gif HTTP/1.0 200 2326 Index: ql/src/test/results/clientpositive/merge3.q.out =================================================================== --- ql/src/test/results/clientpositive/merge3.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/merge3.q.out (working copy) @@ -1,13 +1,13 @@ PREHOOK: query: create table merge_src as select key, value from srcpart where ds is not null -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: query: create table merge_src as select key, value from srcpart where ds is not null -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 @@ -40,11 +40,11 @@ PREHOOK: query: explain extended create table merge_src2 as select key, value from merge_src -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain extended create table merge_src2 as select key, value from merge_src -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT 
POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -78,9 +78,9 @@ File Output Operator compressed: false GlobalTableId: 1 - directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002 + directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002 NumFilesPerFileSink: 1 - Stats Publishing Key Prefix: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10001/ + Stats Publishing Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10001/ table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -93,9 +93,9 @@ MultiFileSpray: false Needs Tagging: false Path -> Alias: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src [merge_src] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src [merge_src] Path -> Partition: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src Partition base file name: merge_src input format: org.apache.hadoop.mapred.TextInputFormat @@ -106,12 +106,12 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src name merge_src serialization.ddl struct merge_src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454110 + transient_lastDdlTime 1290113676 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -122,12 +122,12 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src name merge_src serialization.ddl struct merge_src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454110 + transient_lastDdlTime 1290113676 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src name: merge_src @@ -139,15 +139,15 @@ Move Operator files: hdfs directory: true - source: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002 - destination: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10001 + 
source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002 + destination: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10001 Stage: Stage-0 Move Operator files: hdfs directory: true - source: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10001 - destination: pfile:///data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src2 + source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10001 + destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src2 Stage: Stage-5 Create Table Operator: @@ -163,11 +163,11 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002 File Output Operator compressed: false GlobalTableId: 0 - directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10001 + directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10001 NumFilesPerFileSink: 1 table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -181,9 +181,9 @@ MultiFileSpray: false Needs Tagging: false Path -> Alias: - pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002 [pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002 [pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002] Path -> Partition: - pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002 Partition base file name: -ext-10002 input format: org.apache.hadoop.mapred.TextInputFormat @@ -203,11 +203,11 @@ PREHOOK: query: create table merge_src2 as select key, value from merge_src -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@merge_src POSTHOOK: query: create table merge_src2 as select key, value from merge_src -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@merge_src POSTHOOK: Output: default@merge_src2 POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -217,10 +217,11 @@ PREHOOK: query: select * from merge_src2 PREHOOK: type: QUERY PREHOOK: Input: default@merge_src2 -PREHOOK: Output: file:/tmp/njain/hive_2010-11-10_21-42-09_717_694531801987422048/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-09_570_5014264450775842045/-mr-10000 POSTHOOK: query: select * from merge_src2 POSTHOOK: type: QUERY POSTHOOK: Input: default@merge_src2 
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-09_570_5014264450775842045/-mr-10000 POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -2284,9 +2285,9 @@ File Output Operator compressed: false GlobalTableId: 1 - directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002 + directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002 NumFilesPerFileSink: 1 - Stats Publishing Key Prefix: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10000/ + Stats Publishing Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10000/ table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2296,13 +2297,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2 + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2 name merge_src_part2 partition_columns ds serialization.ddl struct merge_src_part2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454130 + transient_lastDdlTime 1290113709 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part2 TotalFiles: 1 @@ -2310,10 +2311,10 @@ MultiFileSpray: false Needs Tagging: false Path -> Alias: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 [merge_src_part] - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 [merge_src_part] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 [merge_src_part] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 [merge_src_part] Path -> Partition: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 Partition base file name: ds=2008-04-08 input format: org.apache.hadoop.mapred.TextInputFormat @@ -2326,7 +2327,7 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part name merge_src_part numFiles 4 numPartitions 2 @@ -2336,7 +2337,7 @@ serialization.format 1 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 23248 - transient_lastDdlTime 1289454120 + transient_lastDdlTime 1290113692 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -2347,7 +2348,7 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part name merge_src_part numFiles 4 numPartitions 2 @@ -2357,11 +2358,11 @@ serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 23248 - transient_lastDdlTime 1289454120 + transient_lastDdlTime 1290113692 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part name: merge_src_part - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 Partition base file name: ds=2008-04-09 input format: org.apache.hadoop.mapred.TextInputFormat @@ -2374,7 +2375,7 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part name merge_src_part numFiles 4 numPartitions 2 @@ -2384,7 +2385,7 @@ serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 23248 - transient_lastDdlTime 1289454120 + transient_lastDdlTime 1290113692 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -2395,7 +2396,7 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part name merge_src_part numFiles 4 numPartitions 2 @@ -2405,7 +2406,7 @@ serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 23248 - transient_lastDdlTime 1289454120 + transient_lastDdlTime 1290113692 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part name: merge_src_part @@ -2417,8 +2418,8 @@ Move Operator files: hdfs directory: true - source: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002 - destination: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10000 + source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002 + destination: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10000 Stage: Stage-0 Move Operator @@ -2426,7 +2427,7 @@ partition: ds replace: true - source: 
pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10000 + source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10000 table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2436,29 +2437,29 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2 + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2 name merge_src_part2 partition_columns ds serialization.ddl struct merge_src_part2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454130 + transient_lastDdlTime 1290113709 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part2 - tmp directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10001 + tmp directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10001 Stage: Stage-2 Stats-Aggr Operator - Stats Aggregation Key Prefix: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10000/ + Stats Aggregation Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10000/ Stage: Stage-3 Map Reduce Alias -> Map Operator Tree: - pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002 File Output Operator compressed: false GlobalTableId: 0 - directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10000 + directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10000 NumFilesPerFileSink: 1 table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -2469,13 +2470,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2 + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2 name merge_src_part2 partition_columns ds serialization.ddl struct merge_src_part2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454130 + transient_lastDdlTime 1290113709 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part2 TotalFiles: 1 @@ -2483,9 +2484,9 @@ MultiFileSpray: false Needs Tagging: false Path -> Alias: - pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002 
[pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002 [pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002] Path -> Partition: - pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002 Partition base file name: -ext-10002 input format: org.apache.hadoop.mapred.TextInputFormat @@ -2496,13 +2497,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2 + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2 name merge_src_part2 partition_columns ds serialization.ddl struct merge_src_part2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454130 + transient_lastDdlTime 1290113709 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -2513,13 +2514,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2 + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2 name merge_src_part2 partition_columns ds serialization.ddl struct merge_src_part2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454130 + transient_lastDdlTime 1290113709 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part2 name: merge_src_part2 @@ -2566,11 +2567,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@merge_src_part2@ds=2008-04-08 PREHOOK: Input: default@merge_src_part2@ds=2008-04-09 -PREHOOK: Output: file:/tmp/njain/hive_2010-11-10_21-42-26_758_4308514117864932351/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-29_875_778506553533012381/-mr-10000 POSTHOOK: query: select * from merge_src_part2 where ds is not null POSTHOOK: type: QUERY POSTHOOK: Input: default@merge_src_part2@ds=2008-04-08 POSTHOOK: Input: default@merge_src_part2@ds=2008-04-09 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-29_875_778506553533012381/-mr-10000 POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -4674,10 +4676,10 @@ type: string Needs Tagging: false Path -> Alias: - 
pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 [s:merge_src_part] - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 [s:merge_src_part] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 [s:merge_src_part] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 [s:merge_src_part] Path -> Partition: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 Partition base file name: ds=2008-04-08 input format: org.apache.hadoop.mapred.TextInputFormat @@ -4690,7 +4692,7 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part name merge_src_part numFiles 4 numPartitions 2 @@ -4700,7 +4702,7 @@ serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 23248 - transient_lastDdlTime 1289454120 + transient_lastDdlTime 1290113692 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -4711,7 +4713,7 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part name merge_src_part numFiles 4 numPartitions 2 @@ -4721,11 +4723,11 @@ serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 23248 - transient_lastDdlTime 1289454120 + transient_lastDdlTime 1290113692 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part name: merge_src_part - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 Partition base file name: ds=2008-04-09 input format: org.apache.hadoop.mapred.TextInputFormat @@ -4738,7 +4740,7 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part name merge_src_part numFiles 4 numPartitions 2 @@ -4748,7 +4750,7 @@ serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 23248 - transient_lastDdlTime 1289454120 + transient_lastDdlTime 1290113692 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -4759,7 +4761,7 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location 
pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part name merge_src_part numFiles 4 numPartitions 2 @@ -4769,7 +4771,7 @@ serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe totalSize 23248 - transient_lastDdlTime 1289454120 + transient_lastDdlTime 1290113692 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part name: merge_src_part @@ -4787,9 +4789,9 @@ File Output Operator compressed: false GlobalTableId: 1 - directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002 + directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002 NumFilesPerFileSink: 1 - Stats Publishing Key Prefix: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10000/ + Stats Publishing Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10000/ table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -4799,13 +4801,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2 + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2 name merge_src_part2 partition_columns ds serialization.ddl struct merge_src_part2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454147 + transient_lastDdlTime 1290113731 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part2 TotalFiles: 1 @@ -4819,8 +4821,8 @@ Move Operator files: hdfs directory: true - source: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002 - destination: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10000 + source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002 + destination: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10000 Stage: Stage-0 Move Operator @@ -4828,7 +4830,7 @@ partition: ds replace: true - source: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10000 + source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10000 table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -4838,29 +4840,29 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2 + location 
pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2 name merge_src_part2 partition_columns ds serialization.ddl struct merge_src_part2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454147 + transient_lastDdlTime 1290113731 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part2 - tmp directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10001 + tmp directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10001 Stage: Stage-2 Stats-Aggr Operator - Stats Aggregation Key Prefix: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10000/ + Stats Aggregation Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10000/ Stage: Stage-3 Map Reduce Alias -> Map Operator Tree: - pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002 File Output Operator compressed: false GlobalTableId: 0 - directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10000 + directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10000 NumFilesPerFileSink: 1 table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -4871,13 +4873,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2 + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2 name merge_src_part2 partition_columns ds serialization.ddl struct merge_src_part2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454147 + transient_lastDdlTime 1290113731 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part2 TotalFiles: 1 @@ -4885,9 +4887,9 @@ MultiFileSpray: false Needs Tagging: false Path -> Alias: - pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002 [pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002 [pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002] Path -> Partition: - pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002 Partition base file name: -ext-10002 input format: org.apache.hadoop.mapred.TextInputFormat @@ -4898,13 +4900,13 @@ columns.types string:string 
file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2 + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2 name merge_src_part2 partition_columns ds serialization.ddl struct merge_src_part2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454147 + transient_lastDdlTime 1290113731 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -4915,13 +4917,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2 + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2 name merge_src_part2 partition_columns ds serialization.ddl struct merge_src_part2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454147 + transient_lastDdlTime 1290113731 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src_part2 name: merge_src_part2 @@ -4975,11 +4977,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@merge_src_part2@ds=2008-04-08 PREHOOK: Input: default@merge_src_part2@ds=2008-04-09 -PREHOOK: Output: file:/tmp/njain/hive_2010-11-10_21-42-43_560_3339774644672299186/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-56_157_6737432571027363960/-mr-10000 POSTHOOK: query: select * from merge_src_part2 where ds is not null POSTHOOK: type: QUERY POSTHOOK: Input: default@merge_src_part2@ds=2008-04-08 POSTHOOK: Input: default@merge_src_part2@ds=2008-04-09 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-56_157_6737432571027363960/-mr-10000 POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/query_result_fileformat.q.out =================================================================== --- ql/src/test/results/clientpositive/query_result_fileformat.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/query_result_fileformat.q.out (working copy) @@ -2,23 +2,23 @@ 1 http://asdf' value from src limit 1 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table nzhang_test1 stored as sequencefile as select 'key1' as key, 'value 1 http://asdf' value from src limit 1 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@nzhang_test1 PREHOOK: query: select * from nzhang_test1 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_test1 -PREHOOK: Output: 
file:/tmp/nzhang/hive_2010-08-30_23-37-49_137_345185714437305649/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-18_756_4686540378600379768/-mr-10000 POSTHOOK: query: select * from nzhang_test1 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_test1 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-37-49_137_345185714437305649/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-18_756_4686540378600379768/-mr-10000 key1 value 1 @@ -26,11 +26,11 @@ PREHOOK: query: select count(*) from nzhang_test1 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_test1 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-37-49_893_2470605464847588988/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-19_306_9007845540495524130/-mr-10000 POSTHOOK: query: select count(*) from nzhang_test1 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_test1 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-37-49_893_2470605464847588988/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-19_306_9007845540495524130/-mr-10000 1 PREHOOK: query: explain select * from nzhang_test1 where key='key1' @@ -82,11 +82,11 @@ PREHOOK: query: select * from nzhang_test1 where key='key1' PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_test1 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-37-56_447_1539901914223140072/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-27_819_8034778775852480180/-mr-10000 POSTHOOK: query: select * from nzhang_test1 where key='key1' POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_test1 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-37-56_447_1539901914223140072/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-27_819_8034778775852480180/-mr-10000 key1 value 1 NULL NULL @@ -94,11 +94,11 @@ PREHOOK: query: select * from nzhang_test1 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_test1 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-00_606_2534525216891512327/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-32_891_5081550764397015247/-mr-10000 POSTHOOK: query: select * from nzhang_test1 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_test1 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-00_606_2534525216891512327/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-32_891_5081550764397015247/-mr-10000 key1 value 1 @@ -106,11 +106,11 @@ PREHOOK: query: select count(*) from nzhang_test1 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_test1 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-02_968_2091791272244763520/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-33_131_934017148846831316/-mr-10000 POSTHOOK: query: select count(*) from nzhang_test1 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_test1 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-02_968_2091791272244763520/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-33_131_934017148846831316/-mr-10000 
1 PREHOOK: query: explain select * from nzhang_test1 where key='key1' @@ -162,11 +162,11 @@ PREHOOK: query: select * from nzhang_test1 where key='key1' PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_test1 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-09_247_8932362895617955403/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-40_273_8393892543433536739/-mr-10000 POSTHOOK: query: select * from nzhang_test1 where key='key1' POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_test1 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-09_247_8932362895617955403/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-40_273_8393892543433536739/-mr-10000 key1 value 1 Index: ql/src/test/results/clientpositive/rcfile_default_format.q.out =================================================================== --- ql/src/test/results/clientpositive/rcfile_default_format.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/rcfile_default_format.q.out (working copy) @@ -9,12 +9,12 @@ POSTHOOK: type: DESCTABLE key string from deserializer -Detailed Table Information Table(tableName:rcfile_default_format, dbName:default, owner:thiruvel, createTime:1286799201, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/rcfile_default_format, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1286799201}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:rcfile_default_format, dbName:default, owner:heyongqiang, createTime:1290106787, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/rcfile_default_format, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290106787}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: CREATE TABLE rcfile_default_format_ctas AS SELECT key,value FROM src -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: CREATE TABLE rcfile_default_format_ctas AS SELECT key,value FROM src -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@rcfile_default_format_ctas PREHOOK: query: DESCRIBE EXTENDED rcfile_default_format_ctas @@ -24,7 +24,7 @@ key string from deserializer value string from deserializer -Detailed Table Information Table(tableName:rcfile_default_format_ctas, dbName:default, owner:thiruvel, createTime:1286799204, lastAccessTime:0, retention:0, 
sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/rcfile_default_format_ctas, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1286799204}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:rcfile_default_format_ctas, dbName:default, owner:heyongqiang, createTime:1290106793, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/rcfile_default_format_ctas, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290106793}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: CREATE TABLE rcfile_default_format_txtfile (key STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE POSTHOOK: query: CREATE TABLE rcfile_default_format_txtfile (key STRING) STORED AS TEXTFILE @@ -46,12 +46,12 @@ POSTHOOK: Lineage: rcfile_default_format_txtfile.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] key string -Detailed Table Information Table(tableName:rcfile_default_format_txtfile, dbName:default, owner:thiruvel, createTime:1286799204, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/rcfile_default_format_txtfile, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1286799207, numRows=500, totalSize=1906}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:rcfile_default_format_txtfile, dbName:default, owner:heyongqiang, createTime:1290106794, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/rcfile_default_format_txtfile, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], 
parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1290106803, numRows=500, totalSize=1906}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: CREATE TABLE textfile_default_format_ctas AS SELECT key,value FROM rcfile_default_format_ctas -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@rcfile_default_format_ctas POSTHOOK: query: CREATE TABLE textfile_default_format_ctas AS SELECT key,value FROM rcfile_default_format_ctas -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@rcfile_default_format_ctas POSTHOOK: Output: default@textfile_default_format_ctas POSTHOOK: Lineage: rcfile_default_format_txtfile.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -63,4 +63,4 @@ key string value string -Detailed Table Information Table(tableName:textfile_default_format_ctas, dbName:default, owner:thiruvel, createTime:1286799209, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/textfile_default_format_ctas, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1286799209}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:textfile_default_format_ctas, dbName:default, owner:heyongqiang, createTime:1290106809, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/textfile_default_format_ctas, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290106809}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/semijoin.q.out =================================================================== --- ql/src/test/results/clientpositive/semijoin.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/semijoin.q.out (working copy) @@ -1,18 +1,18 @@ PREHOOK: query: create table t1 as select cast(key as int) key, value from src where key <= 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table t1 as select cast(key as int) key, value from src where key <= 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@t1 PREHOOK: query: select * from t1 sort by key PREHOOK: type: QUERY PREHOOK: Input: default@t1 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-35_964_8659428901868696782/-mr-10000 
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-19_046_3895376692139653549/-mr-10000 POSTHOOK: query: select * from t1 sort by key POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-35_964_8659428901868696782/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-19_046_3895376692139653549/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -25,20 +25,20 @@ 9 val_9 10 val_10 PREHOOK: query: create table t2 as select cast(2*key as int) key, value from t1 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@t1 POSTHOOK: query: create table t2 as select cast(2*key as int) key, value from t1 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t2 PREHOOK: query: select * from t2 sort by key PREHOOK: type: QUERY PREHOOK: Input: default@t2 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-44_033_1385561037107185571/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-35_454_4705187275126291053/-mr-10000 POSTHOOK: query: select * from t2 sort by key POSTHOOK: type: QUERY POSTHOOK: Input: default@t2 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-44_033_1385561037107185571/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-35_454_4705187275126291053/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -51,22 +51,22 @@ 18 val_9 20 val_10 PREHOOK: query: create table t3 as select * from (select * from t1 union all select * from t2) b -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 POSTHOOK: query: create table t3 as select * from (select * from t1 union all select * from t2) b -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Output: default@t3 PREHOOK: query: select * from t3 sort by key, value PREHOOK: type: QUERY PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-56_368_4643337669577300642/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-56_521_5858394420847642107/-mr-10000 POSTHOOK: query: select * from t3 sort by key, value POSTHOOK: type: QUERY POSTHOOK: Input: default@t3 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-56_368_4643337669577300642/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-56_521_5858394420847642107/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -97,11 +97,11 @@ PREHOOK: query: select * from t4 PREHOOK: type: QUERY PREHOOK: Input: default@t4 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-00_347_4261760726152540894/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-05_957_5547541419038535698/-mr-10000 POSTHOOK: query: select * from t4 POSTHOOK: type: QUERY POSTHOOK: Input: default@t4 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-00_347_4261760726152540894/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-05_957_5547541419038535698/-mr-10000 PREHOOK: query: explain select * from t1 a 
left semi join t2 b on a.key=b.key sort by a.key, a.value PREHOOK: type: QUERY POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value @@ -185,7 +185,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-18-00_543_3830170995670719849/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-06_152_355729472898392348/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -217,12 +217,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-00_658_5539359917304769093/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-06_413_6128740965053236566/-mr-10000 POSTHOOK: query: select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-00_658_5539359917304769093/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-06_413_6128740965053236566/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -312,7 +312,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-18-08_284_4216408335407237139/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-24_265_4466548024067433690/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -344,12 +344,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-08_398_3149624530689509743/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-24_509_4270214608701232060/-mr-10000 POSTHOOK: query: select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-08_398_3149624530689509743/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-24_509_4270214608701232060/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -441,7 +441,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-18-16_135_6319067187632899075/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-40_609_1842300352332686282/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -473,12 +473,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t4 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-16_255_6778253691436748699/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-40_927_8797191922320768648/-mr-10000 POSTHOOK: query: select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t4 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-16_255_6778253691436748699/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-40_927_8797191922320768648/-mr-10000 PREHOOK: query: explain select a.value from t1 a left semi 
join t3 b on (b.key = a.key and b.key < '15') sort by a.value PREHOOK: type: QUERY POSTHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value @@ -568,7 +568,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-18-24_857_4689023765690259253/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-57_384_3089194844599355448/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -596,12 +596,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-24_973_3041071756227440132/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-57_517_5261225666744367069/-mr-10000 POSTHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t3 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-24_973_3041071756227440132/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-57_517_5261225666744367069/-mr-10000 val_0 val_0 val_0 @@ -708,7 +708,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-18-34_061_8275163351633601557/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-15_938_7137978578832330282/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -740,12 +740,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-34_179_4205035139034381615/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-16_057_519725218464853726/-mr-10000 POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-34_179_4205035139034381615/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-16_057_519725218464853726/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -841,7 +841,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-18-43_183_3107199938637789566/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-32_044_1882268558166284283/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -869,12 +869,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-43_306_2053779749324913446/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-32_201_5180642771512887111/-mr-10000 POSTHOOK: query: select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t3 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-43_306_2053779749324913446/-mr-10000 +POSTHOOK: Output: 
file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-32_201_5180642771512887111/-mr-10000 val_10 val_8 val_9 @@ -980,7 +980,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-18-52_335_849441347171062507/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-47_618_6081419597528468690/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -1008,12 +1008,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-52_463_737069354382102340/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-48_046_789670215385615891/-mr-10000 POSTHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-52_463_737069354382102340/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-48_046_789670215385615891/-mr-10000 PREHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value PREHOOK: type: QUERY POSTHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value @@ -1110,7 +1110,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-19-00_301_5958832649298868830/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-05_112_1019830060466266544/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -1142,12 +1142,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-00_424_4463918460829143379/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-05_322_221151979084285496/-mr-10000 POSTHOOK: query: select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-00_424_4463918460829143379/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-05_322_221151979084285496/-mr-10000 4 val_2 8 val_4 10 val_5 @@ -1229,7 +1229,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-19-09_365_1316122738460523098/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-20_944_438303615610048165/-mr-10002 Select Operator expressions: expr: _col0 @@ -1267,12 +1267,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-09_480_7870074811327558689/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-21_127_8134172271972764419/-mr-10000 POSTHOOK: query: select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key POSTHOOK: type: 
QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t3 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-09_480_7870074811327558689/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-21_127_8134172271972764419/-mr-10000 0 0 0 @@ -1375,7 +1375,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-19-19_175_5090290266309302820/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-43_629_5752544729482637710/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -1407,12 +1407,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-19_289_7283573667957611898/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-43_779_3560439419287948511/-mr-10000 POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-19_289_7283573667957611898/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-43_779_3560439419287948511/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -1523,7 +1523,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-19-28_320_7044231815158137622/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-00_648_373535142060359448/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -1560,13 +1560,13 @@ PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-28_450_6078674943204864805/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-00_983_7977499042899650897/-mr-10000 POSTHOOK: query: select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-28_450_6078674943204864805/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-00_983_7977499042899650897/-mr-10000 0 val_0 0 val_0 0 val_0 0 val_0 0 val_0 0 val_0 @@ -1676,7 +1676,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-19-38_052_8721634945636524946/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-17_051_7474953441543543987/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -1708,12 +1708,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-38_174_5762672611483777652/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-17_231_2179425812915281444/-mr-10000 POSTHOOK: query: select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t3 -POSTHOOK: Output: 
file:/tmp/liyintang/hive_2010-11-04_13-19-38_174_5762672611483777652/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-17_231_2179425812915281444/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -1838,7 +1838,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-19-49_190_4624457787442565360/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-33_573_6260564714806927759/-mr-10002 Select Operator expressions: expr: _col0 @@ -1877,13 +1877,13 @@ PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-49_324_9174162734156458916/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-33_826_3229491168132326963/-mr-10000 POSTHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-49_324_9174162734156458916/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-33_826_3229491168132326963/-mr-10000 0 0 0 @@ -1991,7 +1991,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-19-59_535_8715349469270136515/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-55_240_4472382832724150377/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -2020,13 +2020,13 @@ PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-59_667_8557238197192129755/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-55_429_8454721176911576485/-mr-10000 POSTHOOK: query: select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-59_667_8557238197192129755/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-55_429_8454721176911576485/-mr-10000 0 0 0 @@ -2146,7 +2146,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-20-09_255_2715293241143785335/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-12_866_4359353511062609004/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -2175,13 +2175,13 @@ PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-09_383_4275977555046345305/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-13_034_2999962254990000638/-mr-10000 POSTHOOK: query: select a.key from t1 a right outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 -POSTHOOK: Output: 
file:/tmp/liyintang/hive_2010-11-04_13-20-09_383_4275977555046345305/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-13_034_2999962254990000638/-mr-10000 NULL NULL NULL @@ -2304,7 +2304,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-20-19_268_5618148586891473846/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-27_311_4743482924605624788/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -2333,13 +2333,13 @@ PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-19_400_3587882397101268241/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-27_421_7482213301780796952/-mr-10000 POSTHOOK: query: select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-19_400_3587882397101268241/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-27_421_7482213301780796952/-mr-10000 NULL NULL NULL @@ -2462,7 +2462,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-20-28_728_1492881717279570087/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-43_118_4112077673076844815/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -2491,13 +2491,13 @@ PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-28_856_8757358207361872372/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-43_554_6913778118057933477/-mr-10000 POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-28_856_8757358207361872372/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-43_554_6913778118057933477/-mr-10000 0 0 0 @@ -2620,7 +2620,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-20-38_076_4685193258029073996/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-59_032_4567135017384415264/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -2649,13 +2649,13 @@ PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-38_205_3988929412565665845/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-59_146_932092442279199935/-mr-10000 POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 -POSTHOOK: Output: 
file:/tmp/liyintang/hive_2010-11-04_13-20-38_205_3988929412565665845/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-59_146_932092442279199935/-mr-10000 NULL NULL NULL @@ -2780,7 +2780,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-20-47_590_2999113315834306029/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-15_877_648799003197473512/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -2809,13 +2809,13 @@ PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-47_722_4677693091558572786/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-15_976_6198373402363644273/-mr-10000 POSTHOOK: query: select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-47_722_4677693091558572786/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-15_976_6198373402363644273/-mr-10000 NULL NULL NULL @@ -2984,7 +2984,7 @@ Stage: Stage-3 Map Reduce Alias -> Map Operator Tree: - file:/tmp/liyintang/hive_2010-11-04_13-20-57_841_8595738346509621581/-mr-10003 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-33_848_5227894741694043284/-mr-10003 Reduce Output Operator key expressions: expr: _col0 @@ -3013,13 +3013,13 @@ PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 PREHOOK: Input: default@t3 -PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-57_976_1019112090965524872/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-34_080_237734989555410900/-mr-10000 POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 -POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-57_976_1019112090965524872/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-34_080_237734989555410900/-mr-10000 0 0 0 Index: ql/src/test/results/clientpositive/show_indexes_edge_cases.q.out =================================================================== --- ql/src/test/results/clientpositive/show_indexes_edge_cases.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/show_indexes_edge_cases.q.out (working copy) @@ -201,6 +201,40 @@ POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +PREHOOK: query: DROP INDEX idx_comment on show_idx_empty +PREHOOK: type: DROPINDEX +POSTHOOK: query: DROP INDEX idx_comment on show_idx_empty +POSTHOOK: 
type: DROPINDEX +POSTHOOK: Lineage: default__show_idx_full_idx_1__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__.value2 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value2, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +PREHOOK: query: DROP INDEX idx_compound on show_idx_empty +PREHOOK: type: DROPINDEX +POSTHOOK: query: DROP INDEX idx_compound on show_idx_empty +POSTHOOK: type: DROPINDEX +POSTHOOK: Lineage: default__show_idx_full_idx_1__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._offsets EXPRESSION 
[(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__.value2 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value2, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] PREHOOK: query: DROP TABLE show_idx_empty PREHOOK: type: DROPTABLE PREHOOK: Input: default@show_idx_empty @@ -222,6 +256,40 @@ POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +PREHOOK: query: DROP INDEX idx_1 on show_idx_full +PREHOOK: type: DROPINDEX +POSTHOOK: query: DROP INDEX idx_1 on show_idx_full +POSTHOOK: type: DROPINDEX +POSTHOOK: Lineage: default__show_idx_full_idx_1__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__.value2 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value2, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: 
default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +PREHOOK: query: DROP INDEX idx_2 on show_idx_full +PREHOOK: type: DROPINDEX +POSTHOOK: query: DROP INDEX idx_2 on show_idx_full +POSTHOOK: type: DROPINDEX +POSTHOOK: Lineage: default__show_idx_full_idx_1__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__.value2 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value2, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] PREHOOK: query: DROP TABLE show_idx_full PREHOOK: type: DROPTABLE PREHOOK: Input: default@show_idx_full Index: ql/src/test/results/clientpositive/stats10.q.out =================================================================== --- ql/src/test/results/clientpositive/stats10.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/stats10.q.out (working copy) @@ -123,11 +123,11 @@ PREHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key PREHOOK: type: QUERY PREHOOK: Input: default@bucket3_1@ds=1 -PREHOOK: Output: file:/tmp/thiruvel/hive_2010-10-11_12-41-36_921_3883563776062594481/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_21-59-35_194_6516705847755484921/-mr-10000 POSTHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket3_1@ds=1 -POSTHOOK: Output: 
file:/tmp/thiruvel/hive_2010-10-11_12-41-36_921_3883563776062594481/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_21-59-35_194_6516705847755484921/-mr-10000 POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -382,9 +382,9 @@ 498 val_498 1 498 val_498 1 PREHOOK: query: explain analyze table bucket3_1 partition (ds) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain analyze table bucket3_1 partition (ds) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -411,14 +411,14 @@ PREHOOK: query: analyze table bucket3_1 partition (ds) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@bucket3_1@ds=1 PREHOOK: Input: default@bucket3_1@ds=2 PREHOOK: Output: default@bucket3_1 PREHOOK: Output: default@bucket3_1@ds=1 PREHOOK: Output: default@bucket3_1@ds=2 POSTHOOK: query: analyze table bucket3_1 partition (ds) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket3_1@ds=1 POSTHOOK: Input: default@bucket3_1@ds=2 POSTHOOK: Output: default@bucket3_1 @@ -444,7 +444,7 @@ value string ds string -Detailed Partition Information Partition(values:[1], dbName:default, tableName:bucket3_1, createTime:1286826089, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/bucket3_1/ds=1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), parameters:{numFiles=2, transient_lastDdlTime=1286826103, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[1], dbName:default, tableName:bucket3_1, createTime:1290146355, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucket3_1/ds=1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), parameters:{numFiles=2, transient_lastDdlTime=1290146388, numRows=500, totalSize=5812}) PREHOOK: query: describe extended bucket3_1 partition (ds='2') PREHOOK: type: 
DESCTABLE POSTHOOK: query: describe extended bucket3_1 partition (ds='2') @@ -459,7 +459,7 @@ value string ds string -Detailed Partition Information Partition(values:[2], dbName:default, tableName:bucket3_1, createTime:1286826096, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/bucket3_1/ds=2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), parameters:{numFiles=2, transient_lastDdlTime=1286826103, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2], dbName:default, tableName:bucket3_1, createTime:1290146374, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucket3_1/ds=2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), parameters:{numFiles=2, transient_lastDdlTime=1290146388, numRows=500, totalSize=5812}) PREHOOK: query: describe extended bucket3_1 PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended bucket3_1 @@ -474,4 +474,4 @@ value string ds string -Detailed Table Information Table(tableName:bucket3_1, dbName:default, owner:thiruvel, createTime:1286826085, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/bucket3_1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=2, numFiles=4, transient_lastDdlTime=1286826103, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:bucket3_1, dbName:default, owner:heyongqiang, createTime:1290146344, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucket3_1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, 
parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=2, numFiles=4, transient_lastDdlTime=1290146388, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/stats12.q.out =================================================================== --- ql/src/test/results/clientpositive/stats12.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/stats12.q.out (working copy) @@ -30,10 +30,10 @@ POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain extended analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain extended analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -60,10 +60,10 @@ GatherStats: true Needs Tagging: false Path -> Alias: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 [analyze_srcpart] - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12 [analyze_srcpart] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 [analyze_srcpart] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12 [analyze_srcpart] Path -> Partition: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 Partition base file name: hr=11 input format: org.apache.hadoop.mapred.TextInputFormat @@ -77,13 +77,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454661 + transient_lastDdlTime 1290146533 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -94,17 +94,17 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location 
pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454661 + transient_lastDdlTime 1290146533 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: analyze_srcpart name: analyze_srcpart - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12 Partition base file name: hr=12 input format: org.apache.hadoop.mapred.TextInputFormat @@ -118,13 +118,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454661 + transient_lastDdlTime 1290146533 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -135,13 +135,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454661 + transient_lastDdlTime 1290146533 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: analyze_srcpart name: analyze_srcpart @@ -152,14 +152,14 @@ PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 PREHOOK: Output: default@analyze_srcpart PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@analyze_srcpart @@ -190,7 +190,7 @@ ds string hr string -Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289454661, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], 
location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1289454686, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146533, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1290146557, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=11) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=11) @@ -208,7 +208,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289454677, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289454686, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146549, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, 
serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146557, numRows=500, totalSize=5812}) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=12) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=12) @@ -226,7 +226,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289454677, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289454686, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146550, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146557, numRows=500, totalSize=5812}) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=11) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=11) @@ -244,7 +244,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1289454678, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289454678}) +Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146550, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], 
location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146550}) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=12) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=12) @@ -262,4 +262,4 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1289454678, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289454678}) +Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146550, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146550}) Index: ql/src/test/results/clientpositive/stats13.q.out =================================================================== --- ql/src/test/results/clientpositive/stats13.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/stats13.q.out (working copy) @@ -30,10 +30,10 @@ POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain extended analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain extended analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -60,9 
+60,9 @@ GatherStats: true Needs Tagging: false Path -> Alias: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 [analyze_srcpart] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 [analyze_srcpart] Path -> Partition: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 Partition base file name: hr=11 input format: org.apache.hadoop.mapred.TextInputFormat @@ -76,13 +76,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454688 + transient_lastDdlTime 1290146558 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -93,13 +93,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454688 + transient_lastDdlTime 1290146558 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: analyze_srcpart name: analyze_srcpart @@ -110,12 +110,12 @@ PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 PREHOOK: Output: default@analyze_srcpart PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: Output: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 @@ -144,7 +144,7 @@ ds string hr string -Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289454688, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, 
serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=1289454710, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146558, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=1290146579, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=11) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=11) @@ -162,7 +162,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289454702, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289454710, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146572, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146579, numRows=500, totalSize=5812}) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=12) PREHOOK: type: 
DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=12) @@ -180,7 +180,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289454703, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289454703}) +Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146572, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146572}) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=11) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=11) @@ -198,7 +198,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1289454703, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289454703}) +Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146573, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146573}) PREHOOK: 
query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=12) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=12) @@ -216,7 +216,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1289454704, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289454704}) +Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146573, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146573}) PREHOOK: query: create table analyze_srcpart2 like analyze_srcpart PREHOOK: type: CREATETABLE POSTHOOK: query: create table analyze_srcpart2 like analyze_srcpart @@ -247,4 +247,4 @@ ds string hr string -Detailed Table Information Table(tableName:analyze_srcpart2, dbName:default, owner:null, createTime:1289454711, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{transient_lastDdlTime=1289454711}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_srcpart2, dbName:default, owner:null, createTime:1290146580, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart2, 
inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{transient_lastDdlTime=1290146580}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/stats2.q.out =================================================================== --- ql/src/test/results/clientpositive/stats2.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/stats2.q.out (working copy) @@ -103,11 +103,11 @@ ds string hr string -Detailed Table Information Table(tableName:analyze_t1, dbName:default, owner:null, createTime:1289454712, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{transient_lastDdlTime=1289454712}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_t1, dbName:default, owner:null, createTime:1290126752, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{transient_lastDdlTime=1290126752}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: explain analyze table analyze_t1 partition (ds, hr) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain analyze table analyze_t1 partition (ds, hr) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_t1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_t1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: analyze_t1 
PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -136,7 +136,7 @@ PREHOOK: query: analyze table analyze_t1 partition (ds, hr) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_t1@ds=2008-04-08/hr=11 PREHOOK: Input: default@analyze_t1@ds=2008-04-08/hr=12 PREHOOK: Input: default@analyze_t1@ds=2008-04-09/hr=11 @@ -147,7 +147,7 @@ PREHOOK: Output: default@analyze_t1@ds=2008-04-09/hr=11 PREHOOK: Output: default@analyze_t1@ds=2008-04-09/hr=12 POSTHOOK: query: analyze table analyze_t1 partition (ds, hr) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_t1@ds=2008-04-08/hr=11 POSTHOOK: Input: default@analyze_t1@ds=2008-04-08/hr=12 POSTHOOK: Input: default@analyze_t1@ds=2008-04-09/hr=11 @@ -182,4 +182,4 @@ ds string hr string -Detailed Table Information Table(tableName:analyze_t1, dbName:default, owner:null, createTime:1289454712, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=4, numFiles=4, transient_lastDdlTime=1289454732, numRows=2000, totalSize=23248}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_t1, dbName:default, owner:null, createTime:1290126752, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=4, numFiles=4, transient_lastDdlTime=1290126769, numRows=2000, totalSize=23248}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/stats5.q.out =================================================================== --- ql/src/test/results/clientpositive/stats5.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/stats5.q.out (working copy) @@ -1,14 +1,14 @@ PREHOOK: query: create table analyze_src as select * from src -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table 
analyze_src as select * from src -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@analyze_src PREHOOK: query: explain analyze table analyze_src compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain analyze table analyze_src compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY ABSTRACT SYNTAX TREE: (TOK_ANALYZE (TOK_TABTYPE analyze_src)) @@ -29,11 +29,11 @@ PREHOOK: query: analyze table analyze_src compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_src PREHOOK: Output: default@analyze_src POSTHOOK: query: analyze table analyze_src compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_src POSTHOOK: Output: default@analyze_src PREHOOK: query: describe extended analyze_src @@ -43,4 +43,4 @@ key string value string -Detailed Table Information Table(tableName:analyze_src, dbName:default, owner:thiruvel, createTime:1286826326, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/analyze_src, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1286826330, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_src, dbName:default, owner:heyongqiang, createTime:1290146681, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_src, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1290146692, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/stats6.q.out =================================================================== --- ql/src/test/results/clientpositive/stats6.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/stats6.q.out (working copy) @@ -29,12 +29,12 @@ POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 PREHOOK: Output: 
default@analyze_srcpart PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: Output: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 @@ -47,12 +47,12 @@ POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 PREHOOK: Output: default@analyze_srcpart PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 @@ -81,7 +81,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495447, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495455, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146715, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146726, numRows=500, totalSize=5812}) PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12) PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12) @@ -99,7 +99,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495448, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, 
comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495460, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146716, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146736, numRows=500, totalSize=5812}) PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=11) PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=11) @@ -117,7 +117,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495448, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289495448}) +Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146716, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146716}) PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=12) PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=12) @@ -135,7 +135,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, 
tableName:analyze_srcpart, createTime:1289495448, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289495448}) +Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146717, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146717}) PREHOOK: query: describe extended analyze_srcpart PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended analyze_srcpart @@ -153,4 +153,4 @@ ds string hr string -Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289495436, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1289495460, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146693, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, 
parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1290146737, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/stats7.q.out =================================================================== --- ql/src/test/results/clientpositive/stats7.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/stats7.q.out (working copy) @@ -29,9 +29,9 @@ POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -60,14 +60,14 @@ PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 PREHOOK: Output: default@analyze_srcpart PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@analyze_srcpart @@ -98,7 +98,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495472, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495480, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146760, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), 
FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146772, numRows=500, totalSize=5812}) PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12) PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12) @@ -116,7 +116,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495473, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495480, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146761, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146772, numRows=500, totalSize=5812}) PREHOOK: query: describe extended analyze_srcpart PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended analyze_srcpart @@ -134,4 +134,4 @@ ds string hr string -Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289495462, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, 
comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1289495480, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146739, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1290146772, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/stats8.q.out =================================================================== --- ql/src/test/results/clientpositive/stats8.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/stats8.q.out (working copy) @@ -29,9 +29,9 @@ POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -60,12 +60,12 @@ PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 PREHOOK: Output: default@analyze_srcpart PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: Output: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 @@ -94,7 +94,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, 
sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495500, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146797, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146810, numRows=500, totalSize=5812}) PREHOOK: query: describe extended analyze_srcpart PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended analyze_srcpart @@ -112,11 +112,11 @@ ds string hr string -Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289495481, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=1289495500, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146774, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, 
parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=1290146810, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -145,12 +145,12 @@ PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 PREHOOK: Output: default@analyze_srcpart PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 @@ -179,11 +179,11 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495506, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146799, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146820, numRows=500, totalSize=5812}) PREHOOK: query: explain analyze table analyze_srcpart 
PARTITION(ds='2008-04-09',hr=11) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -212,12 +212,12 @@ PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11 PREHOOK: Output: default@analyze_srcpart PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11 POSTHOOK: Output: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11 @@ -246,11 +246,11 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495512, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146799, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146830, numRows=500, totalSize=5812}) PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart 
PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -279,12 +279,12 @@ PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=12 PREHOOK: Output: default@analyze_srcpart PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=12 POSTHOOK: Output: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12 @@ -313,11 +313,11 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495495, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495518, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146800, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146842, numRows=500, totalSize=5812}) PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds, hr) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds, hr) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -346,7 +346,7 @@ PREHOOK: query: analyze table analyze_srcpart PARTITION(ds, hr) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 
PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11 @@ -357,7 +357,7 @@ PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds, hr) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11 @@ -392,7 +392,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495525, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146797, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146854, numRows=500, totalSize=5812}) PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12) PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12) @@ -410,7 +410,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495525, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146799, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, 
type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146854, numRows=500, totalSize=5812}) PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=11) PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=11) @@ -428,7 +428,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495526, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146799, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146854, numRows=500, totalSize=5812}) PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=12) PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=12) @@ -446,7 +446,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495495, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, 
transient_lastDdlTime=1289495526, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146800, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146854, numRows=500, totalSize=5812}) PREHOOK: query: describe extended analyze_srcpart PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended analyze_srcpart @@ -464,4 +464,4 @@ ds string hr string -Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289495481, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=4, numFiles=4, transient_lastDdlTime=1289495526, numRows=2000, totalSize=23248}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146774, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=4, numFiles=4, transient_lastDdlTime=1290146854, numRows=2000, totalSize=23248}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/stats9.q.out =================================================================== --- ql/src/test/results/clientpositive/stats9.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/stats9.q.out (working copy) 
@@ -14,9 +14,9 @@ POSTHOOK: Lineage: analyze_srcbucket.key SIMPLE [(srcbucket)srcbucket.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: analyze_srcbucket.value SIMPLE [(srcbucket)srcbucket.FieldSchema(name:value, type:string, comment:null), ] PREHOOK: query: explain analyze table analyze_srcbucket compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain analyze table analyze_srcbucket compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_srcbucket.key SIMPLE [(srcbucket)srcbucket.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: analyze_srcbucket.value SIMPLE [(srcbucket)srcbucket.FieldSchema(name:value, type:string, comment:null), ] ABSTRACT SYNTAX TREE: @@ -39,11 +39,11 @@ PREHOOK: query: analyze table analyze_srcbucket compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcbucket PREHOOK: Output: default@analyze_srcbucket POSTHOOK: query: analyze table analyze_srcbucket compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcbucket POSTHOOK: Output: default@analyze_srcbucket POSTHOOK: Lineage: analyze_srcbucket.key SIMPLE [(srcbucket)srcbucket.FieldSchema(name:key, type:int, comment:null), ] @@ -57,4 +57,4 @@ key int value string -Detailed Table Information Table(tableName:analyze_srcbucket, dbName:default, owner:thiruvel, createTime:1286826500, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/analyze_srcbucket, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, EXTERNAL=FALSE, numFiles=1, transient_lastDdlTime=1286826508, numRows=1000, totalSize=11603}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_srcbucket, dbName:default, owner:heyongqiang, createTime:1290146857, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcbucket, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1290146885, numRows=1000, totalSize=11603}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/str_to_map.q.out =================================================================== --- ql/src/test/results/clientpositive/str_to_map.q.out (revision 1036686) +++ ql/src/test/results/clientpositive/str_to_map.q.out (working copy) @@ -48,11 +48,11 @@ PREHOOK: query: select 
str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-48_939_5414753236298860779/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-11-47_081_185807984859506518/-mr-10000
POSTHOOK: query: select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-48_939_5414753236298860779/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-11-47_081_185807984859506518/-mr-10000
1
1
1
@@ -95,11 +95,11 @@
PREHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-51_828_6905327639418151142/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-11-53_276_628458177270710173/-mr-10000
POSTHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-51_828_6905327639418151142/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-11-53_276_628458177270710173/-mr-10000
{"b":"2","c":"3","a":"1"}
{"b":"2","c":"3","a":"1"}
{"b":"2","c":"3","a":"1"}
@@ -142,11 +142,11 @@
PREHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-54_404_1637219732245487353/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-00_242_6299210516765000168/-mr-10000
POSTHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-54_404_1637219732245487353/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-00_242_6299210516765000168/-mr-10000
{"b":"2","c":"3","a":"1"}
{"b":"2","c":"3","a":"1"}
{"b":"2","c":"3","a":"1"}
@@ -205,13 +205,13 @@
limit 3
PREHOOK: type: QUERY
PREHOOK: Input: default@src
-PREHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-56_995_8619552318902310354/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-08_090_5560348263638743320/-mr-10000
POSTHOOK: query: select str_to_map(t.ss,',',':')['a'] from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t limit 3
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
-POSTHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-56_995_8619552318902310354/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-08_090_5560348263638743320/-mr-10000
1
1
1
@@ -220,20 +220,20 @@
POSTHOOK: query: drop table tbl_s2m
POSTHOOK: type: DROPTABLE
PREHOOK: query: create table tbl_s2m as select 'ABC=CC_333=444' as t from src limit 3
-PREHOOK: type: CREATETABLE
+PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
POSTHOOK: query: create table tbl_s2m as select 'ABC=CC_333=444' as t from src limit 3
-POSTHOOK: type: CREATETABLE
+POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: default@tbl_s2m
PREHOOK: query: select str_to_map(t,'_','=')['333'] from tbl_s2m
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl_s2m
-PREHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-49-02_576_7096389194927995175/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-26_274_4931967655402208603/-mr-10000
POSTHOOK: query: select str_to_map(t,'_','=')['333'] from tbl_s2m
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl_s2m
-POSTHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-49-02_576_7096389194927995175/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-26_274_4931967655402208603/-mr-10000
444
444
444