Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java	(revision 1050266)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java	(working copy)
@@ -325,6 +325,13 @@
 
     SEMANTIC_ANALYZER_HOOK("hive.semantic.analyzer.hook",null),
 
+    HIVE_AUTHORIZATION_ENABLED("hive.security.authorization.enabled", false),
+    HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager", null),
+    HIVE_AUTHENTICATOR_MANAGER("hive.security.authenticator.manager", null),
+
+    HIVE_AUTHORIZATION_TABLE_USER_GRANTS("hive.security.authorization.table.user.grants", null),
+    HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS("hive.security.authorization.table.group.grants", null),
+    HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS("hive.security.authorization.table.role.grants", null),
 
     // Print column names in output
     HIVE_CLI_PRINT_HEADER("hive.cli.print.header", false),
Index: conf/hive-default.xml
===================================================================
--- conf/hive-default.xml	(revision 1050266)
+++ conf/hive-default.xml	(working copy)
@@ -834,6 +834,57 @@
 
+<property>
+  <name>hive.security.authorization.enabled</name>
+  <value>false</value>
+  <description>enable or disable the Hive client authorization</description>
+</property>
+
+<property>
+  <name>hive.security.authorization.manager</name>
+  <value>org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider</value>
+  <description>the Hive client authorization manager class name.
+  A user-defined authorization class should implement the interface
+  org.apache.hadoop.hive.ql.security.authorization.AuthorizationProviderManager.
+  </description>
+</property>
+
+<property>
+  <name>hive.security.authenticator.manager</name>
+  <value>org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator</value>
+  <description>the Hive client authenticator manager class name.
+  A user-defined authenticator should implement the interface
+  org.apache.hadoop.hive.ql.security.Authenticator.
+  </description>
+</property>
+
+<property>
+  <name>hive.security.authorization.table.user.grants</name>
+  <value></value>
+  <description>the privileges automatically granted to some users whenever a table gets created.
+  An example like "userX,userY:select;userZ:create" will grant select privilege to userX and userY,
+  and grant create privilege to userZ whenever a new table is created.
+  </description>
+</property>
+
+<property>
+  <name>hive.security.authorization.table.group.grants</name>
+  <value></value>
+  <description>the privileges automatically granted to some groups whenever a table gets created.
+  An example like "groupX,groupY:select;groupZ:create" will grant select privilege to groupX and groupY,
+  and grant create privilege to groupZ whenever a new table is created.
+  </description>
+</property>
+
+<property>
+  <name>hive.security.authorization.table.role.grants</name>
+  <value></value>
+  <description>the privileges automatically granted to some roles whenever a table gets created.
+  An example like "roleX,roleY:select;roleZ:create" will grant select privilege to roleX and roleY,
+  and grant create privilege to roleZ whenever a new table is created.
+  </description>
+</property>
+
+<property>
+  <name>hive.variable.substitute</name>
+  <value>true</value>
+  <description>This enables substitution using syntax like ${var}, ${system:var} and ${env:var}.</description>
+</property>
+
 <property>
   <name>hive.error.on.empty.partition</name>
   <value>false</value>
   <description>Whether to throw an exception if dynamic partition insert generates empty results.</description>
 </property>
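Usage sketch (illustrative, not part of this patch): enabling the client-side checks
through the ConfVars added above. setBoolVar/setVar are existing HiveConf accessors;
the user names and the single-table setup are hypothetical.

    import org.apache.hadoop.hive.conf.HiveConf;

    public class AuthConfSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf(HiveConf.class);
        // turn on authorization checks in the Hive client
        conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
        // automatically grant select to userX/userY and create to userZ
        // on every newly created table (format from the descriptions above)
        conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_USER_GRANTS,
            "userX,userY:select;userZ:create");
      }
    }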
Index: metastore/if/hive_metastore.thrift
===================================================================
--- metastore/if/hive_metastore.thrift	(revision 1050266)
+++ metastore/if/hive_metastore.thrift	(working copy)
@@ -29,12 +29,67 @@
   4: optional list<FieldSchema> fields // if the name is one of the user defined types
 }
 
+enum HiveObjectType {
+  GLOBAL = 1,
+  DATABASE = 2,
+  TABLE = 3,
+  PARTITION = 4,
+  COLUMN = 5,
+}
+
+enum PrincipalType {
+  USER = 1,
+  ROLE = 2,
+  GROUP = 3,
+}
+
+struct HiveObjectRef{
+  1: HiveObjectType objectType,
+  2: string dbName,
+  3: string objectName,
+  4: list<string> partValues,
+  5: string columnName,
+}
+
+struct PrivilegeGrantInfo {
+  1: string privilege,
+  2: i32 createTime,
+  3: string grantor,
+  4: PrincipalType grantorType,
+  5: bool grantOption,
+}
+
+struct HiveObjectPrivilege {
+  1: HiveObjectRef hiveObject,
+  2: string principalName,
+  3: PrincipalType principalType,
+  4: PrivilegeGrantInfo grantInfo,
+}
+
+struct PrivilegeBag {
+  1: list<HiveObjectPrivilege> privileges,
+}
+
+struct PrincipalPrivilegeSet {
+  1: map<string, list<PrivilegeGrantInfo>> userPrivileges, // user name -> privilege grant info
+  2: map<string, list<PrivilegeGrantInfo>> groupPrivileges, // group name -> privilege grant info
+  3: map<string, list<PrivilegeGrantInfo>> rolePrivileges, // role name -> privilege grant info
+}
+
+struct Role {
+  1: string roleName,
+  2: i32 createTime,
+  3: string ownerName,
+  4: map<string, string> parameters, // properties associated with the role
+}
+
 // namespace for tables
 struct Database {
   1: string name,
   2: string description,
   3: string locationUri,
-  4: map<string, string> parameters // properties associated with the database
+  4: map<string, string> parameters, // properties associated with the database
+  5: optional PrincipalPrivilegeSet privileges
 }
 
 // This object holds the information needed by SerDes
@@ -77,7 +132,8 @@
   9: map<string, string> parameters, // to store comments or any other user level parameters
   10: string viewOriginalText, // original view text, null for non-view
   11: string viewExpandedText, // expanded view text, null for non-view
-  12: string tableType // table type enum, e.g. EXTERNAL_TABLE
+  12: string tableType, // table type enum, e.g. EXTERNAL_TABLE
+  13: optional PrincipalPrivilegeSet privileges,
 }
 
 struct Partition {
@@ -87,7 +143,8 @@
   4: i32 createTime,
   5: i32 lastAccessTime,
   6: StorageDescriptor sd,
-  7: map<string, string> parameters
+  7: map<string, string> parameters,
+  8: optional PrincipalPrivilegeSet privileges
 }
 
 struct Index {
@@ -110,7 +167,6 @@
   2: map<string, string> properties
 }
 
-
 exception MetaException {
   1: string message
 }
@@ -209,6 +265,10 @@
       throws(1:NoSuchObjectException o1, 2:MetaException o2)
   Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
       throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+  Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals,
+      4: string user_name, 5: list<string> group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
   Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name)
       throws(1:MetaException o1, 2:NoSuchObjectException o2)
 
@@ -216,6 +276,9 @@
   // If max parts is given then it will return only that many.
   list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
       throws(1:NoSuchObjectException o1, 2:MetaException o2)
+  list<Partition> get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1,
+      4: string user_name, 5: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
   list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
       throws(1:MetaException o2)
 
@@ -228,6 +291,9 @@
   list<Partition> get_partitions_ps(1:string db_name 2:string tbl_name 3:list<string> part_vals, 4:i16 max_parts=-1)
       throws(1:MetaException o1)
+  list<Partition> get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1,
+      5: string user_name, 6: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
   list<string> get_partition_names_ps(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1)
       throws(1:MetaException o1)
 
@@ -272,6 +338,28 @@
       throws(1:NoSuchObjectException o1, 2:MetaException o2)
   list<string> get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
       throws(1:MetaException o2)
+
+  //authorization privileges
+
+  bool create_role(1: string role_name, 2: string owner_name) throws(1:MetaException o1)
+
+  bool drop_role(1: string role_name) throws(1:MetaException o1)
+
+  bool add_role_member(1: string role_name, 2: string principal_name, 3: PrincipalType principal_type) throws(1:MetaException o1)
+
+  bool remove_role_member(1: string role_name, 2: string principal_name, 3: PrincipalType principal_type) throws(1:MetaException o1)
+
+  list<Role> list_roles(1: string principal_name, 2: PrincipalType principal_type) throws(1:MetaException o1)
+
+  PrincipalPrivilegeSet get_privilege_set(1: HiveObjectRef hiveObject, 2: string user_name, 3: list<string> group_names) throws(1:MetaException o1)
+
+  list<HiveObjectPrivilege> list_privileges(1: string principal_name, 2: PrincipalType principal_type, 3: HiveObjectRef hiveObject) throws(1:MetaException o1)
+
+  bool grant_privileges(1: PrivilegeBag privileges) throws(1:MetaException o1)
+  bool revoke_privileges(1: PrivilegeBag privileges) throws(1:MetaException o1)
+
+  bool revoke_all_privileges(1: string user_name, 2: PrincipalType principal_type, 3: bool remove_user_priv, 4: list<string> dbs,
+      5: list<string> tables, 6: list<string> parts, 7: map<string, list<string>> columns) throws(1:MetaException o1)
 }
 
 // * Note about the DDL_TIME: When creating or altering a table or a partition,
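Usage sketch (illustrative, not part of this patch): how the new role and grant
calls compose end to end against a running metastore. The structs and client
calls are the ones defined above; the connection setup, principal names, and
table names are hypothetical.

    import java.util.Arrays;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
    import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
    import org.apache.hadoop.hive.metastore.api.HiveObjectType;
    import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
    import org.apache.hadoop.hive.metastore.api.PrincipalType;
    import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
    import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;

    public class GrantSketch {
      public static void main(String[] args) throws Exception {
        HiveMetaStoreClient client =
            new HiveMetaStoreClient(new HiveConf(HiveMetaStoreClient.class));
        // create a role and add userX as a member
        client.create_role("analysts", "admin");
        client.add_role_member("analysts", "userX", PrincipalType.USER);
        // grant select on table default.t1 to the role
        HiveObjectRef tableRef = new HiveObjectRef(HiveObjectType.TABLE,
            "default", "t1", null, null);
        PrivilegeGrantInfo info = new PrivilegeGrantInfo("select",
            (int) (System.currentTimeMillis() / 1000), "admin",
            PrincipalType.USER, false);
        client.grant_privileges(new PrivilegeBag(Arrays.asList(
            new HiveObjectPrivilege(tableRef, "analysts", PrincipalType.ROLE, info))));
        // read back the effective privilege set for userX on that table
        PrincipalPrivilegeSet set =
            client.get_privilege_set(tableRef, "userX", Arrays.asList("staff"));
      }
    }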
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(revision 1050266)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java	(working copy)
@@ -41,6 +41,9 @@
 import org.apache.hadoop.hive.metastore.api.Constants;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.HiveObjectType;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.IndexAlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
@@ -48,12 +51,23 @@
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
 import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;
+import org.apache.hadoop.hive.metastore.model.MColumnPrivilege;
+import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
+import org.apache.hadoop.hive.metastore.model.MRole;
+import org.apache.hadoop.hive.metastore.model.MTablePartitionPrivilege;
+import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
+import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.SerDeUtils;
@@ -1315,6 +1329,35 @@
       }
       return ret;
     }
+
+    @Override
+    public Partition get_partition_with_auth(final String db_name,
+        final String tbl_name, final List<String> part_vals,
+        final String user_name, final List<String> group_names)
+        throws MetaException, NoSuchObjectException, TException {
+      incrementCounter("get_partition_with_auth");
+      logStartPartitionFunction("get_partition_with_auth", db_name, tbl_name,
+          part_vals);
+
+      Partition ret = null;
+      try {
+        ret = executeWithRetry(new Command<Partition>() {
+          @Override
+          Partition run(RawStore ms) throws Exception {
+            return ms.getPartitionWithAuth(db_name, tbl_name, part_vals,
+                user_name, group_names);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (NoSuchObjectException e) {
+        throw e;
+      } catch (Exception e) {
+        assert (e instanceof RuntimeException);
+        throw (RuntimeException) e;
+      }
+      return ret;
+    }
 
     public List<Partition> get_partitions(final String db_name,
         final String tbl_name, final short max_parts)
         throws NoSuchObjectException, MetaException {
@@ -1340,6 +1383,35 @@
       return ret;
     }
+
+    @Override
+    public List<Partition> get_partitions_with_auth(final String dbName,
+        final String tblName, final short maxParts, final String userName,
+        final List<String> groupNames) throws NoSuchObjectException,
+        MetaException, TException {
+      incrementCounter("get_partitions_with_auth");
+      logStartTableFunction("get_partitions_with_auth", dbName, tblName);
+
+      List<Partition> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<Partition>>() {
+          @Override
+          List<Partition> run(RawStore ms) throws Exception {
+            return ms.getPartitionsWithAuth(dbName, tblName, maxParts,
+                userName, groupNames);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (NoSuchObjectException e) {
+        throw e;
+      } catch (Exception e) {
+        assert (e instanceof RuntimeException);
+        throw (RuntimeException) e;
+      }
+      return ret;
+    }
 
     public List<String> get_partition_names(final String db_name,
         final String tbl_name, final short max_parts) throws MetaException {
@@ -1777,10 +1849,24 @@
 
     @Override
     public List<Partition> get_partitions_ps(final String db_name,
-        final String tbl_name, final List<String> part_vals, final short max_parts)
-        throws MetaException, TException {
+        final String tbl_name, final List<String> part_vals,
+        final short max_parts) throws MetaException, TException {
       incrementCounter("get_partitions_ps");
-      logStartPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals);
+      logStartPartitionFunction("get_partitions_ps", db_name, tbl_name,
+          part_vals);
+
+      return this.get_partitions_ps_with_auth(db_name, tbl_name, part_vals,
+          max_parts, null, null);
+    }
+
+    @Override
+    public List<Partition> get_partitions_ps_with_auth(final String db_name,
+        final String tbl_name, final List<String> part_vals,
+        final short max_parts, final String userName,
+        final List<String> groupNames) throws MetaException, TException {
+      incrementCounter("get_partitions_ps");
+      logStartPartitionFunction("get_partitions_ps", db_name, tbl_name,
+          part_vals);
 
       List<Partition> parts = null;
       List<Partition> matchingParts = new ArrayList<Partition>();
@@ -2134,6 +2220,577 @@
       return ret;
     }
 
+    @Override
+    public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
+        String userName, List<String> groupNames) throws MetaException,
+        TException {
+      if (hiveObject.getObjectType() == HiveObjectType.COLUMN) {
+        String partName = getPartName(hiveObject);
+        return this.get_column_privilege_set(hiveObject.getDbName(),
+            hiveObject.getObjectName(), partName, hiveObject.getColumnName(),
+            userName, groupNames);
+      } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) {
+        String partName = getPartName(hiveObject);
+        return this.get_partition_privilege_set(hiveObject.getDbName(),
+            hiveObject.getObjectName(), partName, userName, groupNames);
+      } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) {
+        return this.get_db_privilege_set(hiveObject.getDbName(), userName,
+            groupNames);
+      } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) {
+        return this.get_table_privilege_set(hiveObject.getDbName(),
+            hiveObject.getObjectName(), userName, groupNames);
+      }
+      return null;
+    }
+
+    private String getPartName(HiveObjectRef hiveObject) throws MetaException {
+      String partName = null;
+      List<String> partValue = hiveObject.getPartValues();
+      if (partValue != null && partValue.size() > 0) {
+        try {
+          Table table = get_table(hiveObject.getDbName(),
+              hiveObject.getObjectName());
+          partName = Warehouse.makePartName(table.getPartitionKeys(), partValue);
+        } catch (NoSuchObjectException e) {
+          throw new RuntimeException(e);
+        }
+      }
+      return partName;
+    }
+
+    public PrincipalPrivilegeSet get_column_privilege_set(final String dbName,
+        final String tableName, final String partName, final String columnName,
+        final String userName, final List<String> groupNames)
+        throws MetaException, TException {
+      incrementCounter("get_column_privilege_set");
+
+      PrincipalPrivilegeSet ret = null;
+      try {
+        ret = executeWithRetry(new Command<PrincipalPrivilegeSet>() {
+          @Override
+          PrincipalPrivilegeSet run(RawStore ms) throws Exception {
+            return ms.getColumnPrivilegeSet(dbName, tableName, partName,
+                columnName, userName, groupNames);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    public PrincipalPrivilegeSet get_db_privilege_set(final String dbName,
+        final String userName, final List<String> groupNames)
+        throws MetaException, TException {
+      incrementCounter("get_db_privilege_set");
+
+      PrincipalPrivilegeSet ret = null;
+      try {
+        ret = executeWithRetry(new Command<PrincipalPrivilegeSet>() {
+          @Override
+          PrincipalPrivilegeSet run(RawStore ms) throws Exception {
+            return ms.getDBPrivilegeSet(dbName, userName, groupNames);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    public PrincipalPrivilegeSet get_partition_privilege_set(
+        final String dbName, final String tableName, final String partName,
+        final String userName, final List<String> groupNames)
+        throws MetaException, TException {
incrementCounter("get_partition_privilege_set"); + + PrincipalPrivilegeSet ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + PrincipalPrivilegeSet run(RawStore ms) throws Exception { + return ms.getPartitionPrivilegeSet(dbName, tableName, partName, + userName, groupNames); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + public PrincipalPrivilegeSet get_table_privilege_set(final String dbName, + final String tableName, final String userName, + final List groupNames) throws MetaException, TException { + incrementCounter("get_table_privilege_set"); + + PrincipalPrivilegeSet ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + PrincipalPrivilegeSet run(RawStore ms) throws Exception { + return ms.getTablePrivilegeSet(dbName, tableName, userName, + groupNames); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public boolean add_role_member(final String roleName, + final String userName, final PrincipalType principalType) + throws MetaException, TException { + incrementCounter("add_role_member"); + + Boolean ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + Role role = ms.getRole(roleName); + return ms.addRoleMember(role, userName, principalType); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + public List list_roles(final String principalName, + final PrincipalType principalType) throws MetaException, TException { + incrementCounter("list_roles"); + + List ret = null; + try { + ret = executeWithRetry(new Command>() { + @Override + List run(RawStore ms) throws Exception { + List result = new ArrayList(); + List roleMap = ms.listRoles(principalName,principalType); + if (roleMap!=null) { + for (MRoleMap role : roleMap) { + MRole r = role.getRole(); + result.add(new Role(r.getRoleName(), r + .getCreateTime(), r.getOwnerName(), null)); + } + } + return result; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public boolean create_role(final String roleName, final String ownerName) + throws MetaException, TException { + incrementCounter("create_role"); + + Boolean ret = null; + try { + + ret = executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + return ms.addRole(roleName, ownerName); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public boolean drop_role(final String roleName) + throws MetaException, TException { + incrementCounter("drop_role"); + + Boolean ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + return ms.removeRole(roleName); + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public boolean grant_privileges(final PrivilegeBag privileges) throws MetaException, + TException { + incrementCounter("grant_privileges"); + + Boolean ret = null; + try { + ret = executeWithRetry(new Command() { + @Override + Boolean run(RawStore ms) throws Exception { + return ms.grantPrivileges(privileges); + } + }); + } catch 
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    @Override
+    public boolean remove_role_member(final String roleName,
+        final String userName, final PrincipalType principalType)
+        throws MetaException, TException {
+      incrementCounter("remove_role_member");
+
+      Boolean ret = null;
+      try {
+        ret = executeWithRetry(new Command<Boolean>() {
+          @Override
+          Boolean run(RawStore ms) throws Exception {
+            Role mRole = ms.getRole(roleName);
+            return ms.removeRoleMember(mRole, userName, principalType);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    @Override
+    public boolean revoke_privileges(final PrivilegeBag privileges)
+        throws MetaException, TException {
+      incrementCounter("revoke_privileges");
+
+      Boolean ret = null;
+      try {
+        ret = executeWithRetry(new Command<Boolean>() {
+          @Override
+          Boolean run(RawStore ms) throws Exception {
+            return ms.revokePrivileges(privileges);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    public PrincipalPrivilegeSet get_user_privilege_set(final String userName,
+        final List<String> groupNames) throws MetaException, TException {
+      incrementCounter("get_user_privilege_set");
+
+      PrincipalPrivilegeSet ret = null;
+      try {
+        ret = executeWithRetry(new Command<PrincipalPrivilegeSet>() {
+          @Override
+          PrincipalPrivilegeSet run(RawStore ms) throws Exception {
+            return ms.getUserPrivilegeSet(userName, groupNames);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    @Override
+    public boolean revoke_all_privileges(final String userName,
+        final PrincipalType principalType,
+        final boolean removeUserPriv, final List<String> dbs,
+        final List<String> tables, final List<String> parts,
+        final Map<String, List<String>> columns) throws MetaException,
+        TException {
+      incrementCounter("revoke_all_privileges");
+
+      Boolean ret = null;
+      try {
+        ret = executeWithRetry(new Command<Boolean>() {
+          @Override
+          Boolean run(RawStore ms) throws Exception {
+            return ms.revokeAllPrivileges(userName, principalType,
+                removeUserPriv, dbs, tables, parts, columns);
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    public PrincipalType getPrincipalType(String principalType) {
+      return PrincipalType.valueOf(principalType);
+    }
+
+    @Override
+    public List<HiveObjectPrivilege> list_privileges(String principalName,
+        PrincipalType principalType, HiveObjectRef hiveObject)
+        throws MetaException, TException {
+      if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) {
+        return this.list_global_privileges(principalName, principalType);
+      } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) {
+        return this.list_db_privileges(principalName, principalType,
+            hiveObject.getDbName());
+      } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) {
+        return this.list_table_privileges(principalName, principalType,
+            hiveObject.getDbName(), hiveObject.getObjectName());
+      } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) {
+        return this.list_partition_privileges(principalName, principalType,
+            hiveObject.getDbName(), hiveObject.getObjectName(),
+            hiveObject.getPartValues());
+      } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) {
+        return this.list_column_privileges(principalName, principalType,
+            hiveObject.getDbName(), hiveObject.getObjectName(),
+            hiveObject.getPartValues(), hiveObject.getColumnName());
+      }
+      return null;
+    }
+
+    public List<HiveObjectPrivilege> list_column_privileges(
+        final String principalName, final PrincipalType principalType,
+        final String dbName, final String tableName,
+        final List<String> partValues, final String columnName)
+        throws MetaException, TException {
+      incrementCounter("list_security_column_grant");
+
+      List<HiveObjectPrivilege> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<HiveObjectPrivilege>>() {
+          @Override
+          List<HiveObjectPrivilege> run(RawStore ms) throws Exception {
+            String partName = null;
+            if (partValues != null && partValues.size() > 0) {
+              Table tbl = get_table(dbName, tableName);
+              partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues);
+            }
+            List<MColumnPrivilege> mCols = ms.listMSecurityTabOrPartColumnGrant(
+                principalName, principalType, dbName, tableName, partName, columnName);
+            Partition part = null;
+            if (partName != null) {
+              part = get_partition_by_name(dbName, tableName, partName);
+            }
+
+            if (mCols.size() > 0) {
+              List<HiveObjectPrivilege> result = new ArrayList<HiveObjectPrivilege>();
+              for (int i = 0; i < mCols.size(); i++) {
+                MColumnPrivilege sCol = mCols.get(i);
+                HiveObjectRef objectRef = new HiveObjectRef(
+                    HiveObjectType.COLUMN, dbName, tableName,
+                    part == null ? null : part.getValues(),
+                    sCol.getColumnName());
+                HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef,
+                    sCol.getPrincipalName(), principalType,
+                    new PrivilegeGrantInfo(sCol.getPrivilege(),
+                        sCol.getCreateTime(), sCol.getGrantor(),
+                        PrincipalType.valueOf(sCol.getGrantorType()),
+                        sCol.getGrantOption()));
+                result.add(secObj);
+              }
+              return result;
+            }
+            return null;
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    public List<HiveObjectPrivilege> list_db_privileges(final String principalName,
+        final PrincipalType principalType, final String dbName)
+        throws MetaException, TException {
+      incrementCounter("list_security_db_grant");
+
+      List<HiveObjectPrivilege> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<HiveObjectPrivilege>>() {
+          @Override
+          List<HiveObjectPrivilege> run(RawStore ms) throws Exception {
+            List<MDBPrivilege> mDbs = ms.listMSecurityPrincipalDBGrant(
+                principalName, principalType, dbName);
+            if (mDbs.size() > 0) {
+              List<HiveObjectPrivilege> result = new ArrayList<HiveObjectPrivilege>();
+              for (int i = 0; i < mDbs.size(); i++) {
+                MDBPrivilege sDB = mDbs.get(i);
+                HiveObjectRef objectRef = new HiveObjectRef(
+                    HiveObjectType.DATABASE, dbName, null, null, null);
+                HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef,
+                    sDB.getPrincipalName(), principalType,
+                    new PrivilegeGrantInfo(sDB.getPrivilege(),
+                        sDB.getCreateTime(), sDB.getGrantor(),
+                        PrincipalType.valueOf(sDB.getGrantorType()),
+                        sDB.getGrantOption()));
+                result.add(secObj);
+              }
+              return result;
+            }
+            return null;
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    public List<HiveObjectPrivilege> list_partition_privileges(
+        final String principalName, final PrincipalType principalType,
+        final String dbName, final String tableName,
+        final List<String> partValues) throws MetaException, TException {
+      incrementCounter("list_security_partition_grant");
+
+      List<HiveObjectPrivilege> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<HiveObjectPrivilege>>() {
+          @Override
+          List<HiveObjectPrivilege> run(RawStore ms) throws Exception {
+            Table tbl = get_table(dbName, tableName);
+            String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues);
+            List<MTablePartitionPrivilege> mParts = ms.listMSecurityPrincipalPartitionGrant(
+                principalName, principalType, dbName, tableName, partName);
+            if (mParts.size() > 0) {
+              List<HiveObjectPrivilege> result = new ArrayList<HiveObjectPrivilege>();
+              for (int i = 0; i < mParts.size(); i++) {
+                MTablePartitionPrivilege sPart = mParts.get(i);
+                HiveObjectRef objectRef = new HiveObjectRef(
+                    HiveObjectType.PARTITION, dbName, tableName, partValues, null);
+                HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef,
+                    sPart.getPrincipalName(), principalType,
+                    new PrivilegeGrantInfo(sPart.getPrivilege(),
+                        sPart.getCreateTime(), sPart.getGrantor(),
+                        PrincipalType.valueOf(sPart.getGrantorType()),
+                        sPart.getGrantOption()));
+                result.add(secObj);
+              }
+              return result;
+            }
+            return null;
+          }
+        });
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    public List<HiveObjectPrivilege> list_table_privileges(
+        final String principalName, final PrincipalType principalType,
+        final String dbName, final String tableName) throws MetaException,
+        TException {
+      incrementCounter("list_security_table_grant");
+
+      List<HiveObjectPrivilege> ret = null;
+      try {
+        ret = executeWithRetry(new Command<List<HiveObjectPrivilege>>() {
+          @Override
+          List<HiveObjectPrivilege> run(RawStore ms) throws Exception {
+            List<MTablePartitionPrivilege> mTbls = ms.listMSecurityPrincipalTableGrant(
+                principalName, principalType, dbName, tableName);
+            if (mTbls.size() > 0) {
+              List<HiveObjectPrivilege> result = new ArrayList<HiveObjectPrivilege>();
0; i < mTbls.size(); i++) { + MTablePartitionPrivilege sTbl = mTbls.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.TABLE, dbName, tableName, null,null); + HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, + sTbl.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sTbl.getPrivilege(), sTbl.getCreateTime(), sTbl + .getGrantor(), PrincipalType.valueOf(sTbl + .getGrantorType()), sTbl.getGrantOption())); + result.add(secObj); + } + return result; + } + return null; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + public List list_global_privileges( + final String principalName, final PrincipalType principalType) + throws MetaException, TException { + incrementCounter("list_security_user_grant"); + + List ret = null; + try { + ret = executeWithRetry(new Command>() { + @Override + List run(RawStore ms) throws Exception { + List mUsers = ms.listMSecurityPrincipalUserGrant( + principalName, principalType); + if (mUsers.size() > 0) { + List result = new ArrayList(); + for (int i = 0; i < mUsers.size(); i++) { + MGlobalPrivilege sUsr = mUsers.get(i); + HiveObjectRef objectRef = new HiveObjectRef( + HiveObjectType.GLOBAL, null, null, null, null); + HiveObjectPrivilege secUser = new HiveObjectPrivilege( + objectRef, sUsr.getPrincipalName(), principalType, + new PrivilegeGrantInfo(sUsr.getPrivilege(), sUsr + .getCreateTime(), sUsr.getGrantor(), PrincipalType + .valueOf(sUsr.getGrantorType()), sUsr.getGrantOption())); + result.add(secUser); + } + return result; + } + return null; + } + }); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + } /** Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (revision 1050266) +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (working copy) @@ -27,6 +27,8 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -35,12 +37,19 @@ import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; import org.apache.hadoop.hive.metastore.api.Type; @@ -542,6 +551,23 @@ return 
Index: metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java	(revision 1050266)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java	(working copy)
@@ -27,6 +27,8 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -35,12 +37,19 @@
 import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
 import org.apache.hadoop.hive.metastore.api.Type;
@@ -542,6 +551,23 @@
     return deepCopyPartitions(
         client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts));
   }
+
+  @Override
+  public List<Partition> listPartitionsWithAuthInfo(String db_name,
+      String tbl_name, short max_parts, String user_name, List<String> group_names)
+      throws NoSuchObjectException, MetaException, TException {
+    return deepCopyPartitions(
+        client.get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names));
+  }
+
+  @Override
+  public List<Partition> listPartitionsWithAuthInfo(String db_name,
+      String tbl_name, List<String> part_vals, short max_parts,
+      String user_name, List<String> group_names) throws NoSuchObjectException,
+      MetaException, TException {
+    return deepCopyPartitions(client.get_partitions_ps_with_auth(db_name,
+        tbl_name, part_vals, max_parts, user_name, group_names));
+  }
 
   /**
    * Get list of partitions matching specified filter
@@ -591,6 +617,14 @@
       List<String> part_vals) throws NoSuchObjectException, MetaException, TException {
     return deepCopy(client.get_partition(db_name, tbl_name, part_vals));
   }
+
+  @Override
+  public Partition getPartitionWithAuthInfo(String db_name, String tbl_name,
+      List<String> part_vals, String user_name, List<String> group_names)
+      throws MetaException, UnknownTableException, NoSuchObjectException,
+      TException {
+    return deepCopy(client.get_partition_with_auth(db_name, tbl_name,
+        part_vals, user_name, group_names));
+  }
 
   /**
    * @param name
@@ -832,12 +866,30 @@
    */
   private Partition deepCopy(Partition partition) {
     Partition copy = null;
     if (partition != null) {
+      if (partition.getPrivileges() != null) {
+        setEmptyGrantList(partition.getPrivileges().getUserPrivileges());
+        setEmptyGrantList(partition.getPrivileges().getGroupPrivileges());
+        setEmptyGrantList(partition.getPrivileges().getRolePrivileges());
+      }
       copy = new Partition(partition);
     }
     return copy;
   }
+
+  private void setEmptyGrantList(Map<String, List<PrivilegeGrantInfo>> map) {
+    if (map != null) {
+      for (Map.Entry<String, List<PrivilegeGrantInfo>> element : map.entrySet()) {
+        if (element.getValue() == null) {
+          element.setValue(new ArrayList<PrivilegeGrantInfo>(0));
+        }
+      }
+    }
+  }
 
   private Database deepCopy(Database database) {
     Database copy = null;
     if (database != null) {
@@ -918,4 +970,68 @@
     return client.drop_index_by_name(dbName, tblName, name, deleteData);
   }
 
+  @Override
+  public boolean add_role_member(String roleName, String userName,
+      PrincipalType principalType) throws MetaException, TException {
+    return client.add_role_member(roleName, userName, principalType);
+  }
+
+  @Override
+  public boolean create_role(String roleName, String ownerName)
+      throws MetaException, TException {
+    return client.create_role(roleName, ownerName);
+  }
+
+  @Override
+  public boolean drop_role(String roleName) throws MetaException, TException {
+    return client.drop_role(roleName);
+  }
+
+  @Override
+  public List<Role> list_roles(String principalName,
+      PrincipalType principalType) throws MetaException, TException {
+    return client.list_roles(principalName, principalType);
+  }
+
+  @Override
+  public boolean grant_privileges(PrivilegeBag privileges)
+      throws MetaException, TException {
+    return client.grant_privileges(privileges);
+  }
+
+  @Override
+  public boolean remove_role_member(String roleName, String userName,
+      PrincipalType principalType) throws MetaException, TException {
+    return client.remove_role_member(roleName, userName, principalType);
+  }
+
+  @Override
+  public boolean revoke_all_privileges(String userName,
+      PrincipalType principalType, boolean removeUserPriv, List<String> dbs,
+      List<String> tables, List<String> parts,
+      Map<String, List<String>> columns) throws MetaException, TException {
+    return client.revoke_all_privileges(userName, principalType,
+        removeUserPriv, dbs, tables, parts, columns);
+  }
+
+  @Override
+  public boolean revoke_privileges(PrivilegeBag privileges) throws MetaException,
+      TException {
+    return client.revoke_privileges(privileges);
+  }
+
+  @Override
+  public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
+      String userName, List<String> groupNames) throws MetaException,
+      TException {
+    return client.get_privilege_set(hiveObject, userName, groupNames);
+  }
+
+  @Override
+  public List<HiveObjectPrivilege> list_privileges(String principalName,
+      PrincipalType principalType, HiveObjectRef hiveObject)
+      throws MetaException, TException {
+    return client.list_privileges(principalName, principalType, hiveObject);
+  }
 }
Index: metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java	(revision 1050266)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java	(working copy)
@@ -25,12 +25,18 @@
 import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
@@ -263,6 +269,23 @@
   public Partition getPartition(String dbName, String tblName,
       String name) throws MetaException, UnknownTableException,
       NoSuchObjectException, TException;
+
+  /**
+   * @param dbName
+   * @param tableName
+   * @param pvals
+   * @param userName
+   * @param groupNames
+   * @return
+   * @throws MetaException
+   * @throws UnknownTableException
+   * @throws NoSuchObjectException
+   * @throws TException
+   */
+  public Partition getPartitionWithAuthInfo(String dbName, String tableName,
+      List<String> pvals, String userName, List<String> groupNames)
+      throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+
   /**
    * @param tbl_name
    * @param db_name
@@ -285,6 +308,33 @@
       List<String> part_vals, short max_parts) throws MetaException, TException;
 
   /**
+   * @param dbName
+   * @param tableName
+   * @param s
+   * @param userName
+   * @param groupNames
+   * @return
+   * @throws NoSuchObjectException
+   */
+  public List<Partition> listPartitionsWithAuthInfo(String dbName,
+      String tableName, short s, String userName, List<String> groupNames)
+      throws MetaException, TException, NoSuchObjectException;
+
+  /**
+   * @param dbName
+   * @param tableName
+   * @param partialPvals
+   * @param s
+   * @param userName
+   * @param groupNames
+   * @return
+   * @throws NoSuchObjectException
+   */
+  public List<Partition> listPartitionsWithAuthInfo(String dbName,
+      String tableName, List<String> partialPvals, short s, String userName,
+      List<String> groupNames) throws MetaException, TException, NoSuchObjectException;
+
+  /**
    * @param tbl
    * @throws AlreadyExistsException
    * @throws InvalidObjectException
@@ -478,4 +528,130 @@
   public boolean dropIndex(String db_name, String tbl_name,
       String name, boolean deleteData) throws NoSuchObjectException,
       MetaException, TException;
+
+  /**
+   * @param role_name
+   *          role name
+   * @param owner_name
+   *          owner name
+   * @return
+   * @throws MetaException
+   * @throws TException
+   */
+  public boolean create_role(String role_name, String owner_name)
+      throws MetaException, TException;
+
+  /**
+   * @param role_name
+   *          role name
+   * @return
+   * @throws MetaException
+   * @throws TException
+   */
+  public boolean drop_role(String role_name) throws MetaException, TException;
+
+  /**
+   * @param role_name
+   *          role name
+   * @param user_name
+   *          user name
+   * @param principalType
+   * @return
+   * @throws MetaException
+   * @throws TException
+   */
+  public boolean add_role_member(String role_name, String user_name,
+      PrincipalType principalType) throws MetaException, TException;
+
+  /**
+   * @param role_name
+   *          role name
+   * @param user_name
+   *          user name
+   * @param principalType
+   * @return
+   * @throws MetaException
+   * @throws TException
+   */
+  public boolean remove_role_member(String role_name, String user_name,
+      PrincipalType principalType) throws MetaException, TException;
+
+  /**
+   * @param principalName
+   * @param principalType
+   * @return
+   * @throws MetaException
+   * @throws TException
+   */
+  public List<Role> list_roles(String principalName, PrincipalType principalType)
+      throws MetaException, TException;
+
+  /**
+   * @param hiveObject
+   * @param user_name
+   * @param group_names
+   * @return
+   * @throws MetaException
+   * @throws TException
+   */
+  public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
+      String user_name, List<String> group_names) throws MetaException,
+      TException;
+
+  /**
+   * @param principal_name
+   * @param principal_type
+   * @param hiveObject
+   * @return
+   * @throws MetaException
+   * @throws TException
+   */
+  public List<HiveObjectPrivilege> list_privileges(String principal_name,
+      PrincipalType principal_type, HiveObjectRef hiveObject)
+      throws MetaException, TException;
+
+  /**
+   * @param privileges
+   * @return
+   * @throws MetaException
+   * @throws TException
+   */
+  public boolean grant_privileges(PrivilegeBag privileges)
+      throws MetaException, TException;
+
+  /**
+   * @param privileges
+   * @return
+   * @throws MetaException
+   * @throws TException
+   */
+  public boolean revoke_privileges(PrivilegeBag privileges)
+      throws MetaException, TException;
+
+  /**
+   * @param userName
+   * @param principalType
+   * @param removeUserPriv
+   * @param dbs
+   * @param tables
+   * @param parts
+   * @param columns
+   * @return
+   * @throws MetaException
+   * @throws TException
+   */
+  boolean revoke_all_privileges(String userName, PrincipalType principalType,
+      boolean removeUserPriv, List<String> dbs, List<String> tables,
+      List<String> parts, Map<String, List<String>> columns)
+      throws MetaException, TException;
+
 }
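Usage sketch (illustrative, not part of this patch): the *WithAuthInfo variants
return partitions with the requesting principal's privilege set already attached,
so a caller can authorize per partition without extra round trips. Names are
hypothetical.

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;

    class ListWithAuthSketch {
      static void inspect(IMetaStoreClient client) throws Exception {
        List<Partition> parts = client.listPartitionsWithAuthInfo(
            "default", "t1", (short) -1, "userX", Arrays.asList("staff"));
        for (Partition p : parts) {
          PrincipalPrivilegeSet privs = p.getPrivileges(); // filled in by the store
        }
      }
    }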
Index: metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(revision 1050266)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java	(working copy)
@@ -21,11 +21,13 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
+import java.util.Set;
 import java.util.Map.Entry;
+import java.util.Properties;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
@@ -44,15 +46,24 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.HiveObjectType;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -62,6 +73,12 @@
 import org.apache.hadoop.hive.metastore.model.MIndex;
 import org.apache.hadoop.hive.metastore.model.MOrder;
 import org.apache.hadoop.hive.metastore.model.MPartition;
+import org.apache.hadoop.hive.metastore.model.MColumnPrivilege;
+import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
+import org.apache.hadoop.hive.metastore.model.MRole;
+import org.apache.hadoop.hive.metastore.model.MTablePartitionPrivilege;
+import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
+import org.apache.hadoop.hive.metastore.model.MRoleMap;
 import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
 import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
 import org.apache.hadoop.hive.metastore.model.MTable;
@@ -527,6 +544,68 @@
       openTransaction();
       MTable mtbl = convertToMTable(tbl);
       pm.makePersistent(mtbl);
+      PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges();
+      List<MTablePartitionPrivilege> toPersistPrivObjs = new ArrayList<MTablePartitionPrivilege>();
+      if (principalPrivs != null) {
+        int now = (int) (System.currentTimeMillis() / 1000);
+
+        Map<String, List<PrivilegeGrantInfo>> userPrivs = principalPrivs.getUserPrivileges();
+        if (userPrivs != null) {
+          for (Map.Entry<String, List<PrivilegeGrantInfo>> entry : userPrivs.entrySet()) {
+            String principalName = entry.getKey();
+            List<PrivilegeGrantInfo> privs = entry.getValue();
+            for (int i = 0; i < privs.size(); i++) {
+              PrivilegeGrantInfo priv = privs.get(i);
+              if (priv == null) {
+                continue;
+              }
+              MTablePartitionPrivilege mTblSec = new MTablePartitionPrivilege(
+                  principalName, PrincipalType.USER.toString(), mtbl, null,
+                  priv.getPrivilege(), now, priv.getGrantor(),
+                  priv.getGrantorType().toString(), priv.isGrantOption());
+              toPersistPrivObjs.add(mTblSec);
+            }
+          }
+        }
+
+        Map<String, List<PrivilegeGrantInfo>> groupPrivs = principalPrivs.getGroupPrivileges();
+        if (groupPrivs != null) {
+          for (Map.Entry<String, List<PrivilegeGrantInfo>> entry : groupPrivs.entrySet()) {
+            String principalName = entry.getKey();
+            List<PrivilegeGrantInfo> privs = entry.getValue();
+            for (int i = 0; i < privs.size(); i++) {
+              PrivilegeGrantInfo priv = privs.get(i);
+              if (priv == null) {
+                continue;
+              }
+              MTablePartitionPrivilege mTblSec = new MTablePartitionPrivilege(
+                  principalName, PrincipalType.GROUP.toString(), mtbl, null,
+                  priv.getPrivilege(), now, priv.getGrantor(),
+                  priv.getGrantorType().toString(), priv.isGrantOption());
+              toPersistPrivObjs.add(mTblSec);
+            }
+          }
+        }
+
+        Map<String, List<PrivilegeGrantInfo>> rolePrivs = principalPrivs.getRolePrivileges();
+        if (rolePrivs != null) {
+          for (Map.Entry<String, List<PrivilegeGrantInfo>> entry : rolePrivs.entrySet()) {
+            String principalName = entry.getKey();
+            List<PrivilegeGrantInfo> privs = entry.getValue();
+            for (int i = 0; i < privs.size(); i++) {
+              PrivilegeGrantInfo priv = privs.get(i);
+              if (priv == null) {
+                continue;
+              }
+              MTablePartitionPrivilege mTblSec = new MTablePartitionPrivilege(
+                  principalName, PrincipalType.ROLE.toString(), mtbl, null,
+                  priv.getPrivilege(), now, priv.getGrantor(),
+                  priv.getGrantorType().toString(), priv.isGrantOption());
+              toPersistPrivObjs.add(mTblSec);
+            }
+          }
+        }
+      }
+      pm.makePersistentAll(toPersistPrivObjs);
       commited = commitTransaction();
     } finally {
       if (!commited) {
@@ -543,6 +622,16 @@
       pm.retrieve(tbl);
       if (tbl != null) {
         // first remove all the partitions
+        List<MTablePartitionPrivilege> tabParts = listMSecurityTablePart(dbName,
+            tableName, false);
+        if (tabParts != null && tabParts.size() > 0) {
+          pm.deletePersistentAll(tabParts);
+        }
+        List<MColumnPrivilege> colGrants = listMSecurityTablePartColumn(dbName,
+            tableName, false);
+        if (colGrants != null && colGrants.size() > 0) {
+          pm.deletePersistentAll(colGrants);
+        }
         pm.deletePersistentAll(listMPartitions(dbName, tableName, -1));
         // then remove the table
         pm.deletePersistent(tbl);
@@ -555,7 +644,7 @@
     }
     return success;
   }
-  
+
   public Table getTable(String dbName, String tableName) throws MetaException {
     boolean commited = false;
     Table tbl = null;
@@ -791,9 +880,45 @@
     boolean success = false;
     boolean commited = false;
     try {
+      MTable table = this.getMTable(part.getDbName(), part.getTableName());
+      List<MTablePartitionPrivilege> tabGrants = null;
+      List<MColumnPrivilege> tabColumnGrants = null;
+      if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
+        tabGrants = this.listMSecurityTablePart(part.getDbName(),
+            part.getTableName(), true);
+        tabColumnGrants = this.listMSecurityTablePartColumn(
+            part.getDbName(), part.getTableName(), true);
+      }
       openTransaction();
       MPartition mpart = convertToMPart(part);
       pm.makePersistent(mpart);
+
+      int now = (int) (System.currentTimeMillis() / 1000);
+      List<Object> toPersist = new ArrayList<Object>();
+      if (tabGrants != null) {
+        for (MTablePartitionPrivilege tab : tabGrants) {
+          MTablePartitionPrivilege partGrant = new MTablePartitionPrivilege(tab
+              .getPrincipalName(), tab.getPrincipalType(), tab.getTable(),
+              mpart, tab.getPrivilege(), now, tab.getGrantor(),
+              tab.getGrantorType(), tab.getGrantOption());
+          toPersist.add(partGrant);
+        }
+      }
+
+      if (tabColumnGrants != null) {
+        for (MColumnPrivilege col : tabColumnGrants) {
+          MColumnPrivilege partColumn = new MColumnPrivilege(col
+              .getPrincipalName(), col.getPrincipalType(), col.getTable(), mpart,
+              col.getColumnName(), col.getPrivilege(),
+              now, col.getGrantor(), col.getGrantorType(), col.getGrantOption());
+          toPersist.add(partColumn);
+        }
+      }
+
+      if (toPersist.size() > 0) {
+        pm.makePersistentAll(toPersist);
+      }
+
       commited = commitTransaction();
       success = true;
     } finally {
@@ -810,10 +935,42 @@
     Partition part = convertToPart(getMPartition(dbName, tableName, part_vals));
     commitTransaction();
     if (part == null) {
-      throw new NoSuchObjectException();
+      throw new NoSuchObjectException("partition values="
+          + part_vals.toString());
     }
     return part;
   }
+
+  @Override
+  public Partition getPartitionWithAuth(String dbName, String tblName,
+      List<String> partVals, String user_name, List<String> group_names)
+      throws NoSuchObjectException, MetaException, InvalidObjectException {
+    boolean success = false;
+    try {
+      openTransaction();
+      MPartition mpart = getMPartition(dbName, tblName, partVals);
+      if (mpart == null) {
+        commitTransaction();
+        throw new NoSuchObjectException("partition values="
+            + partVals.toString());
+      }
+      Partition part = null;
+      MTable mtbl = mpart.getTable();
+      part = convertToPart(mpart);
+      String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
+          .getPartitionKeys()), partVals);
+      success = commitTransaction();
+      PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName,
+          tblName, partName, user_name, group_names);
+      part.setPrivileges(partAuth);
+      return part;
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+  }
 
   private MPartition getMPartition(String dbName, String tableName,
       List<String> part_vals) throws MetaException {
@@ -880,6 +1037,26 @@
       openTransaction();
       MPartition part = getMPartition(dbName, tableName, part_vals);
       if (part != null) {
+        List<MFieldSchema> schemas = part.getTable().getPartitionKeys();
+        List<String> colNames = new ArrayList<String>();
+        for (MFieldSchema col : schemas) {
+          colNames.add(col.getName());
+        }
+        String partName = FileUtils.makePartName(colNames, part_vals);
+
+        List<MTablePartitionPrivilege> partGrants = listMSecurityPartition(
+            dbName, tableName, partName);
+        if (partGrants != null && partGrants.size() > 0) {
+          pm.deletePersistentAll(partGrants);
+        }
+
+        List<MColumnPrivilege> partColumnGrants = listMSecurityPartitionColumn(
+            dbName, tableName, partName);
+        if (partColumnGrants != null && partColumnGrants.size() > 0) {
+          pm.deletePersistentAll(partColumnGrants);
+        }
+
         pm.deletePersistent(part);
       }
       success = commitTransaction();
@@ -899,6 +1076,35 @@
     commitTransaction();
     return parts;
   }
+
+  @Override
+  public List<Partition> getPartitionsWithAuth(String dbName, String tblName,
+      short maxParts, String userName, List<String> groupNames)
+      throws MetaException, NoSuchObjectException, InvalidObjectException {
+    boolean success = false;
+    try {
+      openTransaction();
+      List<MPartition> mparts = listMPartitions(dbName, tblName, maxParts);
+      List<Partition> parts = new ArrayList<Partition>();
+      if (mparts != null && mparts.size() > 0) {
+        for (MPartition mpart : mparts) {
+          MTable mtbl = mpart.getTable();
+          Partition part = convertToPart(mpart);
+          String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
+              .getPartitionKeys()), part.getValues());
+          PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName,
+              tblName, partName, userName, groupNames);
+          part.setPrivileges(partAuth);
+          parts.add(part);
+        }
+      }
+      success = commitTransaction();
+      return parts;
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+  }
 
   private List<Partition> convertToParts(List<MPartition> mparts)
       throws MetaException {
@@ -1389,4 +1595,1384 @@
     }
     return pns;
   }
+
+  @Override
+  public boolean addRole(String roleName, String ownerName)
+      throws InvalidObjectException, MetaException, NoSuchObjectException {
+    boolean success = false;
+    boolean commited = false;
+    try {
+      openTransaction();
+      MRole nameCheck = this.getMRole(roleName);
+      if (nameCheck != null) {
+        throw new RuntimeException("Role " + roleName + " already exists.");
+      }
+      int now = (int) (System.currentTimeMillis() / 1000);
+      MRole mRole = new MRole(roleName, now, ownerName);
+      pm.makePersistent(mRole);
+      commited = commitTransaction();
+      success = true;
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+    }
+    return success;
+  }
+
+  @Override
+  public boolean addRoleMember(Role role, String userName,
+      PrincipalType principalType) throws MetaException, NoSuchObjectException {
+    boolean success = false;
+    boolean commited = false;
+    try {
+      MRoleMap roleMap = null;
+      try {
+        roleMap = this.getMSecurityUserRoleMap(userName, principalType,
+            role.getRoleName());
+      } catch (Exception e) {
+        // ignored: no existing membership for this principal
+      }
+      if (roleMap != null) {
+        throw new RuntimeException("Principal " + userName
+            + " already has the role " + role.getRoleName());
+      }
+      openTransaction();
+      MRole mRole = getMRole(role.getRoleName());
+      long now = System.currentTimeMillis() / 1000;
+      MRoleMap roleMember = new MRoleMap(userName, principalType.toString(),
+          mRole, (int) now);
+      pm.makePersistent(roleMember);
+      commited = commitTransaction();
+      success = true;
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+    }
+    return success;
+  }
+
+  @Override
+  public boolean removeRoleMember(Role role, String userName,
+      PrincipalType principalType) throws MetaException, NoSuchObjectException {
+    boolean success = false;
+    try {
+      openTransaction();
+      MRoleMap roleMember = getMSecurityUserRoleMap(userName, principalType,
+          role.getRoleName());
+      pm.deletePersistent(roleMember);
+      success = commitTransaction();
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return success;
+  }
+
+  private MRoleMap getMSecurityUserRoleMap(String userName,
+      PrincipalType principalType, String roleName) {
+    MRoleMap mRoleMember = null;
+    boolean commited = false;
+    try {
+      openTransaction();
+      Query query = pm.newQuery(MRoleMap.class,
+          "principalName == t1 && principalType == t2 && role.roleName == t3");
+      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
+      query.setUnique(true);
+      mRoleMember = (MRoleMap) query.executeWithArray(userName,
+          principalType.toString(), roleName);
+      pm.retrieve(mRoleMember);
+      commited = commitTransaction();
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+    }
+    return mRoleMember;
+  }
+
+  @Override
+  public boolean removeRole(String roleName) throws MetaException,
+      NoSuchObjectException {
+    boolean success = false;
+    try {
+      openTransaction();
+      MRole mRol = getMRole(roleName);
+      pm.retrieve(mRol);
+      if (mRol != null) {
+        // first remove all the memberships: the members of this role, and the
+        // memberships this role itself has been granted
+        List<MRoleMap> roleMap = listRoleMembers(mRol);
+        if (roleMap.size() > 0) {
+          pm.deletePersistentAll(roleMap);
+        }
+        List<MRoleMap> roleMember = listMSecurityPrincipalMembershipRole(
+            mRol.getRoleName(), PrincipalType.ROLE);
+        if (roleMember.size() > 0) {
+          pm.deletePersistentAll(roleMember);
+        }
+        // then remove all the grants
+        List<MGlobalPrivilege> userGrants = listMSecurityPrincipalUserGrant(
+            mRol.getRoleName(), PrincipalType.ROLE);
+        if (userGrants.size() > 0) {
+          pm.deletePersistentAll(userGrants);
+        }
+        List<MDBPrivilege> dbGrants = listAllMSecurityPrincipalDBGrant(
+            mRol.getRoleName(), PrincipalType.ROLE);
+        if (dbGrants.size() > 0) {
+          pm.deletePersistentAll(dbGrants);
+        }
+        List<MTablePartitionPrivilege> tabPartGrants = listAllMSecurityPrincipalTablePartGrant(
+            mRol.getRoleName(), PrincipalType.ROLE);
+        if (tabPartGrants.size() > 0) {
+          pm.deletePersistentAll(tabPartGrants);
+        }
+        List<MColumnPrivilege> columnGrants = listAllMSecurityPrincipalColumnGrant(
+            mRol.getRoleName(), PrincipalType.ROLE);
+        if (columnGrants.size() > 0) {
+          pm.deletePersistentAll(columnGrants);
+        }
+        // last, remove the role itself
+        pm.deletePersistent(mRol);
+      }
+      success = commitTransaction();
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return success;
+  }
+
+  private List<MRoleMap> listRoles(String userName, List<String> groupNames) {
+    List<MRoleMap> ret = new ArrayList<MRoleMap>();
+    if (userName != null) {
+      ret.addAll(listRoles(userName, PrincipalType.USER));
+    }
+    if (groupNames != null) {
+      for (String groupName : groupNames) {
+        ret.addAll(listRoles(groupName, PrincipalType.GROUP));
+      }
+    }
+    return ret;
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public List<MRoleMap> listRoles(String principalName,
+      PrincipalType principalType) {
+    boolean success = false;
+    List<MRoleMap> mRoleMember = null;
+    try {
+      openTransaction();
+      LOG.debug("Executing listRoles");
+      Query query = pm.newQuery(MRoleMap.class,
+          "principalName == t1 && principalType == t2");
+      query.declareParameters("java.lang.String t1, java.lang.String t2");
+      query.setUnique(false);
+      mRoleMember = (List<MRoleMap>) query.executeWithArray(principalName,
+          principalType.toString());
+      LOG.debug("Done executing query for listRoles");
+      pm.retrieveAll(mRoleMember);
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for listRoles");
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return mRoleMember;
+  }
+
+  @SuppressWarnings("unchecked")
+  private List<MRoleMap> listMSecurityPrincipalMembershipRole(
+      final String roleName, final PrincipalType principalType) {
+    boolean success = false;
+    List<MRoleMap> mRoleMembership = null;
+    try {
+      openTransaction();
+      LOG.debug("Executing listMSecurityPrincipalMembershipRole");
+      Query query = pm.newQuery(MRoleMap.class,
+          "principalName == t1 && principalType == t2");
+      query.declareParameters("java.lang.String t1, java.lang.String t2");
+      mRoleMembership = (List<MRoleMap>) query.execute(roleName,
+          principalType.toString());
+      LOG.debug("Done executing query for listMSecurityPrincipalMembershipRole");
+      pm.retrieveAll(mRoleMembership);
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for listMSecurityPrincipalMembershipRole");
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return mRoleMembership;
+  }
+
+  public Role getRole(String roleName) throws NoSuchObjectException {
+    MRole mRole = this.getMRole(roleName);
+    if (mRole == null) {
+      throw new NoSuchObjectException(roleName + " role can not be found.");
+    }
+    Role ret = new Role(mRole.getRoleName(), mRole.getCreateTime(),
+        mRole.getOwnerName(), null);
+    return ret;
+  }
+
+  private MRole getMRole(String roleName) {
+    MRole mrole = null;
+    boolean commited = false;
+    try {
+      openTransaction();
+      Query query = pm.newQuery(MRole.class, "roleName == t1");
+      query.declareParameters("java.lang.String t1");
+      query.setUnique(true);
+      mrole = (MRole) query.execute(roleName);
+      pm.retrieve(mrole);
+      commited = commitTransaction();
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+    }
+    return mrole;
+  }
+
+  @Override
+  public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+      List<String> groupNames) throws InvalidObjectException, MetaException {
+
+  @Override
+  public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+      List<String> groupNames) throws InvalidObjectException, MetaException {
+    boolean commited = false;
+    PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet();
+    try {
+      openTransaction();
+      if (userName != null) {
+        List<MGlobalPrivilege> user = this.listMSecurityPrincipalUserGrant(userName, PrincipalType.USER);
+        if (user.size() > 0) {
+          Map<String, List<PrivilegeGrantInfo>> userPriv = new HashMap<String, List<PrivilegeGrantInfo>>();
+          List<PrivilegeGrantInfo> grantInfos = new ArrayList<PrivilegeGrantInfo>(user.size());
+          for (int i = 0; i < user.size(); i++) {
+            MGlobalPrivilege item = user.get(i);
+            grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item
+                .getCreateTime(), item.getGrantor(), PrincipalType.valueOf(item
+                .getGrantorType()), item.getGrantOption()));
+          }
+          userPriv.put(userName, grantInfos);
+          ret.setUserPrivileges(userPriv);
+        }
+      }
+      if (groupNames != null && groupNames.size() > 0) {
+        Map<String, List<PrivilegeGrantInfo>> groupPriv = new HashMap<String, List<PrivilegeGrantInfo>>();
+        for (String groupName : groupNames) {
+          List<MGlobalPrivilege> group = this.listMSecurityPrincipalUserGrant(groupName, PrincipalType.GROUP);
+          if (group.size() > 0) {
+            List<PrivilegeGrantInfo> grantInfos = new ArrayList<PrivilegeGrantInfo>(group.size());
+            for (int i = 0; i < group.size(); i++) {
+              MGlobalPrivilege item = group.get(i);
+              grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item
+                  .getCreateTime(), item.getGrantor(), PrincipalType.valueOf(item
+                  .getGrantorType()), item.getGrantOption()));
+            }
+            groupPriv.put(groupName, grantInfos);
+          }
+        }
+        ret.setGroupPrivileges(groupPriv);
+      }
+      commited = commitTransaction();
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+    }
+    return ret;
+  }
+
+  public List<PrivilegeGrantInfo> getDBPrivilege(String dbName,
+      String principalName, PrincipalType principalType)
+      throws InvalidObjectException, MetaException {
+    if (principalName != null) {
+      List<MDBPrivilege> userNameDbPriv = this.listMSecurityPrincipalDBGrant(
+          principalName, principalType, dbName);
+      if (userNameDbPriv != null && userNameDbPriv.size() > 0) {
+        List<PrivilegeGrantInfo> grantInfos = new ArrayList<PrivilegeGrantInfo>(
+            userNameDbPriv.size());
+        for (int i = 0; i < userNameDbPriv.size(); i++) {
+          MDBPrivilege item = userNameDbPriv.get(i);
+          grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item
+              .getCreateTime(), item.getGrantor(), PrincipalType.valueOf(item
+              .getGrantorType()), item.getGrantOption()));
+        }
+        return grantInfos;
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName,
+      String userName, List<String> groupNames) throws InvalidObjectException,
+      MetaException {
+    boolean commited = false;
+    PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet();
+    try {
+      openTransaction();
+      if (userName != null) {
+        Map<String, List<PrivilegeGrantInfo>> dbUserPriv = new HashMap<String, List<PrivilegeGrantInfo>>();
+        dbUserPriv.put(userName, getDBPrivilege(dbName, userName,
+            PrincipalType.USER));
+        ret.setUserPrivileges(dbUserPriv);
+      }
+      if (groupNames != null && groupNames.size() > 0) {
+        Map<String, List<PrivilegeGrantInfo>> dbGroupPriv = new HashMap<String, List<PrivilegeGrantInfo>>();
+        for (String groupName : groupNames) {
+          dbGroupPriv.put(groupName, getDBPrivilege(dbName, groupName,
+              PrincipalType.GROUP));
+        }
+        ret.setGroupPrivileges(dbGroupPriv);
+      }
+      List<MRoleMap> roles = listRoles(userName, groupNames);
+      if (roles != null && roles.size() > 0) {
+        Map<String, List<PrivilegeGrantInfo>> dbRolePriv = new HashMap<String, List<PrivilegeGrantInfo>>();
+        for (MRoleMap role : roles) {
+          String name = role.getRole().getRoleName();
+          dbRolePriv
+              .put(name, getDBPrivilege(dbName, name, PrincipalType.ROLE));
+        }
+        ret.setRolePrivileges(dbRolePriv);
+      }
+      commited = commitTransaction();
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+    }
+    return ret;
+  }
+
+  @Override
+  public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName,
+      String tableName, String partition, String userName,
+      List<String> groupNames) throws InvalidObjectException, MetaException {
+    boolean commited =
false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + try { + openTransaction(); + if (userName != null) { + Map> partUserPriv = new HashMap>(); + partUserPriv.put(userName, getPartitionPrivilege(dbName, + tableName, partition, userName, PrincipalType.USER)); + ret.setUserPrivileges(partUserPriv); + } + if (groupNames != null && groupNames.size() > 0) { + Map> partGroupPriv = new HashMap>(); + for (String groupName : groupNames) { + partGroupPriv.put(groupName, getPartitionPrivilege(dbName, tableName, + partition, groupName, PrincipalType.GROUP)); + } + ret.setGroupPrivileges(partGroupPriv); + } + List roles = listRoles(userName, groupNames); + if (roles != null && roles.size() > 0) { + Map> partRolePriv = new HashMap>(); + for (MRoleMap role : roles) { + String roleName = role.getRole().getRoleName(); + partRolePriv.put(roleName, getPartitionPrivilege(dbName, tableName, + partition, roleName, PrincipalType.ROLE)); + } + ret.setRolePrivileges(partRolePriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, + String tableName, String userName, List groupNames) + throws InvalidObjectException, MetaException { + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + try { + openTransaction(); + if (userName != null) { + Map> tableUserPriv = new HashMap>(); + tableUserPriv.put(userName, getTablePrivilege(dbName, + tableName, userName, PrincipalType.USER)); + ret.setUserPrivileges(tableUserPriv); + } + if (groupNames != null && groupNames.size() > 0) { + Map> tableGroupPriv = new HashMap>(); + for (String groupName : groupNames) { + tableGroupPriv.put(groupName, getTablePrivilege(dbName, tableName, + groupName, PrincipalType.GROUP)); + } + ret.setGroupPrivileges(tableGroupPriv); + } + List roles = listRoles(userName, groupNames); + if (roles != null && roles.size() > 0) { + Map> tableRolePriv = new HashMap>(); + for (MRoleMap role : roles) { + String roleName = role.getRole().getRoleName(); + tableRolePriv.put(roleName, getTablePrivilege(dbName, tableName, + roleName, PrincipalType.ROLE)); + } + ret.setRolePrivileges(tableRolePriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + @Override + public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, + String tableName, String partitionName, String columnName, + String userName, List groupNames) throws InvalidObjectException, + MetaException { + boolean commited = false; + PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); + try { + openTransaction(); + if (userName != null) { + Map> columnUserPriv = new HashMap>(); + columnUserPriv.put(userName, getColumnPrivilege(dbName, tableName, + columnName, partitionName, userName, PrincipalType.USER)); + ret.setUserPrivileges(columnUserPriv); + } + if (groupNames != null && groupNames.size() > 0) { + Map> columnGroupPriv = new HashMap>(); + for (String groupName : groupNames) { + columnGroupPriv.put(groupName, getColumnPrivilege(dbName, tableName, + columnName, partitionName, groupName, PrincipalType.GROUP)); + } + ret.setGroupPrivileges(columnGroupPriv); + } + List roles = listRoles(userName, groupNames); + if (roles != null && roles.size() > 0) { + Map> columnRolePriv = new HashMap>(); + for (MRoleMap role : roles) { + String roleName = role.getRole().getRoleName(); + columnRolePriv.put(roleName, 
getColumnPrivilege(dbName, tableName, + columnName, partitionName, roleName, PrincipalType.ROLE)); + } + ret.setRolePrivileges(columnRolePriv); + } + commited = commitTransaction(); + } finally { + if (!commited) { + rollbackTransaction(); + } + } + return ret; + } + + private List getPartitionPrivilege(String dbName, + String tableName, String partName, String principalName, + PrincipalType principalType) { + + if (principalName != null) { + List userNameTabPartPriv = this + .listMSecurityPrincipalPartitionGrant(principalName, principalType, + dbName, tableName, partName); + if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { + List grantInfos = new ArrayList( + userNameTabPartPriv.size()); + for (int i = 0; i < userNameTabPartPriv.size(); i++) { + MTablePartitionPrivilege item = userNameTabPartPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item + .getCreateTime(), item.getGrantor(), PrincipalType.valueOf(item + .getGrantorType()), item.getGrantOption())); + + } + return grantInfos; + } + } + return null; + } + + private List getTablePrivilege(String dbName, + String tableName, String principalName, PrincipalType principalType) { + if (principalName != null) { + List userNameTabPartPriv = this + .listMSecurityPrincipalTableGrant(principalName, principalType, + dbName, tableName); + if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { + List grantInfos = new ArrayList( + userNameTabPartPriv.size()); + for (int i = 0; i < userNameTabPartPriv.size(); i++) { + MTablePartitionPrivilege item = userNameTabPartPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item + .getCreateTime(), item.getGrantor(), PrincipalType.valueOf(item + .getGrantorType()), item.getGrantOption())); + } + return grantInfos; + } + } + return null; + } + + private List getColumnPrivilege(String dbName, + String tableName, String columnName, String partitionName, + String principalName, PrincipalType principalType) { + List userNameColumnPriv = null; + if (partitionName == null) { + userNameColumnPriv = this.listMSecurityPrincipalTableColumnGrant( + principalName, principalType, dbName, tableName, columnName, true); + } else { + userNameColumnPriv = this.listMSecurityPrincipalPartitionColumnGrant( + principalName, principalType, dbName, tableName, partitionName, + columnName); + } + if (userNameColumnPriv != null && userNameColumnPriv.size() > 0) { + List grantInfos = new ArrayList( + userNameColumnPriv.size()); + for (int i = 0; i < userNameColumnPriv.size(); i++) { + MColumnPrivilege item = userNameColumnPriv.get(i); + grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item + .getCreateTime(), item.getGrantor(), PrincipalType.valueOf(item + .getGrantorType()), item.getGrantOption())); + } + return grantInfos; + } + return null; + } + + @Override + public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException, + MetaException, NoSuchObjectException { + boolean committed = false; + int now = (int) (System.currentTimeMillis() / 1000); + try { + openTransaction(); + List persistentObjs = new ArrayList(); + + List privilegeList = privileges.getPrivileges(); + + if (privilegeList != null && privilegeList.size() > 0) { + Iterator privIter = privilegeList.iterator(); + Set privSet = new HashSet(); + while (privIter.hasNext()) { + HiveObjectPrivilege privDef = privIter.next(); + HiveObjectRef hiveObject = privDef.getHiveObject(); + String privilegeStr = privDef.getGrantInfo().getPrivilege(); + String[] privs = 
privilegeStr.split(","); + String userName = privDef.getPrincipalName(); + PrincipalType principalType = privDef.getPrincipalType(); + String grantor = privDef.getGrantInfo().getGrantor(); + String grantorType = privDef.getGrantInfo().getGrantorType().toString(); + boolean grantOption = privDef.getGrantInfo().isGrantOption(); + privSet.clear(); + + if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { + List globalPrivs = this + .listMSecurityPrincipalUserGrant(userName, principalType); + if (globalPrivs != null) { + for (MGlobalPrivilege priv : globalPrivs) { + if (priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted by " + grantor); + } + MGlobalPrivilege mGlobalPrivs = new MGlobalPrivilege(userName, + principalType.toString(), privilege, now, grantor, grantorType, grantOption); + persistentObjs.add(mGlobalPrivs); + } + } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { + MDatabase dbObj = getMDatabase(hiveObject.getDbName()); + if (dbObj != null) { + List dbPrivs = this.listMSecurityPrincipalDBGrant( + userName, principalType, hiveObject.getDbName()); + if (dbPrivs != null) { + for (MDBPrivilege priv : dbPrivs) { + if (priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted on database " + + hiveObject.getDbName() + " by " + grantor); + } + MDBPrivilege mDb = new MDBPrivilege(userName, principalType + .toString(), dbObj, privilege, now, grantor, grantorType, grantOption); + persistentObjs.add(mDb); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { + MTable tblObj = getMTable(hiveObject.getDbName(), hiveObject + .getObjectName()); + if (tblObj != null) { + List tablePrivs = this + .listMSecurityPrincipalTableGrant(userName, principalType, + hiveObject.getDbName(), hiveObject.getObjectName()); + if (tablePrivs != null) { + for (MTablePartitionPrivilege priv : tablePrivs) { + if (priv.getGrantor() != null + && priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted on table [" + + hiveObject.getDbName() + "," + + hiveObject.getObjectName() + "] by " + grantor); + } + MTablePartitionPrivilege mTab = new MTablePartitionPrivilege( + userName, principalType.toString(), tblObj, null, + privilege, now, grantor, grantorType, grantOption); + persistentObjs.add(mTab); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { + MPartition partObj = this.getMPartition(hiveObject.getDbName(), + hiveObject.getObjectName(), hiveObject.getPartValues()); + String partName = null; + if (partObj != null) { + partName = partObj.getPartitionName(); + List partPrivs = this + .listMSecurityPrincipalPartitionGrant(userName, + principalType, hiveObject.getDbName(), hiveObject + .getObjectName(), partObj.getPartitionName()); + if (partPrivs != null) { + for (MTablePartitionPrivilege priv : partPrivs) { + if (priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new 
InvalidObjectException(privilege + + " is already granted on partition [" + + hiveObject.getDbName() + "," + + hiveObject.getObjectName() + "," + + partName + "] by " + grantor); + } + MTablePartitionPrivilege mTab = new MTablePartitionPrivilege( + userName, principalType.toString(), partObj.getTable(), + partObj, privilege, now, grantor, grantorType, grantOption); + persistentObjs.add(mTab); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { + MTable tblObj = getMTable(hiveObject.getDbName(), hiveObject + .getObjectName()); + if (tblObj != null) { + MPartition partObj = null; + List colPrivs = null; + + if (hiveObject.getPartValues() != null) { + partObj = this.getMPartition(hiveObject.getDbName(), hiveObject + .getObjectName(), hiveObject.getPartValues()); + if (partObj == null) { + continue; + } + colPrivs = this.listMSecurityPrincipalPartitionColumnGrant( + userName, principalType, hiveObject.getDbName(), hiveObject + .getObjectName(), partObj.getPartitionName(), + hiveObject.getColumnName()); + } else { + colPrivs = this.listMSecurityPrincipalTableColumnGrant( + userName, principalType, hiveObject.getDbName(), hiveObject + .getObjectName(), hiveObject.getColumnName(), true); + } + + if (colPrivs != null) { + for (MColumnPrivilege priv : colPrivs) { + if (priv.getGrantor().equalsIgnoreCase(grantor)) { + privSet.add(priv.getPrivilege()); + } + } + } + for (String privilege : privs) { + if (privSet.contains(privilege)) { + throw new InvalidObjectException(privilege + + " is already granted on column " + + hiveObject.getColumnName() + " [" + + hiveObject.getDbName() + "," + + hiveObject.getObjectName() + "," + + partObj.getPartitionName() + "] by " + grantor); + } + MColumnPrivilege mCol = new MColumnPrivilege(userName, + principalType.toString(), tblObj, partObj, hiveObject + .getColumnName(), privilege, now, grantor, grantorType, + grantOption); + persistentObjs.add(mCol); + } + } + } + } + } + if (persistentObjs.size() > 0) { + pm.makePersistentAll(persistentObjs); + } + committed = commitTransaction(); + } finally { + if (!committed) { + rollbackTransaction(); + } + } + return committed; + } + + @Override + public boolean revokePrivileges(PrivilegeBag privileges) + throws InvalidObjectException, MetaException, NoSuchObjectException { + boolean committed = false; + try { + openTransaction(); + List persistentObjs = new ArrayList(); + + List privilegeList = privileges.getPrivileges(); + + + if (privilegeList != null && privilegeList.size() > 0) { + Iterator privIter = privilegeList.iterator(); + + while (privIter.hasNext()) { + HiveObjectPrivilege privDef = privIter.next(); + HiveObjectRef hiveObject = privDef.getHiveObject(); + String privilegeStr = privDef.getGrantInfo().getPrivilege(); + if (privilegeStr == null || privilegeStr.trim().equals("")) { + continue; + } + String[] privs = privilegeStr.split(","); + String userName = privDef.getPrincipalName(); + PrincipalType principalType = privDef.getPrincipalType(); + + if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { + List mSecUser = this.listMSecurityPrincipalUserGrant( + userName, principalType); + boolean found = false; + if (mSecUser != null) { + for (String privilege : privs) { + for (MGlobalPrivilege userGrant : mSecUser) { + String userGrantPrivs = userGrant.getPrivilege(); + if (privilege.equals(userGrantPrivs)) { + found = true; + persistentObjs.add(userGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException( + "No user grant found for privileges " + privilege); + } + } + 
} + + } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { + MDatabase dbObj = getMDatabase(hiveObject.getDbName()); + if (dbObj != null) { + String db = hiveObject.getDbName(); + boolean found = false; + List dbGrants = this.listMSecurityPrincipalDBGrant( + userName, principalType, db); + for (String privilege : privs) { + for (MDBPrivilege dbGrant : dbGrants) { + String dbGrantPriv = dbGrant.getPrivilege(); + if (privilege.equals(dbGrantPriv)) { + found = true; + persistentObjs.add(dbGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException( + "No database grant found for privileges " + privilege + + " on database " + db); + } + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { + boolean found = false; + List tableGrants = this + .listMSecurityPrincipalTableGrant(userName, principalType, + hiveObject.getDbName(), hiveObject.getObjectName()); + for (String privilege : privs) { + for (MTablePartitionPrivilege tabGrant : tableGrants) { + String tableGrantPriv = tabGrant.getPrivilege(); + if (privilege.equalsIgnoreCase(tableGrantPriv)) { + found = true; + persistentObjs.add(tabGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + privilege + + ") found " + " on table " + hiveObject.getObjectName() + + ", database is " + hiveObject.getDbName()); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { + + boolean found = false; + Table tabObj = this.getTable(hiveObject.getDbName(), hiveObject.getObjectName()); + String partName = null; + if (hiveObject.getPartValues() != null) { + partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); + } + List partitionGrants = this + .listMSecurityPrincipalPartitionGrant(userName, principalType, + hiveObject.getDbName(), hiveObject.getObjectName(), partName); + for (String privilege : privs) { + for (MTablePartitionPrivilege partGrant : partitionGrants) { + String partPriv = partGrant.getPrivilege(); + if (partPriv.equalsIgnoreCase(privilege)) { + found = true; + persistentObjs.add(partGrant); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + privilege + + ") found " + " on table " + tabObj.getTableName() + + ", partition is " + partName + ", database is " + tabObj.getDbName()); + } + } + } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { + + Table tabObj = this.getTable(hiveObject.getDbName(), hiveObject + .getObjectName()); + String partName = null; + if (hiveObject.getPartValues() != null) { + partName = Warehouse.makePartName(tabObj.getPartitionKeys(), + hiveObject.getPartValues()); + } + List mSecCol = listMSecurityTabOrPartColumnGrant( + userName, principalType, hiveObject.getDbName(), hiveObject + .getObjectName(), partName, hiveObject.getColumnName()); + boolean found = false; + if (mSecCol != null) { + for (String privilege : privs) { + for (MColumnPrivilege col : mSecCol) { + String colPriv = col.getPrivilege(); + if (colPriv.equalsIgnoreCase(privilege)) { + found = true; + persistentObjs.add(col); + break; + } + } + if (!found) { + throw new InvalidObjectException("No grant (" + privilege + + ") found " + " on table " + tabObj.getTableName() + + ", partition is " + partName + ", column name = " + + hiveObject.getColumnName() + ", database is " + + tabObj.getDbName()); + } + } + } + } + } + } + + if (persistentObjs.size() > 0) { + pm.deletePersistentAll(persistentObjs); + } + committed = commitTransaction(); + } finally { + if (!committed) { + 
rollbackTransaction();
+      }
+    }
+    return committed;
+  }
+
+  public boolean revokeAllPrivileges(String userName,
+      PrincipalType principalType, boolean removeUserPriv, List<Database> dbs,
+      List<Table> tables, List<Partition> parts,
+      Map<Table, List<String>> columns) throws MetaException {
+    boolean committed = false;
+    try {
+      openTransaction();
+      List<Object> persistentObjs = new ArrayList<Object>();
+      if (removeUserPriv) {
+        List<MGlobalPrivilege> mSecUser = this.listMSecurityPrincipalUserGrant(
+            userName, principalType);
+        if (mSecUser != null) {
+          persistentObjs.addAll(mSecUser);
+        }
+      }
+
+      if (dbs != null) {
+        for (Database db : dbs) {
+          List<MDBPrivilege> dbGrants = this.listMSecurityPrincipalDBGrant(
+              userName, principalType, db.getName());
+          persistentObjs.addAll(dbGrants);
+        }
+      }
+
+      if (tables != null) {
+        for (Table tab : tables) {
+          List<MTablePartitionPrivilege> tabGrants = this
+              .listMSecurityPrincipalTableGrant(userName, principalType, tab
+                  .getDbName(), tab.getTableName());
+          persistentObjs.addAll(tabGrants);
+        }
+      }
+
+      if (parts != null) {
+        for (Partition part : parts) {
+          Table tabObj = this.getTable(part.getDbName(), part.getTableName());
+          List<MTablePartitionPrivilege> partGrants = this
+              .listMSecurityPrincipalPartitionGrant(userName, principalType,
+                  part.getDbName(), part.getTableName(),
+                  Warehouse.makePartName(tabObj.getPartitionKeys(), part
+                      .getValues()));
+          persistentObjs.addAll(partGrants);
+        }
+      }
+
+      if (columns != null) {
+        for (Map.Entry<Table, List<String>> tableColMap : columns.entrySet()) {
+          Table table = tableColMap.getKey();
+          List<String> colList = tableColMap.getValue();
+          for (String col : colList) {
+            List<MColumnPrivilege> secCol = this
+                .listMSecurityPrincipalTableColumnGrant(userName,
+                    principalType, table.getDbName(), table.getTableName(),
+                    col, false);
+            persistentObjs.addAll(secCol);
+          }
+        }
+      }
+
+      if (persistentObjs.size() > 0) {
+        pm.deletePersistentAll(persistentObjs);
+      }
+      committed = commitTransaction();
+    } finally {
+      if (!committed) {
+        rollbackTransaction();
+      }
+    }
+    return committed;
+  }
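
The list* helpers from here down all share one JDOQL shape: open a transaction, build a filter with declared parameters, bind them positionally, eagerly retrieve the results, then commit, rolling back in finally if the commit never happened. A condensed, self-contained sketch of that pattern, reusing the ObjectStore fields and helpers visible above (pm, openTransaction, commitTransaction, rollbackTransaction); the method name queryDbGrants and its parameters are illustrative, not part of the patch:

  @SuppressWarnings("unchecked")
  private List<MDBPrivilege> queryDbGrants(String principal, String type, String dbName) {
    boolean success = false;
    List<MDBPrivilege> result = null;
    try {
      openTransaction();
      // JDOQL filter over persistent fields; t1..t3 are declared parameters
      Query query = pm.newQuery(MDBPrivilege.class,
          "principalName == t1 && principalType == t2 && database.name == t3");
      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
      result = (List<MDBPrivilege>) query.executeWithArray(principal, type, dbName);
      pm.retrieveAll(result);  // materialize before the transaction closes
      success = commitTransaction();
    } finally {
      if (!success) {
        rollbackTransaction();  // commit never happened; undo the read txn
      }
    }
    return result;
  }
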
+
+  @SuppressWarnings("unchecked")
+  private List<MRoleMap> listRoleMembers(MRole mRol) {
+    boolean success = false;
+    List<MRoleMap> mRoleMemberList = null;
+    try {
+      openTransaction();
+      LOG.debug("Executing listRoleMembers");
+      Query query = pm.newQuery(MRoleMap.class,
+          "role.roleName == t1");
+      query.declareParameters("java.lang.String t1");
+      query.setUnique(false);
+      mRoleMemberList = (List<MRoleMap>) query.execute(
+          mRol.getRoleName());
+      LOG.debug("Done executing query for listRoleMembers");
+      pm.retrieveAll(mRoleMemberList);
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for listRoleMembers");
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return mRoleMemberList;
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public List<MGlobalPrivilege> listMSecurityPrincipalUserGrant(String principalName,
+      PrincipalType principalType) {
+    boolean commited = false;
+    List<MGlobalPrivilege> userNameDbPriv = null;
+    try {
+      openTransaction();
+      if (principalName != null) {
+        Query query = pm.newQuery(MGlobalPrivilege.class,
+            "principalName == t1 && principalType == t2 ");
+        query.declareParameters(
+            "java.lang.String t1, java.lang.String t2");
+        userNameDbPriv = (List<MGlobalPrivilege>) query
+            .executeWithArray(principalName, principalType.toString());
+        pm.retrieveAll(userNameDbPriv);
+      }
+      commited = commitTransaction();
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+    }
+    return userNameDbPriv;
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public List<MDBPrivilege> listMSecurityPrincipalDBGrant(String principalName,
+      PrincipalType principalType, String dbName) {
+    boolean success = false;
+    List<MDBPrivilege> mSecurityDBList = null;
+    try {
+      openTransaction();
+      LOG.debug("Executing listMSecurityPrincipalDBGrant");
+      Query query = pm.newQuery(MDBPrivilege.class,
+          "principalName == t1 && 
principalType == t2 && database.name == t3"); + query + .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + mSecurityDBList = (List) query.executeWithArray(principalName, principalType.toString(), dbName); + LOG.debug("Done executing query for listMSecurityPrincipalDBGrant"); + pm.retrieveAll(mSecurityDBList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityPrincipalDBGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityDBList; + } + + @SuppressWarnings("unchecked") + private List listAllMSecurityPrincipalDBGrant( + String principalName, PrincipalType principalType) { + boolean success = false; + List mSecurityDBList = null; + try { + openTransaction(); + LOG.debug("Executing listAllMSecurityPrincipalDBGrant"); + Query query = pm.newQuery(MDBPrivilege.class, + "principalName == t1 && principalType == t2"); + query + .declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityDBList = (List) query.execute(principalName, principalType.toString()); + LOG.debug("Done executing query for listAllMSecurityPrincipalDBGrant"); + pm.retrieveAll(mSecurityDBList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listAllMSecurityPrincipalDBGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityDBList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityTablePart(String dbName, + String tableName, boolean tableOnly) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityTable"); + String queryStr = "table.tableName == t1 && table.database.name == t2"; + if (tableOnly) { + queryStr = queryStr + " && partition == null"; + } + Query query = pm.newQuery( + MTablePartitionPrivilege.class, queryStr); + query.declareParameters( + "java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = (List) query + .executeWithArray(tableName, dbName); + LOG.debug("Done executing query for listMSecurityTable"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG + .debug("Done retrieving all objects for listMSecurityTable"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityTablePartColumn(String dbName, + String tableName, boolean tableOnly) { + boolean success = false; + List mSecurityColList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityTablePartColumn"); + String queryStr = "table.tableName == t1 && table.database.name == t2"; + if (tableOnly) { + queryStr = queryStr + " && partition == null"; + } + Query query = pm.newQuery(MColumnPrivilege.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityColList = (List) query + .executeWithArray(tableName, dbName); + LOG.debug("Done executing query for listMSecurityTablePartColumn"); + pm.retrieveAll(mSecurityColList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityTablePartColumn"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + @SuppressWarnings("unchecked") + private List listMSecurityPartition(String dbName, String tableName, + String partName) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing 
listMSecurityTablePartition"); + Query query = pm.newQuery(MTablePartitionPrivilege.class, + "table.tableName == t1 && table.database.name == t2 && partition.partitionName == t3"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3"); + mSecurityTabPartList = (List) query + .executeWithArray(tableName, dbName, partName); + LOG.debug("Done executing query for listMSecurityTablePartition"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityTablePartition"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityPartitionColumn(String dbName, + String tableName, String partName) { + boolean success = false; + List mSecurityColList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPartitionColumn"); + Query query = pm.newQuery( + MColumnPrivilege.class, + "table.tableName == t1 && table.database.name == t2 && partition.partitionName == t3"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3"); + mSecurityColList = (List) query + .executeWithArray(tableName, dbName, partName); + LOG.debug("Done executing query for listMSecurityPartitionColumn"); + pm.retrieveAll(mSecurityColList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityPartitionColumn"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityPrincipalTableGrant( + String principalName, PrincipalType principalType, String dbName, + String tableName) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalTableGrant"); + Query query = pm.newQuery( + MTablePartitionPrivilege.class, + "principalName == t1 && principalType == t2 && table.tableName == t3 && table.database.name == t4 && partition == null"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"); + mSecurityTabPartList = (List) query + .executeWithArray(principalName, principalType.toString(), tableName, dbName); + LOG.debug("Done executing query for listMSecurityPrincipalTableGrant"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG + .debug("Done retrieving all objects for listMSecurityPrincipalTableGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @SuppressWarnings("unchecked") + @Override + public List listMSecurityPrincipalPartitionGrant( + String principalName, PrincipalType principalType, String dbName, + String tableName, String partName) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalPartitionGrant"); + Query query = pm.newQuery( + MTablePartitionPrivilege.class, + "principalName == t1 && principalType == t2 && table.tableName == t3 " + + "&& table.database.name == t4 && partition.partitionName == t5"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, java.lang.String t5"); + mSecurityTabPartList = (List) query + .executeWithArray(principalName, principalType.toString(), tableName, dbName, partName); + LOG.debug("Done executing query for 
listMSecurityPrincipalPartitionGrant"); + + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityPrincipalPartitionGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + @SuppressWarnings("unchecked") + private List listAllMSecurityPrincipalTablePartGrant( + String principalName, PrincipalType principalType) { + boolean success = false; + List mSecurityTabPartList = null; + try { + openTransaction(); + LOG.debug("Executing listAllMSecurityPrincipalTablePartGrant"); + Query query = pm.newQuery(MTablePartitionPrivilege.class, + "principalName == t1 && principalType == t2"); + query + .declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityTabPartList = (List) query.execute( + principalName, principalType.toString()); + LOG.debug("Done executing query for listAllMSecurityPrincipalTablePartGrant"); + pm.retrieveAll(mSecurityTabPartList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listAllMSecurityPrincipalTablePartGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityTabPartList; + } + + public List listMSecurityTabOrPartColumnGrant(String userName, + PrincipalType principalType, String dbName, String tabName, + String partName, String columnName) { + List mSecCol = null; + if (partName != null) { + mSecCol = this.listMSecurityPrincipalPartitionColumnGrant(userName, + principalType, dbName, tabName, partName, columnName); + } else { + mSecCol = this.listMSecurityPrincipalTableColumnGrant(userName, principalType, dbName, tabName, columnName, true); + } + return mSecCol; + } + + @SuppressWarnings("unchecked") + @Override + public List listMSecurityPrincipalTableColumnGrant( + String principalName, PrincipalType principalType, String dbName, + String tableName, String columnName, boolean tableOnly) { + boolean success = false; + List mSecurityColList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalColumnGrant"); + String queryStr = "principalName == t1 && principalType == t2 && " + + "table.tableName == t3 && table.database.name == t4 && columnName == t5 "; + if (tableOnly) { + queryStr = queryStr + " && partition == null"; + } + Query query = pm.newQuery(MColumnPrivilege.class, queryStr); + query + .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, java.lang.String t5"); + mSecurityColList = (List) query.executeWithArray( + principalName, principalType.toString(), tableName, dbName, columnName); + LOG.debug("Done executing query for listMSecurityPrincipalColumnGrant"); + pm.retrieveAll(mSecurityColList); + success = commitTransaction(); + LOG + .debug("Done retrieving all objects for listMSecurityPrincipalColumnGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + @SuppressWarnings("unchecked") + public List listMSecurityPrincipalPartitionColumnGrant( + String principalName, PrincipalType principalType, String dbName, + String tableName, String partitionName, String columnName) { + boolean success = false; + List mSecurityColList = null; + try { + openTransaction(); + LOG.debug("Executing listMSecurityPrincipalPartitionColumnGrant"); + Query query = pm.newQuery( + MColumnPrivilege.class, + "principalName == t1 && principalType == t2 && table.tableName == t3 && table.database.name == t4 && " + + "partition.partitionName == t5 && 
columnName == t6"); + query + .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4, java.lang.String t5, java.lang.String t6"); + + mSecurityColList = (List) query.executeWithArray( + principalName, principalType.toString(), tableName, dbName, partitionName, + columnName); + LOG.debug("Done executing query for listMSecurityPrincipalPartitionColumnGrant"); + pm.retrieveAll(mSecurityColList); + + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listMSecurityPrincipalPartitionColumnGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColList; + } + + @SuppressWarnings("unchecked") + private List listAllMSecurityPrincipalColumnGrant( + String principalName, PrincipalType principalType) { + boolean success = false; + List mSecurityColumnList = null; + try { + openTransaction(); + LOG.debug("Executing listAllMSecurityPrincipalColumnGrant"); + Query query = pm.newQuery(MColumnPrivilege.class, + "principalName == t1 && principalType == t2"); + query + .declareParameters("java.lang.String t1, java.lang.String t2"); + mSecurityColumnList = (List) query.execute( + principalName, principalType.toString()); + LOG.debug("Done executing query for listAllMSecurityPrincipalColumnGrant"); + pm.retrieveAll(mSecurityColumnList); + success = commitTransaction(); + LOG.debug("Done retrieving all objects for listAllMSecurityPrincipalColumnGrant"); + } finally { + if (!success) { + rollbackTransaction(); + } + } + return mSecurityColumnList; + } + } Index: metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java =================================================================== --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (revision 1050266) +++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (working copy) @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hive.metastore.api.Database; @@ -27,8 +28,17 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.model.MColumnPrivilege; +import org.apache.hadoop.hive.metastore.model.MDBPrivilege; +import org.apache.hadoop.hive.metastore.model.MTablePartitionPrivilege; +import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; +import org.apache.hadoop.hive.metastore.model.MRoleMap; public interface RawStore extends Configurable { @@ -131,5 +141,80 @@ public abstract List getPartitionsByFilter( String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException; + + public abstract boolean addRole(String rowName, String ownerName) + throws InvalidObjectException, MetaException, NoSuchObjectException; + + public abstract boolean removeRole(String roleName) throws MetaException, NoSuchObjectException; + + public abstract boolean addRoleMember(Role role, String userName, PrincipalType principalType) + throws MetaException, NoSuchObjectException; 
+
+  public abstract boolean removeRoleMember(Role role, String userName, PrincipalType principalType)
+      throws MetaException, NoSuchObjectException;
+
+  public abstract PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+      List<String> groupNames) throws InvalidObjectException, MetaException;
+
+  public abstract PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
+      List<String> groupNames) throws InvalidObjectException, MetaException;
+
+  public abstract PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName,
+      String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+
+  public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName,
+      String partition, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+
+  public abstract PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName, String partitionName,
+      String columnName, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+
+  public abstract List<MGlobalPrivilege> listMSecurityPrincipalUserGrant(String principalName,
+      PrincipalType principalType);
+
+  public abstract List<MDBPrivilege> listMSecurityPrincipalDBGrant(String principalName,
+      PrincipalType principalType, String dbName);
+
+  public abstract List<MTablePartitionPrivilege> listMSecurityPrincipalTableGrant(
+      String principalName, PrincipalType principalType, String dbName,
+      String tableName);
+
+  public abstract List<MTablePartitionPrivilege> listMSecurityPrincipalPartitionGrant(
+      String principalName, PrincipalType principalType, String dbName,
+      String tableName, String partName);
+
+  public List<MColumnPrivilege> listMSecurityTabOrPartColumnGrant(String userName,
+      PrincipalType principalType, String dbName, String tabName,
+      String partName, String column);
+
+  public abstract List<MColumnPrivilege> listMSecurityPrincipalTableColumnGrant(
+      String principalName, PrincipalType principalType, String dbName,
+      String tableName, String columnName, boolean tableOnly);
+
+  public abstract List<MColumnPrivilege> listMSecurityPrincipalPartitionColumnGrant(
+      String principalName, PrincipalType principalType, String dbName,
+      String tableName, String partName, String columnName);
+
+  public abstract boolean grantPrivileges(PrivilegeBag privileges)
+      throws InvalidObjectException, MetaException, NoSuchObjectException;
+
+  public abstract boolean revokePrivileges(PrivilegeBag privileges)
+      throws InvalidObjectException, MetaException, NoSuchObjectException;
+
+  public abstract org.apache.hadoop.hive.metastore.api.Role getRole(String roleName) throws NoSuchObjectException;
+
+  public List<MRoleMap> listRoles(String principalName,
+      PrincipalType principalType);
+
+  public boolean revokeAllPrivileges(String userName, PrincipalType principalType, boolean removeUserPriv,
+      List<Database> dbs, List<Table> tables, List<Partition> parts,
+      Map<Table, List<String>> columns) throws MetaException;
+
+  public abstract Partition getPartitionWithAuth(String dbName, String tblName,
+      List<String> partVals, String user_name, List<String> group_names)
+      throws MetaException, NoSuchObjectException, InvalidObjectException;
+
+  public abstract List<Partition> getPartitionsWithAuth(String dbName,
+      String tblName, short maxParts, String userName, List<String> groupNames)
+      throws MetaException, NoSuchObjectException, InvalidObjectException;
+}
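
To see how these declarations compose, a hypothetical caller-side sketch of granting SELECT on a table to a user via grantPrivileges, built from the Thrift structs added in hive_metastore.thrift and assuming the usual Thrift-generated all-args constructors; the database, table, and principal names are placeholders, and rawStore stands for any RawStore implementation such as ObjectStore:

// Illustrative only: grant "select" on default.src to userX.
HiveObjectRef tableRef = new HiveObjectRef(HiveObjectType.TABLE,
    "default", "src", null, null);               // no partition values or column
PrivilegeGrantInfo grantInfo = new PrivilegeGrantInfo("select",
    (int) (System.currentTimeMillis() / 1000),   // createTime in seconds
    "admin", PrincipalType.USER, false);         // grantor, grantorType, no grant option
HiveObjectPrivilege priv = new HiveObjectPrivilege(tableRef,
    "userX", PrincipalType.USER, grantInfo);
PrivilegeBag bag = new PrivilegeBag(Arrays.asList(priv));
rawStore.grantPrivileges(bag);  // throws InvalidObjectException if the same grantor already granted it
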
Index: metastore/src/model/package.jdo
===================================================================
--- metastore/src/model/package.jdo	(revision 1050266)
+++ metastore/src/model/package.jdo	(working copy)
@@ -357,5 +357,181 @@
[The ~176 added lines of JDO mapping XML for the new model classes below were stripped of their markup during extraction and are not recoverable here.]
Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MColumnPrivilege.java
===================================================================
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MColumnPrivilege.java	(revision 0)
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MColumnPrivilege.java	(revision 0)
@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.hive.metastore.model; + +public class MColumnPrivilege { + + private String principalName; + + private String principalType; + + private MTable table; + + private MPartition partition; + + private String columnName; + + private String privilege; + + private int createTime; + + private String grantor; + + private String grantorType; + + private boolean grantOption; + + public MColumnPrivilege() { + } + + /** + * @param principalName + * @param isRole + * @param isGroup + * @param table + * @param partition + * @param columnName + * @param privileges + * @param createTime + * @param grantor + */ + public MColumnPrivilege(String principalName, String principalType, + MTable table, MPartition partition, String columnName, String privileges, int createTime, + String grantor, String grantorType, boolean grantOption) { + super(); + this.principalName = principalName; + this.principalType = principalType; + this.table = table; + this.partition = partition; + this.columnName = columnName; + this.privilege = privileges; + this.createTime = createTime; + this.grantor = grantor; + this.grantorType = grantorType; + this.grantOption = grantOption; + } + + /** + * @return column name + */ + public String getColumnName() { + return columnName; + } + + /** + * @param columnName column name + */ + public void setColumnName(String columnName) { + this.columnName = columnName; + } + + /** + * @return a set of privileges this user/role/group has + */ + public String getPrivilege() { + return privilege; + } + + /** + * @param dbPrivileges a set of privileges this user/role/group has + */ + public void setPrivilege(String dbPrivileges) { + this.privilege = dbPrivileges; + } + + /** + * @return create time + */ + public int getCreateTime() { + return createTime; + } + + /** + * @param createTime create time + */ + public void setCreateTime(int createTime) { + this.createTime = createTime; + } + + public String getPrincipalName() { + return principalName; + } + + public void setPrincipalName(String principalName) { + this.principalName = principalName; + } + + public MTable getTable() { + return table; + } + + public void setTable(MTable table) { + this.table = table; + } + + public MPartition getPartition() { + return partition; + } + + public void setPartition(MPartition partition) { + this.partition = partition; + } + + public String getGrantor() { + return grantor; + } + + public void setGrantor(String grantor) { + this.grantor = grantor; + } + + public String getGrantorType() { + return grantorType; + } + + public void setGrantorType(String grantorType) { + this.grantorType = grantorType; + } + + public boolean getGrantOption() { + return grantOption; + } + + public void setGrantOption(boolean grantOption) { + this.grantOption = grantOption; + } + + public String getPrincipalType() { + return principalType; + } + + public void setPrincipalType(String principalType) { + this.principalType = principalType; + } + +} Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MDBPrivilege.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MDBPrivilege.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MDBPrivilege.java (revision 0) @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.model; + +public class MDBPrivilege { + + private String principalName; + + private String principalType; + + private MDatabase database; + + private int createTime; + + private String privilege; + + private String grantor; + + private String grantorType; + + private boolean grantOption; + + public MDBPrivilege() { + } + + public MDBPrivilege(String principalName, String principalType, + MDatabase database, String dbPrivileges, int createTime, String grantor, + String grantorType, boolean grantOption) { + super(); + this.principalName = principalName; + this.principalType = principalType; + this.database = database; + this.privilege = dbPrivileges; + this.createTime = createTime; + this.grantorType = grantorType; + this.grantOption = grantOption; + this.grantor = grantor; + } + + /** + * @return user name, role name, or group name + */ + public String getPrincipalName() { + return principalName; + } + + /** + * @param userName user/role/group name + */ + public void setPrincipalName(String userName) { + this.principalName = userName; + } + + /** + * @return a set of privileges this user/role/group has + */ + public String getPrivilege() { + return privilege; + } + + /** + * @param dbPrivileges a set of privileges this user/role/group has + */ + public void setPrivilege(String dbPrivilege) { + this.privilege = dbPrivilege; + } + + public MDatabase getDatabase() { + return database; + } + + public void setDatabase(MDatabase database) { + this.database = database; + } + + public int getCreateTime() { + return createTime; + } + + public void setCreateTime(int createTime) { + this.createTime = createTime; + } + + public String getGrantor() { + return grantor; + } + + public void setGrantor(String grantor) { + this.grantor = grantor; + } + + public String getGrantorType() { + return grantorType; + } + + public void setGrantorType(String grantorType) { + this.grantorType = grantorType; + } + + public boolean getGrantOption() { + return grantOption; + } + + public void setGrantOption(boolean grantOption) { + this.grantOption = grantOption; + } + + public String getPrincipalType() { + return principalType; + } + + public void setPrincipalType(String principalType) { + this.principalType = principalType; + } + +} Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MGlobalPrivilege.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MGlobalPrivilege.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MGlobalPrivilege.java (revision 0) @@ -0,0 +1,120 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.model; + +/** + * User global level privileges + */ +public class MGlobalPrivilege { + + //principal name, can be a user, group, or role + private String principalName; + + private String principalType; + + private String privilege; + + private int createTime; + + private String grantor; + + private String grantorType; + + private boolean grantOption; + + public MGlobalPrivilege() { + super(); + } + + public MGlobalPrivilege(String userName, String principalType, + String dbPrivilege, int createTime, String grantor, String grantorType, + boolean grantOption) { + super(); + this.principalName = userName; + this.principalType = principalType; + this.privilege = dbPrivilege; + this.createTime = createTime; + this.grantor = grantor; + this.grantorType = grantorType; + this.grantOption = grantOption; + } + + /** + * @return a set of global privileges granted to this user + */ + public String getPrivilege() { + return privilege; + } + + /** + * @param dbPrivileges set of global privileges to user + */ + public void setPrivilege(String dbPrivilege) { + this.privilege = dbPrivilege; + } + + public String getPrincipalName() { + return principalName; + } + + public void setPrincipalName(String principalName) { + this.principalName = principalName; + } + + public int getCreateTime() { + return createTime; + } + + public void setCreateTime(int createTime) { + this.createTime = createTime; + } + + public String getGrantor() { + return grantor; + } + + public void setGrantor(String grantor) { + this.grantor = grantor; + } + + public boolean getGrantOption() { + return grantOption; + } + + public void setGrantOption(boolean grantOption) { + this.grantOption = grantOption; + } + + public String getPrincipalType() { + return principalType; + } + + public void setPrincipalType(String principalType) { + this.principalType = principalType; + } + + public String getGrantorType() { + return grantorType; + } + + public void setGrantorType(String grantorType) { + this.grantorType = grantorType; + } + +} Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MRole.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MRole.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MRole.java (revision 0) @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.model; + +public class MRole { + + private String roleName; + + private int createTime; + + private String ownerName; + + public MRole() { + } + + public MRole(String roleName, int createTime, String ownerName) { + super(); + this.roleName = roleName; + this.createTime = createTime; + this.ownerName = ownerName; + } + + /** + * @return role name + */ + public String getRoleName() { + return roleName; + } + + /** + * @param roleName + */ + public void setRoleName(String roleName) { + this.roleName = roleName; + } + + /** + * @return create time + */ + public int getCreateTime() { + return createTime; + } + + /** + * @param createTime + * role create time + */ + public void setCreateTime(int createTime) { + this.createTime = createTime; + } + + /** + * @return the principal name who created this role + */ + public String getOwnerName() { + return ownerName; + } + + public void setOwnerName(String ownerName) { + this.ownerName = ownerName; + } + +} + Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MRoleMap.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MRoleMap.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MRoleMap.java (revision 0) @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.model; + +public class MRoleMap { + + private String principalName; + + private String principalType; + + private MRole role; + + private int addTime; + + private boolean grantOption; + + public MRoleMap() { + } + + public MRoleMap(String principalName, String principalType, MRole role, int addTime) { + super(); + this.principalName = principalName; + this.principalType = principalType; + this.role = role; + this.addTime = addTime; + } + + /** + * @return principal name + */ + public String getPrincipalName() { + return principalName; + } + + /** + * @param userName principal name + */ + public void setPrincipalName(String userName) { + this.principalName = userName; + } + + public String getPrincipalType() { + return principalType; + } + + public void setPrincipalType(String principalType) { + this.principalType = principalType; + } + + /** + * @return add time + */ + public int getAddTime() { + return addTime; + } + + /** + * @param addTime + */ + public void setAddTime(int addTime) { + this.addTime = addTime; + } + + public MRole getRole() { + return role; + } + + public void setRole(MRole role) { + this.role = role; + } + + public boolean getGrantOption() { + return grantOption; + } + + public void setGrantOption(boolean grantOption) { + this.grantOption = grantOption; + } + +} Index: metastore/src/model/org/apache/hadoop/hive/metastore/model/MTablePartitionPrivilege.java =================================================================== --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MTablePartitionPrivilege.java (revision 0) +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MTablePartitionPrivilege.java (revision 0) @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
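MRole and MRoleMap pair up the same way: CREATE ROLE is expected to produce one MRole row, and GRANT ROLE one MRoleMap membership row pointing at it. A sketch under the same assumptions (hypothetical helper; grantOption is left unset, defaulting to false):

    import javax.jdo.PersistenceManager;
    import org.apache.hadoop.hive.metastore.model.MRole;
    import org.apache.hadoop.hive.metastore.model.MRoleMap;

    public class RoleGrantSketch {
      // Hypothetical helper: CREATE ROLE analysts, then GRANT ROLE to a user.
      static void createAndGrantRole(PersistenceManager pm) {
        int now = (int) (System.currentTimeMillis() / 1000);
        MRole role = new MRole("analysts", now, "admin");          // role, create time, owner
        MRoleMap membership = new MRoleMap("userX", "USER", role, now); // membership row
        pm.makePersistent(role);
        pm.makePersistent(membership);
      }
    }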
+ */ + +package org.apache.hadoop.hive.metastore.model; + +public class MTablePartitionPrivilege { + + private String principalName; + + private String principalType; + + private MTable table; + + private MPartition partition; + + private String privilege; + + private int createTime; + + private String grantor; + + private String grantorType; + + private boolean grantOption; + + public MTablePartitionPrivilege() { + } + + public MTablePartitionPrivilege(String principalName, String principalType, + MTable table, MPartition partition, String privilege, int createTime, + String grantor, String grantorType, boolean grantOption) { + super(); + this.principalName = principalName; + this.principalType = principalType; + this.table = table; + this.partition = partition; + this.privilege = privilege; + this.createTime = createTime; + this.grantor = grantor; + this.grantorType = grantorType; + this.grantOption = grantOption; + } + + public String getPrincipalName() { + return principalName; + } + + public void setPrincipalName(String principalName) { + this.principalName = principalName; + } + + + /** + * @return a set of privileges this user/role/group has + */ + public String getPrivilege() { + return privilege; + } + + /** + * @param dbPrivilege a set of privileges this user/role/group has + */ + public void setPrivilege(String dbPrivilege) { + this.privilege = dbPrivilege; + } + + /** + * @return create time + */ + public int getCreateTime() { + return createTime; + } + + /** + * @param createTime create time + */ + public void setCreateTime(int createTime) { + this.createTime = createTime; + } + + /** + * @return + */ + public String getGrantor() { + return grantor; + } + + /** + * @param grantor + */ + public void setGrantor(String grantor) { + this.grantor = grantor; + } + + public String getPrincipalType() { + return principalType; + } + + public void setPrincipalType(String principalType) { + this.principalType = principalType; + } + + public MTable getTable() { + return table; + } + + public void setTable(MTable table) { + this.table = table; + } + + public MPartition getPartition() { + return partition; + } + + public void setPartition(MPartition partition) { + this.partition = partition; + } + + public boolean getGrantOption() { + return grantOption; + } + + public void setGrantOption(boolean grantOption) { + this.grantOption = grantOption; + } + + public String getGrantorType() { + return grantorType; + } + + public void setGrantorType(String grantorType) { + this.grantorType = grantorType; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/Driver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java (working copy) @@ -49,6 +49,7 @@ import org.apache.hadoop.hive.ql.exec.ExecDriver; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.TaskResult; @@ -69,22 +70,29 @@ import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject; import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData; import org.apache.hadoop.hive.ql.lockmgr.LockException; +import org.apache.hadoop.hive.ql.metadata.AuthorizationException; import org.apache.hadoop.hive.ql.metadata.DummyPartition; +import 
org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.ErrorMsg;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContextImpl;
+import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.ParseDriver;
 import org.apache.hadoop.hive.ql.parse.ParseException;
 import org.apache.hadoop.hive.ql.parse.ParseUtils;
+import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.SemanticAnalyzerFactory;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.parse.VariableSubstitution;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.processors.CommandProcessor;
@@ -375,8 +383,20 @@
       if (plan.getFetchTask() != null) {
         plan.getFetchTask().initialize(conf, plan, null);
       }
+
+      // do the authorization check
+      if (HiveConf.getBoolVar(conf,
+          HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
+        try {
+          doAuthorization(sem);
+        } catch (AuthorizationException authExp) {
+          console.printError("Authorization failed: " + authExp.getMessage()
+              + ". Use SHOW GRANT to get more details.");
+          return 403;
+        }
+      }
-      return (0);
+      return 0;
     } catch (SemanticException e) {
       errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage();
       SQLState = ErrorMsg.findSQLState(e.getMessage());
@@ -398,6 +418,138 @@
     }
   }
+  private boolean doAuthorization(BaseSemanticAnalyzer sem)
+      throws HiveException, AuthorizationException {
+    HashSet<ReadEntity> inputs = sem.getInputs();
+    HashSet<WriteEntity> outputs = sem.getOutputs();
+    SessionState ss = SessionState.get();
+    HiveOperation op = ss.getHiveOperation();
+    Hive db = sem.getDb();
+    boolean pass = true;
+    if (op != null) {
+      if (op.equals(HiveOperation.CREATETABLE_AS_SELECT)
+          || op.equals(HiveOperation.CREATETABLE)) {
+        pass = ss.getAuthorizer().authorize(
+            db.getDatabase(db.getCurrentDatabase()), null,
+            HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
+      }
+      if (outputs != null && outputs.size() > 0) {
+        for (WriteEntity write : outputs) {
+
+          if (write.getType() == WriteEntity.Type.PARTITION) {
+            Partition part = db.getPartition(write.getTable(), write
+                .getPartition().getSpec(), false);
+            if (part != null) {
+              pass = pass
+                  && ss.getAuthorizer().authorize(write.getPartition(), null,
+                      op.getOutputRequiredPrivileges());
+              continue;
+            }
+          }
+
+          if (write.getTable() != null) {
+            pass = pass
+                && ss.getAuthorizer().authorize(write.getTable(), null,
+                    op.getOutputRequiredPrivileges());
+          }
+
+          if (!pass) {
+            break;
+          }
+        }
+
+      }
+    }
+
+    if (pass && inputs != null && inputs.size() > 0) {
+
+      Map<Table, List<String>> tab2Cols = new HashMap<Table, List<String>>();
+      Map<Partition, List<String>> part2Cols = new HashMap<Partition, List<String>>();
+
+      for (ReadEntity read : inputs) {
+        boolean part = read.getPartition() != null;
+        if (part) {
+          part2Cols.put(read.getPartition(), new ArrayList<String>());
+        } else {
+          tab2Cols.put(read.getTable(), new ArrayList<String>());
+        }
+      }
+
+      if (op.equals(HiveOperation.CREATETABLE_AS_SELECT)
+          || op.equals(HiveOperation.QUERY)) {
+        SemanticAnalyzer querySem = (SemanticAnalyzer) sem;
+        ParseContext parseCtx = querySem.getParseContext();
+        Map<TableScanOperator, Table> tsoTopMap = parseCtx.getTopToTable();
+
+        for (Map.Entry<String, Operator<? extends Serializable>> topOpMap : querySem
+            .getParseContext().getTopOps().entrySet()) {
+          Operator<? extends Serializable> topOp = topOpMap.getValue();
+          if (topOp instanceof TableScanOperator
+              && tsoTopMap.containsKey(topOp)) {
+            TableScanOperator tableScanOp = (TableScanOperator) topOp;
+            Table tbl = tsoTopMap.get(tableScanOp);
+            List<Integer> neededColumnIds = tableScanOp.getNeededColumnIDs();
+            List<FieldSchema> columns = tbl.getCols();
+            List<String> cols = new ArrayList<String>();
+            if (neededColumnIds != null && neededColumnIds.size() > 0) {
+              for (int i = 0; i < neededColumnIds.size(); i++) {
+                cols.add(columns.get(neededColumnIds.get(i)).getName());
+              }
+            } else {
+              for (int i = 0; i < columns.size(); i++) {
+                cols.add(columns.get(i).getName());
+              }
+            }
+            if (tbl.isPartitioned()) {
+              String alias_id = topOpMap.getKey();
+              PrunedPartitionList partsList = PartitionPruner.prune(parseCtx
+                  .getTopToTable().get(topOp), parseCtx.getOpToPartPruner()
+                  .get(topOp), parseCtx.getConf(), alias_id, parseCtx
+                  .getPrunedPartitions());
+              Set<Partition> parts = new HashSet<Partition>();
+              parts.addAll(partsList.getConfirmedPartns());
+              parts.addAll(partsList.getUnknownPartns());
+              for (Partition part : parts) {
+                part2Cols.put(part, cols);
+              }
+            } else {
+              tab2Cols.put(tbl, cols);
+            }
+          }
+        }
+      }
+
+      for (ReadEntity read : inputs) {
+        if (read.getPartition() != null) {
+          List<String> cols = part2Cols.get(read.getPartition());
+          if (cols != null && cols.size() > 0) {
+            pass = pass
+                && ss.getAuthorizer().authorize(read.getPartition().getTable(),
+                    read.getPartition(), cols, op.getInputRequiredPrivileges(),
+                    null);
+          } else {
+            pass = pass
+                && ss.getAuthorizer().authorize(read.getPartition(),
+                    op.getInputRequiredPrivileges(), null);
+          }
+        } else if (read.getTable() != null) {
+          List<String> cols = tab2Cols.get(read.getTable());
+          if (cols != null && cols.size() > 0) {
+            pass = pass
+                && ss.getAuthorizer().authorize(read.getTable(), null, cols,
+                    op.getInputRequiredPrivileges(), null);
+          } else {
+            pass = pass
+                && ss.getAuthorizer().authorize(read.getTable(),
+                    op.getInputRequiredPrivileges(), null);
+          }
+        }
+      }
+
+    }
+    return pass;
+  }
+
 /**
 * @return The current query plan associated with this Driver, if any.
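The doAuthorization step above checks CREATE on the target database for CREATE TABLE and CTAS, write privileges per output table or partition, and read privileges per input, narrowing table reads to the columns actually scanned (getNeededColumnIDs) and to the pruned partition set. A minimal sketch of the client-visible behavior, assuming the authorization and authenticator managers are configured as in hive-default.xml; the table name and query are illustrative, and the 403 return code comes from the compile() hook above:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.Driver;
    import org.apache.hadoop.hive.ql.session.SessionState;

    public class AuthCompileSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf(SessionState.class);
        // Switch the new check on; it is off by default.
        conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
        SessionState.start(new SessionState(conf));
        Driver driver = new Driver(conf);
        // compile() runs doAuthorization() after semantic analysis; a denied
        // query surfaces as return code 403 rather than a thrown exception.
        int rc = driver.compile("SELECT key FROM src");
        System.out.println(rc == 403 ? "authorization failed" : "rc=" + rc);
      }
    }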
*/ @@ -755,8 +907,6 @@ boolean noName = StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HADOOPJOBNAME)); int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); - int curJobNo = 0; - String queryId = plan.getQueryId(); String queryStr = plan.getQueryStr(); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy) @@ -58,11 +58,18 @@ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.HiveObjectType; import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.QueryPlan; @@ -99,10 +106,18 @@ import org.apache.hadoop.hive.ql.plan.DropDatabaseDesc; import org.apache.hadoop.hive.ql.plan.DropIndexDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.GrantDesc; +import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL; import org.apache.hadoop.hive.ql.plan.LockTableDesc; import org.apache.hadoop.hive.ql.plan.MsckDesc; +import org.apache.hadoop.hive.ql.plan.PrincipalDesc; +import org.apache.hadoop.hive.ql.plan.PrivilegeDesc; +import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; +import org.apache.hadoop.hive.ql.plan.RevokeDesc; +import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; +import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; @@ -111,6 +126,8 @@ import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; +import org.apache.hadoop.hive.ql.security.authorization.Privilege; +import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; @@ -121,10 +138,12 @@ import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.hive.shims.HadoopShims; import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; + /** * DDLTask implementation. 
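The DDLTask changes below add dispatch for the new work descriptors (RoleDDLDesc, GrantDesc, RevokeDesc, ShowGrantDesc, GrantRevokeRoleDDL). A sketch of the statements that exercise each branch when run through the Driver; role, user, and table names are illustrative only:

    import org.apache.hadoop.hive.ql.Driver;

    public class GrantDdlSketch {
      // Each statement reaches one of the new branches in DDLTask.execute().
      static void exercise(Driver driver) throws Exception {
        driver.run("CREATE ROLE role_analyst");                    // roleDDL
        driver.run("GRANT ROLE role_analyst TO USER userX");       // grantOrRevokeRole
        driver.run("GRANT SELECT ON TABLE src TO USER userX");     // grantOrRevokePrivileges
        driver.run("SHOW GRANT USER userX ON TABLE src");          // showGrants
        driver.run("REVOKE SELECT ON TABLE src FROM USER userX");  // grantOrRevokePrivileges
        driver.run("DROP ROLE role_analyst");                      // roleDDL
      }
    }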
- * + * **/ public class DDLTask extends Task implements Serializable { private static final long serialVersionUID = 1L; @@ -149,12 +168,12 @@ super.initialize(conf, queryPlan, ctx); this.conf = conf; - INTERMEDIATE_ARCHIVED_DIR_SUFFIX = - HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED); - INTERMEDIATE_ORIGINAL_DIR_SUFFIX = - HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL); - INTERMEDIATE_EXTRACTED_DIR_SUFFIX = - HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED); + INTERMEDIATE_ARCHIVED_DIR_SUFFIX = HiveConf.getVar(conf, + ConfVars.METASTORE_INT_ARCHIVED); + INTERMEDIATE_ORIGINAL_DIR_SUFFIX = HiveConf.getVar(conf, + ConfVars.METASTORE_INT_ORIGINAL); + INTERMEDIATE_EXTRACTED_DIR_SUFFIX = HiveConf.getVar(conf, + ConfVars.METASTORE_INT_EXTRACTED); } @Override @@ -296,6 +315,33 @@ return showPartitions(db, showParts); } + RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc(); + if (roleDDLDesc != null) { + return roleDDL(roleDDLDesc); + } + + GrantDesc grantDesc = work.getGrantDesc(); + if (grantDesc != null) { + return grantOrRevokePrivileges(grantDesc.getPrincipals(), grantDesc + .getPrivileges(), grantDesc.getPrivilegeSubjectDesc(), grantDesc.getGrantor(), grantDesc.getGrantorType(), grantDesc.isGrantOption(), true); + } + + RevokeDesc revokeDesc = work.getRevokeDesc(); + if (revokeDesc != null) { + return grantOrRevokePrivileges(revokeDesc.getPrincipals(), revokeDesc + .getPrivileges(), revokeDesc.getPrivilegeSubjectDesc(), null, null, false, false); + } + + ShowGrantDesc showGrantDesc = work.getShowGrantDesc(); + if (showGrantDesc != null) { + return showGrants(showGrantDesc); + } + + GrantRevokeRoleDDL grantOrRevokeRoleDDL = work.getGrantRevokeRoleDDL(); + if (grantOrRevokeRoleDDL != null) { + return grantOrRevokeRole(grantOrRevokeRoleDDL); + } + ShowIndexesDesc showIndexes = work.getShowIndexesDesc(); if (showIndexes != null) { return showIndexes(db, showIndexes); @@ -319,31 +365,388 @@ return 0; } + private int grantOrRevokeRole(GrantRevokeRoleDDL grantOrRevokeRoleDDL) + throws HiveException { + try { + boolean grantRole = grantOrRevokeRoleDDL.getGrant(); + List principals = grantOrRevokeRoleDDL.getPrincipalDesc(); + List roles = grantOrRevokeRoleDDL.getRoles(); + for (PrincipalDesc principal : principals) { + String userName = principal.getName(); + for (String roleName : roles) { + if (grantRole) { + db.addRoleMember(roleName, userName, principal.getType()); + } else { + db.removeRoleMember(roleName, userName, principal.getType()); + } + } + } + } catch (Exception e) { + throw new HiveException(e); + } + return 0; + } + + private int showGrants(ShowGrantDesc showGrantDesc) throws HiveException { + try { + Path resFile = new Path(showGrantDesc.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + DataOutput outStream = fs.create(resFile); + PrincipalDesc principalDesc = showGrantDesc.getPrincipalDesc(); + PrivilegeObjectDesc hiveObjectDesc = showGrantDesc.getHiveObj(); + String principalName = principalDesc.getName(); + if (hiveObjectDesc == null) { + List users = db.showUserLevelGrant(principalName, + principalDesc.getType()); + if (users != null && users.size() > 0) { + boolean first = true; + for (HiveObjectPrivilege usr : users) { + if (!first) { + outStream.write(terminator); + } else { + first = false; + } + + writeGrantInfo(outStream, principalDesc.getType(), principalName, + null, null, null, null, usr.getGrantInfo()); + + } + } + } else { + String obj = hiveObjectDesc.getObject(); + boolean notFound = true; + String dbName = null; + String tableName = 
null;
+      Table tableObj = null;
+      Database dbObj = null;
+
+      if (hiveObjectDesc.getTable()) {
+        String[] dbTab = obj.split("\\.");
+        if (dbTab.length == 2) {
+          dbName = dbTab[0];
+          tableName = dbTab[1];
+        } else {
+          dbName = db.getCurrentDatabase();
+          tableName = obj;
+        }
+        dbObj = db.getDatabase(dbName);
+        tableObj = db.getTable(dbName, tableName);
+        notFound = (dbObj == null || tableObj == null);
+      } else {
+        dbName = hiveObjectDesc.getObject();
+        dbObj = db.getDatabase(dbName);
+        notFound = (dbObj == null);
+      }
+      if (notFound) {
+        throw new HiveException(obj + " cannot be found");
+      }
+
+      String partName = null;
+      List<String> partValues = null;
+      if (hiveObjectDesc.getPartSpec() != null) {
+        partName = Warehouse
+            .makePartName(hiveObjectDesc.getPartSpec(), false);
+        partValues = Warehouse.getPartValuesFromPartName(partName);
+      }
+
+      if (!hiveObjectDesc.getTable()) {
+        // show database level privileges
+        List<HiveObjectPrivilege> dbs = db.showDBLevelGrant(principalName,
+            principalDesc.getType(), dbName);
+        if (dbs != null && dbs.size() > 0) {
+          boolean first = true;
+          for (HiveObjectPrivilege dbPriv : dbs) {
+            if (!first) {
+              outStream.write(terminator);
+            } else {
+              first = false;
+            }
+
+            writeGrantInfo(outStream, principalDesc.getType(), principalName,
+                dbName, null, null, null, dbPriv.getGrantInfo());
+
+          }
+        }
+
+      } else {
+        if (showGrantDesc.getColumns() != null) {
+          // show column level privileges
+          for (String columnName : showGrantDesc.getColumns()) {
+            List<HiveObjectPrivilege> columnPrivs = db.showColumnGrant(
+                principalName, principalDesc.getType(), dbName, tableName,
+                partValues, columnName);
+            if (columnPrivs != null && columnPrivs.size() > 0) {
+              boolean first = true;
+              for (HiveObjectPrivilege col : columnPrivs) {
+                if (!first) {
+                  outStream.write(terminator);
+                } else {
+                  first = false;
+                }
+
+                writeGrantInfo(outStream, principalDesc.getType(),
+                    principalName, dbName, tableName, partName, columnName,
+                    col.getGrantInfo());
+              }
+            }
+          }
+        } else if (hiveObjectDesc.getPartSpec() != null) {
+          // show partition level privileges
+          List<HiveObjectPrivilege> parts = db.showPartitionGrant(
+              principalName, principalDesc.getType(), dbName, tableName,
+              partValues);
+          if (parts != null && parts.size() > 0) {
+            boolean first = true;
+            for (HiveObjectPrivilege part : parts) {
+              if (!first) {
+                outStream.write(terminator);
+              } else {
+                first = false;
+              }
+
+              writeGrantInfo(outStream, principalDesc.getType(),
+                  principalName, dbName, tableName, partName, null,
+                  part.getGrantInfo());
+
+            }
+          }
+        } else {
+          // show table level privileges
+          List<HiveObjectPrivilege> tbls = db.showTableLevelGrant(
+              principalName, principalDesc.getType(), dbName, tableName);
+          if (tbls != null && tbls.size() > 0) {
+            boolean first = true;
+            for (HiveObjectPrivilege tbl : tbls) {
+              if (!first) {
+                outStream.write(terminator);
+              } else {
+                first = false;
+              }
+
+              writeGrantInfo(outStream, principalDesc.getType(),
+                  principalName, dbName, tableName, null, null,
+                  tbl.getGrantInfo());
+
+            }
+          }
+        }
+      }
+    }
+    ((FSDataOutputStream) outStream).close();
+  } catch (FileNotFoundException e) {
+    LOG.info("show grants: " + stringifyException(e));
+    return 1;
+  } catch (IOException e) {
+    LOG.info("show grants: " + stringifyException(e));
+    return 1;
+  } catch (Exception e) {
+    throw new HiveException(e);
+  }
+  return 0;
+  }
+
+  private int grantOrRevokePrivileges(List<PrincipalDesc> principals,
+      List<PrivilegeDesc> privileges, PrivilegeObjectDesc privSubjectDesc,
+      String grantor, PrincipalType grantorType, boolean grantOption,
+      boolean isGrant) {
+    if (privileges == null || privileges.size() == 0) {
+      console.printError("No privilege found.");
+      return 1;
+    }
+
+    String dbName = null;
+    String tableName = null;
+    Table tableObj = null;
+    Database dbObj = null;
+
+    try {
+
+      if (privSubjectDesc != null) {
+        if (privSubjectDesc.getPartSpec() != null && isGrant) {
+          throw new HiveException("Grant does not support partition level.");
+        }
+        String obj = privSubjectDesc.getObject();
+        boolean notFound = true;
+        if (privSubjectDesc.getTable()) {
+          String[] dbTab = obj.split("\\.");
+          if (dbTab.length == 2) {
+            dbName = dbTab[0];
+            tableName = dbTab[1];
+          } else {
+            dbName = db.getCurrentDatabase();
+            tableName = obj;
+          }
+          dbObj = db.getDatabase(dbName);
+          tableObj = db.getTable(dbName, tableName);
+          notFound = (dbObj == null || tableObj == null);
+        } else {
+          dbName = privSubjectDesc.getObject();
+          dbObj = db.getDatabase(dbName);
+          notFound = (dbObj == null);
+        }
+        if (notFound) {
+          throw new HiveException(obj + " cannot be found");
+        }
+      }
+
+      PrivilegeBag privBag = new PrivilegeBag();
+      if (privSubjectDesc == null) {
+        for (int idx = 0; idx < privileges.size(); idx++) {
+          Privilege priv = privileges.get(idx).getPrivilege();
+          if (privileges.get(idx).getColumns() != null
+              && privileges.get(idx).getColumns().size() > 0) {
+            throw new HiveException(
+                "For user-level privileges, column sets should be null. columns="
+                    + privileges.get(idx).getColumns().toString());
+          }
+
+          privBag.addToPrivileges(new HiveObjectPrivilege(new HiveObjectRef(
+              HiveObjectType.GLOBAL, null, null, null, null), null, null,
+              new PrivilegeGrantInfo(priv.getPriv(), 0, grantor, grantorType,
+                  grantOption)));
+        }
+      } else {
+        org.apache.hadoop.hive.metastore.api.Partition partObj = null;
+
+        if ((!tableObj.isPartitioned())
+            && privSubjectDesc.getPartSpec() != null) {
+          throw new HiveException(
+              "Table is not partitioned, but partition name is present: partSpec="
+                  + privSubjectDesc.getPartSpec().toString());
+        }
+
+        String partName = null;
+        List<String> partValues = null;
+        if (privSubjectDesc.getPartSpec() != null) {
+          partObj = db.getPartition(tableObj, privSubjectDesc.getPartSpec(),
+              false).getTPartition();
+          partValues = partObj.getValues();
+          partName = Warehouse.makePartName(tableObj.getPartCols(), partObj
+              .getValues());
+        }
+
+        for (PrivilegeDesc privDesc : privileges) {
+          List<String> columns = privDesc.getColumns();
+          Privilege priv = privDesc.getPrivilege();
+          if (columns != null && columns.size() > 0) {
+            if (!priv.supportColumnLevel()) {
+              throw new HiveException(priv.getPriv()
+                  + " does not support column level.");
+            }
+            if (privSubjectDesc == null || tableName == null) {
+              throw new HiveException(
+                  "For user-level/database-level privileges, column sets should be null. columns="
+                      + columns);
+            }
+            for (int i = 0; i < columns.size(); i++) {
+              privBag.addToPrivileges(new HiveObjectPrivilege(
+                  new HiveObjectRef(HiveObjectType.COLUMN, dbName, tableName,
+                      partValues, columns.get(i)), null, null,
+                  new PrivilegeGrantInfo(priv.getPriv(), 0, grantor,
+                      grantorType, grantOption)));
+            }
+          } else {
+            if (privSubjectDesc.getTable()) {
+              if (privSubjectDesc.getPartSpec() != null) {
+                privBag.addToPrivileges(new HiveObjectPrivilege(
+                    new HiveObjectRef(HiveObjectType.PARTITION, dbName,
+                        tableName, partValues, null), null, null,
+                    new PrivilegeGrantInfo(priv.getPriv(), 0, grantor,
+                        grantorType, grantOption)));
+              } else {
+                privBag.addToPrivileges(new HiveObjectPrivilege(
+                    new HiveObjectRef(HiveObjectType.TABLE, dbName,
+                        tableName, null, null), null, null,
+                    new PrivilegeGrantInfo(priv.getPriv(), 0, grantor,
+                        grantorType, grantOption)));
+              }
+            } else {
+              privBag.addToPrivileges(new HiveObjectPrivilege(
+                  new HiveObjectRef(HiveObjectType.DATABASE, dbName, null,
+                      null, null), null, null,
+                  new PrivilegeGrantInfo(priv.getPriv(), 0, grantor,
+                      grantorType, grantOption)));
+            }
+          }
+        }
+      }
+
+      for (PrincipalDesc principal : principals) {
+        for (int i = 0; i < privBag.getPrivileges().size(); i++) {
+          HiveObjectPrivilege objPrivs = privBag.getPrivileges().get(i);
+          objPrivs.setPrincipalName(principal.getName());
+          objPrivs.setPrincipalType(principal.getType());
+        }
+        if (isGrant) {
+          db.grantPrivileges(privBag);
+        } else {
+          db.revokePrivileges(privBag);
+        }
+      }
+    } catch (Exception e) {
+      console.printError("Error: " + e.getMessage());
+      return 1;
+    }
+
+    return 0;
+  }
+
+  private int roleDDL(RoleDDLDesc roleDDLDesc) {
+    RoleDDLDesc.RoleOperation operation = roleDDLDesc.getOperation();
+    try {
+      if (operation.equals(RoleDDLDesc.RoleOperation.CREATE_ROLE)) {
+        db.createRole(roleDDLDesc.getName());
+      } else if (operation.equals(RoleDDLDesc.RoleOperation.DROP_ROLE)) {
+        db.dropRole(roleDDLDesc.getName());
+      } else if (operation.equals(RoleDDLDesc.RoleOperation.SHOW_ROLE_GRANT)) {
+        List<Role> roles = db.showRoleGrant(roleDDLDesc.getName(), roleDDLDesc
+            .getPrincipalType());
+        if (roles != null && roles.size() > 0) {
+          Path resFile = new Path(roleDDLDesc.getResFile());
+          FileSystem fs = resFile.getFileSystem(conf);
+          DataOutput outStream = fs.create(resFile);
+          for (Role role : roles) {
+            outStream.writeBytes("role name:" + role.getRoleName());
+            outStream.write(terminator);
+          }
+          ((FSDataOutputStream) outStream).close();
+        }
+      } else {
+        throw new HiveException("Unknown role operation "
+            + operation.getOperationName());
+      }
+    } catch (HiveException e) {
+      console.printError("Error in role operation "
+          + operation.getOperationName() + " on role name "
+          + roleDDLDesc.getName() + ", error message " + e.getMessage());
+      return 1;
+    } catch (IOException e) {
+      LOG.info("role ddl exception: " + stringifyException(e));
+      return 1;
+    }
+
+    return 0;
+  }
+
 private int dropIndex(Hive db, DropIndexDesc dropIdx) throws HiveException {
-    db.dropIndex(db.getCurrentDatabase(), dropIdx.getTableName(),
-        dropIdx.getIndexName(), true);
+    db.dropIndex(db.getCurrentDatabase(), dropIdx.getTableName(), dropIdx
+        .getIndexName(), true);
     return 0;
   }

-  private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException {
+  private int createIndex(Hive db, CreateIndexDesc crtIndex)
+      throws HiveException {

-    if( crtIndex.getSerde() != null) {
+    if (crtIndex.getSerde() != null) {
       validateSerDe(crtIndex.getSerde());
     }
-    db
-        .createIndex(
-        crtIndex.getTableName(), crtIndex.getIndexName(),
crtIndex.getIndexTypeHandlerClass(), - crtIndex.getIndexedCols(), crtIndex.getIndexTableName(), crtIndex.getDeferredRebuild(), - crtIndex.getInputFormat(), crtIndex.getOutputFormat(), crtIndex.getSerde(), - crtIndex.getStorageHandler(), crtIndex.getLocation(), crtIndex.getIdxProps(), crtIndex.getTblProps(), - crtIndex.getSerdeProps(), crtIndex.getCollItemDelim(), crtIndex.getFieldDelim(), crtIndex.getFieldEscape(), - crtIndex.getLineDelim(), crtIndex.getMapKeyDelim(), crtIndex.getIndexComment() - ); + db.createIndex(crtIndex.getTableName(), crtIndex.getIndexName(), crtIndex + .getIndexTypeHandlerClass(), crtIndex.getIndexedCols(), crtIndex + .getIndexTableName(), crtIndex.getDeferredRebuild(), crtIndex + .getInputFormat(), crtIndex.getOutputFormat(), crtIndex.getSerde(), + crtIndex.getStorageHandler(), crtIndex.getLocation(), crtIndex + .getIdxProps(), crtIndex.getTblProps(), crtIndex.getSerdeProps(), + crtIndex.getCollItemDelim(), crtIndex.getFieldDelim(), crtIndex + .getFieldEscape(), crtIndex.getLineDelim(), crtIndex + .getMapKeyDelim(), crtIndex.getIndexComment()); return 0; } - private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException { + private int alterIndex(Hive db, AlterIndexDesc alterIndex) + throws HiveException { String dbName = alterIndex.getDbName(); String baseTableName = alterIndex.getBaseTableName(); String indexName = alterIndex.getIndexName(); @@ -376,7 +779,7 @@ /** * Add a partition to a table. - * + * * @param db * Database to add the partition to. * @param addPartitionDesc @@ -384,15 +787,18 @@ * @return Returns 0 when execution succeeds and above 0 if it fails. * @throws HiveException */ - private int addPartition(Hive db, AddPartitionDesc addPartitionDesc) throws HiveException { + private int addPartition(Hive db, AddPartitionDesc addPartitionDesc) + throws HiveException { - Table tbl = db.getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName()); + Table tbl = db.getTable(addPartitionDesc.getDbName(), addPartitionDesc + .getTableName()); validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ADDPARTITION); // If the add partition was created with IF NOT EXISTS, then we should // not throw an error if the specified part does exist. - Partition checkPart = db.getPartition(tbl, addPartitionDesc.getPartSpec(), false); + Partition checkPart = db.getPartition(tbl, addPartitionDesc.getPartSpec(), + false); if (checkPart != null && addPartitionDesc.getIfNotExists()) { return 0; } @@ -407,15 +813,15 @@ Partition part = db .getPartition(tbl, addPartitionDesc.getPartSpec(), false); - work.getOutputs().add(new WriteEntity(part)); + work.getOutputs().add(new WriteEntity(part, true)); return 0; } /** - * Rewrite the partition's metadata and force the pre/post execute hooks to - * be fired. - * + * Rewrite the partition's metadata and force the pre/post execute hooks to be + * fired. 
+ * * @param db * @param touchDesc * @return @@ -438,7 +844,7 @@ throw new HiveException("Uable to update table"); } work.getInputs().add(new ReadEntity(tbl)); - work.getOutputs().add(new WriteEntity(tbl)); + work.getOutputs().add(new WriteEntity(tbl, true)); } else { Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false); if (part == null) { @@ -450,21 +856,22 @@ throw new HiveException(e); } work.getInputs().add(new ReadEntity(part)); - work.getOutputs().add(new WriteEntity(part)); + work.getOutputs().add(new WriteEntity(part, true)); } return 0; } + /** * Determines whether a partition has been archived - * + * * @param p * @return */ private boolean isArchived(Partition p) { Map params = p.getParameters(); - if ("true".equalsIgnoreCase(params.get( - org.apache.hadoop.hive.metastore.api.Constants.IS_ARCHIVED))) { + if ("true".equalsIgnoreCase(params + .get(org.apache.hadoop.hive.metastore.api.Constants.IS_ARCHIVED))) { return true; } else { return false; @@ -483,16 +890,20 @@ private String getOriginalLocation(Partition p) { Map params = p.getParameters(); - return params.get( - org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION); + return params + .get(org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION); } private void setOriginalLocation(Partition p, String loc) { Map params = p.getParameters(); if (loc == null) { - params.remove(org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION); + params + .remove(org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION); } else { - params.put(org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION, loc); + params + .put( + org.apache.hadoop.hive.metastore.api.Constants.ORIGINAL_LOCATION, + loc); } } @@ -504,28 +915,32 @@ } /** - * Sets the appropriate attributes in the supplied Partition object to mark - * it as archived. Note that the metastore is not touched - a separate - * call to alter_partition is needed. - * - * @param p - the partition object to modify - * @param parentDir - the parent directory of the archive, which is the - * original directory that the partition's files resided in - * @param dirInArchive - the directory within the archive file that contains - * the partitions files - * @param archiveName - the name of the archive + * Sets the appropriate attributes in the supplied Partition object to mark it + * as archived. Note that the metastore is not touched - a separate call to + * alter_partition is needed. 
+ * + * @param p + * - the partition object to modify + * @param parentDir + * - the parent directory of the archive, which is the original + * directory that the partition's files resided in + * @param dirInArchive + * - the directory within the archive file that contains the + * partitions files + * @param archiveName + * - the name of the archive * @throws URISyntaxException */ - private void setArchived(Partition p, Path parentDir, String dirInArchive, String archiveName) - throws URISyntaxException { - assert(isArchived(p) == false); + private void setArchived(Partition p, Path parentDir, String dirInArchive, + String archiveName) throws URISyntaxException { + assert (isArchived(p) == false); Map params = p.getParameters(); URI parentUri = parentDir.toUri(); String parentHost = parentUri.getHost(); String harHost = null; if (parentHost == null) { - harHost = ""; + harHost = ""; } else { harHost = parentUri.getScheme() + "-" + parentHost; } @@ -535,13 +950,14 @@ // har://underlyingfsscheme-host:port/archivepath URI harUri = null; if (dirInArchive.length() == 0) { - harUri = new URI("har", parentUri.getUserInfo(), harHost, parentUri.getPort(), - getArchiveDirOnly(parentDir, archiveName), - parentUri.getQuery(), parentUri.getFragment()); + harUri = new URI("har", parentUri.getUserInfo(), harHost, parentUri + .getPort(), getArchiveDirOnly(parentDir, archiveName), parentUri + .getQuery(), parentUri.getFragment()); } else { - harUri = new URI("har", parentUri.getUserInfo(), harHost, parentUri.getPort(), - new Path(getArchiveDirOnly(parentDir, archiveName), dirInArchive).toUri().getPath(), - parentUri.getQuery(), parentUri.getFragment()); + harUri = new URI("har", parentUri.getUserInfo(), harHost, parentUri + .getPort(), new Path(getArchiveDirOnly(parentDir, archiveName), + dirInArchive).toUri().getPath(), parentUri.getQuery(), parentUri + .getFragment()); } setIsArchived(p, true); setOriginalLocation(p, parentDir.toString()); @@ -549,18 +965,19 @@ } /** - * Sets the appropriate attributes in the supplied Partition object to mark - * it as not archived. Note that the metastore is not touched - a separate - * call to alter_partition is needed. - * - * @param p - the partition to modify + * Sets the appropriate attributes in the supplied Partition object to mark it + * as not archived. Note that the metastore is not touched - a separate call + * to alter_partition is needed. + * + * @param p + * - the partition to modify */ private void setUnArchived(Partition p) { - assert(isArchived(p) == true); + assert (isArchived(p) == true); String parentDir = getOriginalLocation(p); setIsArchived(p, false); setOriginalLocation(p, null); - assert(parentDir != null); + assert (parentDir != null); p.setLocation(parentDir); } @@ -592,8 +1009,8 @@ } } - private int archive(Hive db, AlterTableSimpleDesc simpleDesc, DriverContext driverContext) - throws HiveException { + private int archive(Hive db, AlterTableSimpleDesc simpleDesc, + DriverContext driverContext) throws HiveException { String dbName = simpleDesc.getDbName(); String tblName = simpleDesc.getTableName(); @@ -620,8 +1037,8 @@ originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX); if (pathExists(leftOverIntermediateOriginal)) { - console.printInfo("Deleting " + leftOverIntermediateOriginal + - " left over from a previous archiving operation"); + console.printInfo("Deleting " + leftOverIntermediateOriginal + + " left over from a previous archiving operation"); deleteDir(leftOverIntermediateOriginal); } @@ -647,10 +1064,10 @@ // Steps: // 1. 
Create the archive in a temporary folder // 2. Move the archive dir to an intermediate dir that is in at the same - // dir as the original partition dir. Call the new dir - // intermediate-archive. + // dir as the original partition dir. Call the new dir + // intermediate-archive. // 3. Rename the original partition dir to an intermediate dir. Call the - // renamed dir intermediate-original + // renamed dir intermediate-original // 4. Rename intermediate-archive to the original partition dir // 5. Change the metadata // 6. Delete the original partition files in intermediate-original @@ -663,20 +1080,22 @@ // ARCHIVE_INTERMEDIATE_DIR_SUFFIX that's the same level as the partition, // if it does not already exist. If it does exist, we assume the dir is good // to use as the move operation that created it is atomic. - if (!pathExists(intermediateArchivedDir) && - !pathExists(intermediateOriginalDir)) { + if (!pathExists(intermediateArchivedDir) + && !pathExists(intermediateOriginalDir)) { // First create the archive in a tmp dir so that if the job fails, the // bad files don't pollute the filesystem - Path tmpDir = new Path(driverContext.getCtx().getExternalTmpFileURI(originalDir.toUri()), "partlevel"); + Path tmpDir = new Path(driverContext.getCtx().getExternalTmpFileURI( + originalDir.toUri()), "partlevel"); - console.printInfo("Creating " + archiveName + " for " + originalDir.toString()); + console.printInfo("Creating " + archiveName + " for " + + originalDir.toString()); console.printInfo("in " + tmpDir); console.printInfo("Please wait... (this may take a while)"); // Create the Hadoop archive HadoopShims shim = ShimLoader.getHadoopShims(); - int ret=0; + int ret = 0; try { ret = shim.createHadoopArchive(conf, originalDir, tmpDir, archiveName); } catch (Exception e) { @@ -685,12 +1104,15 @@ if (ret != 0) { throw new HiveException("Error while creating HAR"); } - // Move from the tmp dir to an intermediate directory, in the same level as + // Move from the tmp dir to an intermediate directory, in the same level + // as // the partition directory. e.g. .../hr=12-intermediate-archived try { - console.printInfo("Moving " + tmpDir + " to " + intermediateArchivedDir); + console + .printInfo("Moving " + tmpDir + " to " + intermediateArchivedDir); if (pathExists(intermediateArchivedDir)) { - throw new HiveException("The intermediate archive directory already exists."); + throw new HiveException( + "The intermediate archive directory already exists."); } fs.rename(tmpDir, intermediateArchivedDir); } catch (IOException e) { @@ -698,8 +1120,10 @@ } } else { if (pathExists(intermediateArchivedDir)) { - console.printInfo("Intermediate archive directory " + intermediateArchivedDir + - " already exists. Assuming it contains an archived version of the partition"); + console + .printInfo("Intermediate archive directory " + + intermediateArchivedDir + + " already exists. Assuming it contains an archived version of the partition"); } } @@ -710,12 +1134,12 @@ // Move the original parent directory to the intermediate original directory // if the move hasn't been made already if (!pathExists(intermediateOriginalDir)) { - console.printInfo("Moving " + originalDir + " to " + - intermediateOriginalDir); + console.printInfo("Moving " + originalDir + " to " + + intermediateOriginalDir); moveDir(fs, originalDir, intermediateOriginalDir); } else { - console.printInfo(intermediateOriginalDir + " already exists. 
" + - "Assuming it contains the original files in the partition"); + console.printInfo(intermediateOriginalDir + " already exists. " + + "Assuming it contains the original files in the partition"); } // If there's a failure from here to when the metadata is updated, @@ -726,18 +1150,18 @@ // Move the intermediate archived directory to the original parent directory if (!pathExists(originalDir)) { - console.printInfo("Moving " + intermediateArchivedDir + " to " + - originalDir); + console.printInfo("Moving " + intermediateArchivedDir + " to " + + originalDir); moveDir(fs, intermediateArchivedDir, originalDir); } else { - console.printInfo(originalDir + " already exists. " + - "Assuming it contains the archived version of the partition"); + console.printInfo(originalDir + " already exists. " + + "Assuming it contains the archived version of the partition"); } // Record this change in the metastore try { - boolean parentSettable = - conf.getBoolVar(HiveConf.ConfVars.HIVEHARPARENTDIRSETTABLE); + boolean parentSettable = conf + .getBoolVar(HiveConf.ConfVars.HIVEHARPARENTDIRSETTABLE); // dirInArchive is the directory within the archive that has all the files // for this partition. With older versions of Hadoop, archiving a @@ -754,7 +1178,7 @@ String dirInArchive = ""; if (!parentSettable) { dirInArchive = originalDir.toUri().getPath(); - if(dirInArchive.length() > 1 && dirInArchive.charAt(0)=='/') { + if (dirInArchive.length() > 1 && dirInArchive.charAt(0) == '/') { dirInArchive = dirInArchive.substring(1); } } @@ -768,7 +1192,6 @@ // will not be deleted. The user will run ARCHIVE again to clear this up deleteDir(intermediateOriginalDir); - return 0; } @@ -789,7 +1212,8 @@ Partition p = db.getPartition(tbl, partSpec, false); if (tbl.getTableType() != TableType.MANAGED_TABLE) { - throw new HiveException("UNARCHIVE can only be performed on managed tables"); + throw new HiveException( + "UNARCHIVE can only be performed on managed tables"); } if (p == null) { @@ -798,12 +1222,13 @@ if (!isArchived(p)) { Path location = new Path(p.getLocation()); - Path leftOverArchiveDir = new Path(location.getParent(), - location.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX); + Path leftOverArchiveDir = new Path(location.getParent(), location + .getName() + + INTERMEDIATE_ARCHIVED_DIR_SUFFIX); if (pathExists(leftOverArchiveDir)) { - console.printInfo("Deleting " + leftOverArchiveDir + " left over " + - "from a previous unarchiving operation"); + console.printInfo("Deleting " + leftOverArchiveDir + " left over " + + "from a previous unarchiving operation"); deleteDir(leftOverArchiveDir); } @@ -817,21 +1242,21 @@ Path intermediateExtractedDir = new Path(originalLocation.getParent(), originalLocation.getName() + INTERMEDIATE_EXTRACTED_DIR_SUFFIX); - Path tmpDir = new Path(driverContext - .getCtx() - .getExternalTmpFileURI(originalLocation.toUri())); + Path tmpDir = new Path(driverContext.getCtx().getExternalTmpFileURI( + originalLocation.toUri())); FileSystem fs = null; try { fs = tmpDir.getFileSystem(conf); // Verify that there are no files in the tmp dir, because if there are, it // would be copied to the partition - FileStatus [] filesInTmpDir = fs.listStatus(tmpDir); + FileStatus[] filesInTmpDir = fs.listStatus(tmpDir); if (filesInTmpDir != null && filesInTmpDir.length != 0) { for (FileStatus file : filesInTmpDir) { console.printInfo(file.getPath().toString()); } - throw new HiveException("Temporary directory " + tmpDir + " is not empty"); + throw new HiveException("Temporary directory " + tmpDir + + " is not 
empty"); } } catch (IOException e) { @@ -848,24 +1273,24 @@ // Clarification of terms: // - The originalLocation directory represents the original directory of the - // partition's files. They now contain an archived version of those files - // eg. hdfs:/warehouse/myTable/ds=1/ + // partition's files. They now contain an archived version of those files + // eg. hdfs:/warehouse/myTable/ds=1/ // - The source directory is the directory containing all the files that - // should be in the partition. e.g. har:/warehouse/myTable/ds=1/myTable.har/ - // Note the har:/ scheme + // should be in the partition. e.g. har:/warehouse/myTable/ds=1/myTable.har/ + // Note the har:/ scheme // Steps: // 1. Extract the archive in a temporary folder // 2. Move the archive dir to an intermediate dir that is in at the same - // dir as originalLocation. Call the new dir intermediate-extracted. + // dir as originalLocation. Call the new dir intermediate-extracted. // 3. Rename the original partition dir to an intermediate dir. Call the - // renamed dir intermediate-archive + // renamed dir intermediate-archive // 4. Rename intermediate-extracted to the original partition dir // 5. Change the metadata // 6. Delete the archived partition files in intermediate-archive - if (!pathExists(intermediateExtractedDir) && - !pathExists(intermediateArchiveDir)) { + if (!pathExists(intermediateExtractedDir) + && !pathExists(intermediateArchiveDir)) { try { // Copy the files out of the archive into the temporary directory @@ -888,10 +1313,11 @@ throw new HiveException("Error while copying files from archive"); } - console.printInfo("Moving " + tmpDir + " to " + intermediateExtractedDir); + console.printInfo("Moving " + tmpDir + " to " + + intermediateExtractedDir); if (fs.exists(intermediateExtractedDir)) { - throw new HiveException("Invalid state: the intermediate extracted " + - "directory already exists."); + throw new HiveException("Invalid state: the intermediate extracted " + + "directory already exists."); } fs.rename(tmpDir, intermediateExtractedDir); } catch (Exception e) { @@ -904,14 +1330,15 @@ if (!pathExists(intermediateArchiveDir)) { try { - console.printInfo("Moving " + originalLocation + " to " + intermediateArchiveDir); + console.printInfo("Moving " + originalLocation + " to " + + intermediateArchiveDir); fs.rename(originalLocation, intermediateArchiveDir); } catch (IOException e) { throw new HiveException(e); } } else { - console.printInfo(intermediateArchiveDir + " already exists. " + - "Assuming it contains the archived version of the partition"); + console.printInfo(intermediateArchiveDir + " already exists. " + + "Assuming it contains the archived version of the partition"); } // If there is a failure from here to until when the metadata is changed, @@ -922,14 +1349,15 @@ // (containing the archived version of the files) to intermediateArchiveDir if (!pathExists(originalLocation)) { try { - console.printInfo("Moving " + intermediateExtractedDir + " to " + originalLocation); + console.printInfo("Moving " + intermediateExtractedDir + " to " + + originalLocation); fs.rename(intermediateExtractedDir, originalLocation); } catch (IOException e) { throw new HiveException(e); } } else { - console.printInfo(originalLocation + " already exists. " + - "Assuming it contains the extracted files in the partition"); + console.printInfo(originalLocation + " already exists. 
" + + "Assuming it contains the extracted files in the partition"); } setUnArchived(p); @@ -945,8 +1373,8 @@ return 0; } - private void validateAlterTableType( - Table tbl, AlterTableDesc.AlterTableTypes alterType) throws HiveException { + private void validateAlterTableType(Table tbl, + AlterTableDesc.AlterTableTypes alterType) throws HiveException { if (tbl.isView()) { switch (alterType) { @@ -954,8 +1382,7 @@ // allow this form break; default: - throw new HiveException( - "Cannot use this form of ALTER TABLE on a view"); + throw new HiveException("Cannot use this form of ALTER TABLE on a view"); } } @@ -968,7 +1395,7 @@ * MetastoreCheck, see if the data in the metastore matches what is on the * dfs. Current version checks for tables and partitions that are either * missing on disk on in the metastore. - * + * * @param db * The database in question. * @param msckDesc @@ -980,8 +1407,8 @@ List repairOutput = new ArrayList(); try { HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db); - checker.checkMetastore(db.getCurrentDatabase(), msckDesc - .getTableName(), msckDesc.getPartSpecs(), result); + checker.checkMetastore(db.getCurrentDatabase(), msckDesc.getTableName(), + msckDesc.getPartSpecs(), result); if (msckDesc.isRepairPartitions()) { Table table = db.getTable(msckDesc.getTableName()); for (CheckResult.PartitionResult part : result.getPartitionsNotInMs()) { @@ -1046,7 +1473,7 @@ /** * Write the result of msck to a writer. - * + * * @param result * The result we're going to write * @param msg @@ -1080,7 +1507,7 @@ /** * Write a list of partitions to a file. - * + * * @param db * The database in question. * @param showParts @@ -1089,7 +1516,8 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int showPartitions(Hive db, ShowPartitionsDesc showParts) throws HiveException { + private int showPartitions(Hive db, ShowPartitionsDesc showParts) + throws HiveException { // get the partitions for the table and populate the output String tabName = showParts.getTabName(); Table tbl = null; @@ -1102,10 +1530,11 @@ return 1; } if (showParts.getPartSpec() != null) { - parts = db.getPartitionNames(db.getCurrentDatabase(), - tbl.getTableName(), showParts.getPartSpec(), (short) -1); + parts = db.getPartitionNames(db.getCurrentDatabase(), tbl.getTableName(), + showParts.getPartSpec(), (short) -1); } else { - parts = db.getPartitionNames(db.getCurrentDatabase(), tbl.getTableName(), (short) -1); + parts = db.getPartitionNames(db.getCurrentDatabase(), tbl.getTableName(), + (short) -1); } // write the results in the file @@ -1136,7 +1565,7 @@ /** * Write a list of indexes to a file. - * + * * @param db * The database in question. * @param showIndexes @@ -1145,7 +1574,8 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. 
*/ - private int showIndexes(Hive db, ShowIndexesDesc showIndexes) throws HiveException { + private int showIndexes(Hive db, ShowIndexesDesc showIndexes) + throws HiveException { // get the indexes for the table and populate the output String tableName = showIndexes.getTableName(); Table tbl = null; @@ -1153,7 +1583,8 @@ tbl = db.getTable(tableName); - indexes = db.getIndexes(db.getCurrentDatabase(), tbl.getTableName(), (short) -1); + indexes = db.getIndexes(db.getCurrentDatabase(), tbl.getTableName(), + (short) -1); // write the results in the file try { @@ -1168,9 +1599,9 @@ outStream.write(terminator); } - for (Index index : indexes) - { - outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(index)); + for (Index index : indexes) { + outStream.writeBytes(MetaDataFormatUtils + .getAllColumnsInformation(index)); } ((FSDataOutputStream) outStream).close(); @@ -1190,14 +1621,15 @@ /** * Write a list of the available databases to a file. - * + * * @param showDatabases * These are the databases we're interested in. * @return Returns 0 when execution succeeds and above 0 if it fails. * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int showDatabases(Hive db, ShowDatabasesDesc showDatabasesDesc) throws HiveException { + private int showDatabases(Hive db, ShowDatabasesDesc showDatabasesDesc) + throws HiveException { // get the databases for the desired pattern - populate the output stream List databases = null; if (showDatabasesDesc.getPattern() != null) { @@ -1234,7 +1666,7 @@ /** * Write a list of the tables in the database to a file. - * + * * @param db * The database in question. * @param showTbls @@ -1282,7 +1714,7 @@ /** * Write a list of the user defined functions to a file. - * + * * @param showFuncs * are the functions we're interested in. * @return Returns 0 when execution succeeds and above 0 if it fails. @@ -1328,7 +1760,7 @@ /** * Write a list of the current locks to a file. - * + * * @param showLocks * the locks we're interested in. * @return Returns 0 when execution succeeds and above 0 if it fails. @@ -1352,32 +1784,31 @@ if (showLocks.getTableName() == null) { locks = lockMgr.getLocks(false, isExt); - } - else { + } else { locks = lockMgr.getLocks(getHiveObject(showLocks.getTableName(), - showLocks.getPartSpec()), - true, isExt); + showLocks.getPartSpec()), true, isExt); } Collections.sort(locks, new Comparator() { - @Override - public int compare(HiveLock o1, HiveLock o2) { - int cmp = o1.getHiveLockObject().getName().compareTo(o2.getHiveLockObject().getName()); - if (cmp == 0) { - if (o1.getHiveLockMode() == o2.getHiveLockMode()) { - return cmp; - } - // EXCLUSIVE locks occur before SHARED locks - if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) { - return -1; - } - return +1; + @Override + public int compare(HiveLock o1, HiveLock o2) { + int cmp = o1.getHiveLockObject().getName().compareTo( + o2.getHiveLockObject().getName()); + if (cmp == 0) { + if (o1.getHiveLockMode() == o2.getHiveLockMode()) { + return cmp; + } + // EXCLUSIVE locks occur before SHARED locks + if (o1.getHiveLockMode() == HiveLockMode.EXCLUSIVE) { + return -1; } - return cmp; + return +1; } + return cmp; + } - }); + }); Iterator locksIter = locks.iterator(); @@ -1412,7 +1843,7 @@ /** * Lock the table/partition specified - * + * * @param lockTbl * the table/partition to be locked along with the mode * @return Returns 0 when execution succeeds and above 0 if it fails. 
@@ -1428,19 +1859,18 @@ HiveLockMode mode = HiveLockMode.valueOf(lockTbl.getMode()); String tabName = lockTbl.getTableName(); - Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tabName); + Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tabName); if (tbl == null) { throw new HiveException("Table " + tabName + " does not exist "); } Map partSpec = lockTbl.getPartSpec(); - HiveLockObjectData lockData = - new HiveLockObjectData(lockTbl.getQueryId(), - String.valueOf(System.currentTimeMillis()), - "EXPLICIT"); + HiveLockObjectData lockData = new HiveLockObjectData(lockTbl.getQueryId(), + String.valueOf(System.currentTimeMillis()), "EXPLICIT"); if (partSpec == null) { - HiveLock lck = lockMgr.lock(new HiveLockObject(tbl, lockData), mode, true, 0, 0); + HiveLock lck = lockMgr.lock(new HiveLockObject(tbl, lockData), mode, + true, 0, 0); if (lck == null) { return 1; } @@ -1449,9 +1879,11 @@ Partition par = db.getPartition(tbl, partSpec, false); if (par == null) { - throw new HiveException("Partition " + partSpec + " for table " + tabName + " does not exist"); + throw new HiveException("Partition " + partSpec + " for table " + tabName + + " does not exist"); } - HiveLock lck = lockMgr.lock(new HiveLockObject(par, lockData), mode, true, 0, 0); + HiveLock lck = lockMgr.lock(new HiveLockObject(par, lockData), mode, true, + 0, 0); if (lck == null) { return 1; } @@ -1459,21 +1891,21 @@ } private HiveLockObject getHiveObject(String tabName, - Map partSpec) throws HiveException { - Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tabName); + Map partSpec) throws HiveException { + Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tabName); if (tbl == null) { throw new HiveException("Table " + tabName + " does not exist "); } HiveLockObject obj = null; - if (partSpec == null) { + if (partSpec == null) { obj = new HiveLockObject(tbl, null); - } - else { + } else { Partition par = db.getPartition(tbl, partSpec, false); if (par == null) { - throw new HiveException("Partition " + partSpec + " for table " + tabName + " does not exist"); + throw new HiveException("Partition " + partSpec + " for table " + + tabName + " does not exist"); } obj = new HiveLockObject(par, null); } @@ -1482,7 +1914,7 @@ /** * Unlock the table/partition specified - * + * * @param unlockTbl * the table/partition to be unlocked * @return Returns 0 when execution succeeds and above 0 if it fails. @@ -1514,7 +1946,7 @@ /** * Shows a description of a function. - * + * * @param descFunc * is the function we are describing * @throws HiveException @@ -1599,9 +2031,10 @@ outStream.writeBytes(params.toString()); } - } else { - outStream.writeBytes("No such database: " + descDatabase.getDatabaseName()); - } + } else { + outStream.writeBytes("No such database: " + + descDatabase.getDatabaseName()); + } outStream.write(terminator); @@ -1621,20 +2054,22 @@ /** * Write the status of tables to a file. - * + * * @param db * The database in question. * @param showTblStatus * tables we are interested in * @return Return 0 when execution succeeds and above 0 if it fails. */ - private int showTableStatus(Hive db, ShowTableStatusDesc showTblStatus) throws HiveException { + private int showTableStatus(Hive db, ShowTableStatusDesc showTblStatus) + throws HiveException { // get the tables for the desired pattenn - populate the output stream List
tbls = new ArrayList
(); Map part = showTblStatus.getPartSpec(); Partition par = null; if (part != null) { - Table tbl = db.getTable(showTblStatus.getDbName(), showTblStatus.getPattern()); + Table tbl = db.getTable(showTblStatus.getDbName(), showTblStatus + .getPattern()); par = db.getPartition(tbl, part, false); if (par == null) { throw new HiveException("Partition " + part + " for table " @@ -1742,7 +2177,7 @@ /** * Write the description of a table to a file. - * + * * @param db * The database in question. * @param descTbl @@ -1751,7 +2186,8 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException { + private int describeTable(Hive db, DescTableDesc descTbl) + throws HiveException { String colPath = descTbl.getTableName(); String tableName = colPath.substring(0, colPath.indexOf('.') == -1 ? colPath.length() : colPath.indexOf('.')); @@ -1794,9 +2230,9 @@ LOG.info("DDLTask: got data for " + tbl.getTableName()); - Path resFile = new Path(descTbl.getResFile()); - FileSystem fs = resFile.getFileSystem(conf); - DataOutput outStream = fs.create(resFile); + Path resFile = new Path(descTbl.getResFile()); + FileSystem fs = resFile.getFileSystem(conf); + DataOutput outStream = fs.create(resFile); if (colPath.equals(tableName)) { if (!descTbl.isFormatted()) { @@ -1804,16 +2240,21 @@ if (tableName.equals(colPath)) { cols.addAll(tbl.getPartCols()); } - outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols)); + outStream + .writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols)); } else { - outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(tbl)); + outStream.writeBytes(MetaDataFormatUtils + .getAllColumnsInformation(tbl)); } } else { - List cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer()); + List cols = Hive.getFieldsFromDeserializer(colPath, tbl + .getDeserializer()); if (descTbl.isFormatted()) { - outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(cols)); + outStream.writeBytes(MetaDataFormatUtils + .getAllColumnsInformation(cols)); } else { - outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols)); + outStream + .writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols)); } } @@ -1821,7 +2262,8 @@ if (descTbl.isFormatted()) { if (part != null) { - outStream.writeBytes(MetaDataFormatUtils.getPartitionInformation(part)); + outStream.writeBytes(MetaDataFormatUtils + .getPartitionInformation(part)); } else { outStream.writeBytes(MetaDataFormatUtils.getTableInformation(tbl)); } @@ -1866,6 +2308,46 @@ return 0; } + public static void writeGrantInfo(DataOutput outStream, + PrincipalType principalType, String principalName, String dbName, + String tableName, String partName, String columnName, + PrivilegeGrantInfo grantInfo) throws IOException { + + String privilege = grantInfo.getPrivilege(); + int createTime = grantInfo.getCreateTime(); + String grantor = grantInfo.getGrantor(); + + if (dbName != null) { + writeKeyValuePair(outStream, "database", dbName); + } + if (tableName != null) { + writeKeyValuePair(outStream, "table", tableName); + } + if (partName != null) { + writeKeyValuePair(outStream, "partition", partName); + } + if (columnName != null) { + writeKeyValuePair(outStream, "columnName", columnName); + } + + writeKeyValuePair(outStream, "principalName", principalName); + writeKeyValuePair(outStream, "principalType", "" + principalType); + writeKeyValuePair(outStream, "privilege", privilege); + writeKeyValuePair(outStream, 
"grantTime", "" + createTime); + if (grantor != null) { + writeKeyValuePair(outStream, "grantor", grantor); + } + } + + private static void writeKeyValuePair(DataOutput outStream, String key, + String value) throws IOException { + outStream.write(terminator); + outStream.writeBytes(key); + outStream.write(separator); + outStream.writeBytes(value); + outStream.write(separator); + } + private void writeFileSystemStats(DataOutput outStream, List locations, Path tabLoc, boolean partSpecified, int indent) throws IOException { long totalFileSize = 0; @@ -1990,7 +2472,7 @@ /** * Alter a given table. - * + * * @param db * The database in question. * @param alterTbl @@ -2004,9 +2486,9 @@ Table tbl = db.getTable(alterTbl.getOldName()); Partition part = null; - if(alterTbl.getPartSpec() != null) { + if (alterTbl.getPartSpec() != null) { part = db.getPartition(tbl, alterTbl.getPartSpec(), false); - if(part == null) { + if (part == null) { console.printError("Partition : " + alterTbl.getPartSpec().toString() + " does not exist."); return 1; @@ -2153,9 +2635,10 @@ tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl .getDeserializer())); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) { - if(part != null) { + if (part != null) { part.getTPartition().getSd().setInputFormat(alterTbl.getInputFormat()); - part.getTPartition().getSd().setOutputFormat(alterTbl.getOutputFormat()); + part.getTPartition().getSd() + .setOutputFormat(alterTbl.getOutputFormat()); if (alterTbl.getSerdeName() != null) { part.getTPartition().getSd().getSerdeInfo().setSerializationLib( alterTbl.getSerdeName()); @@ -2169,10 +2652,11 @@ } } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) { boolean protectModeEnable = alterTbl.isProtectModeEnable(); - AlterTableDesc.ProtectModeType protectMode = alterTbl.getProtectModeType(); + AlterTableDesc.ProtectModeType protectMode = alterTbl + .getProtectModeType(); ProtectMode mode = null; - if(part != null) { + if (part != null) { mode = part.getProtectMode(); } else { mode = tbl.getProtectMode(); @@ -2249,7 +2733,7 @@ return 1; } - if(part == null) { + if (part == null) { if (!updateModifiedParameters(tbl.getTTable().getParameters(), conf)) { return 1; } @@ -2285,7 +2769,7 @@ // contains the new table. This is needed for rename - both the old and the // new table names are // passed - if(part != null) { + if (part != null) { work.getInputs().add(new ReadEntity(part)); work.getOutputs().add(new WriteEntity(part)); } else { @@ -2297,7 +2781,7 @@ /** * Drop a given table. - * + * * @param db * The database in question. 
* @param dropTbl @@ -2331,19 +2815,19 @@ if (dropTbl.getPartSpecs() == null) { if (tbl != null && !tbl.canDrop()) { - throw new HiveException("Table " + tbl.getTableName() + - " is protected from being dropped"); + throw new HiveException("Table " + tbl.getTableName() + + " is protected from being dropped"); } // We should check that all the partitions of the table can be dropped if (tbl != null && tbl.isPartitioned()) { List listPartitions = db.getPartitions(tbl); - for (Partition p: listPartitions) { - if (!p.canDrop()) { - throw new HiveException("Table " + tbl.getTableName() + - " Partition" + p.getName() + - " is protected from being dropped"); - } + for (Partition p : listPartitions) { + if (!p.canDrop()) { + throw new HiveException("Table " + tbl.getTableName() + + " Partition" + p.getName() + + " is protected from being dropped"); + } } } @@ -2354,14 +2838,15 @@ } } else { // get all partitions of the table - List partitionNames = - db.getPartitionNames(db.getCurrentDatabase(), dropTbl.getTableName(), (short) -1); + List partitionNames = db.getPartitionNames(db + .getCurrentDatabase(), dropTbl.getTableName(), (short) -1); Set> partitions = new HashSet>(); for (String partitionName : partitionNames) { try { partitions.add(Warehouse.makeSpecFromName(partitionName)); } catch (MetaException e) { - LOG.warn("Unrecognized partition name from metastore: " + partitionName); + LOG.warn("Unrecognized partition name from metastore: " + + partitionName); } } // drop partitions in the list @@ -2381,9 +2866,9 @@ if (match) { Partition p = db.getPartition(tbl, part, false); if (!p.canDrop()) { - throw new HiveException("Table " + tbl.getTableName() + - " Partition " + p.getName() + - " is protected from being dropped"); + throw new HiveException("Table " + tbl.getTableName() + + " Partition " + p.getName() + + " is protected from being dropped"); } partsToDelete.add(p); @@ -2407,13 +2892,14 @@ /** * Update last_modified_by and last_modified_time parameters in parameter map. - * + * * @param params * Parameters. * @param user * user that is doing the updating. */ - private boolean updateModifiedParameters(Map params, HiveConf conf) { + private boolean updateModifiedParameters(Map params, + HiveConf conf) { String user = null; try { user = conf.getUser(); @@ -2424,7 +2910,8 @@ } params.put("last_modified_by", user); - params.put("last_modified_time", Long.toString(System.currentTimeMillis() / 1000)); + params.put("last_modified_time", Long + .toString(System.currentTimeMillis() / 1000)); return true; } @@ -2444,6 +2931,7 @@ /** * Create a Database + * * @param db * @param crtDb * @return Always returns 0 @@ -2464,6 +2952,7 @@ /** * Drop a Database + * * @param db * @param dropDb * @return Always returns 0 @@ -2478,6 +2967,7 @@ /** * Switch to a different Database + * * @param db * @param switchDb * @return Always returns 0 @@ -2487,16 +2977,16 @@ throws HiveException { String dbName = switchDb.getDatabaseName(); if (!db.databaseExists(dbName)) { - throw new HiveException("ERROR: The database " + dbName + " does not exist."); + throw new HiveException("ERROR: The database " + dbName + + " does not exist."); } db.setCurrentDatabase(dbName); return 0; } - /** * Create a new table. - * + * * @param db * The database in question. 
* @param crtTbl @@ -2522,25 +3012,28 @@ if (crtTbl.getStorageHandler() != null) { tbl.setProperty( - org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE, - crtTbl.getStorageHandler()); + org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_STORAGE, + crtTbl.getStorageHandler()); } HiveStorageHandler storageHandler = tbl.getStorageHandler(); /* * We use LazySimpleSerDe by default. - * + * * If the user didn't specify a SerDe, and any of the columns are not simple * types, we will have to use DynamicSerDe instead. */ if (crtTbl.getSerName() == null) { if (storageHandler == null) { - LOG.info("Default to LazySimpleSerDe for table " + crtTbl.getTableName()); - tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); + LOG.info("Default to LazySimpleSerDe for table " + + crtTbl.getTableName()); + tbl + .setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class + .getName()); } else { String serDeClassName = storageHandler.getSerDeClass().getName(); LOG.info("Use StorageHandler-supplied " + serDeClassName - + " for table " + crtTbl.getTableName()); + + " for table " + crtTbl.getTableName()); tbl.setSerializationLib(serDeClassName); } } else { @@ -2569,7 +3062,7 @@ if (crtTbl.getSerdeProps() != null) { Iterator> iter = crtTbl.getSerdeProps().entrySet() - .iterator(); + .iterator(); while (iter.hasNext()) { Entry m = iter.next(); tbl.setSerdeParam(m.getKey(), m.getValue()); @@ -2595,10 +3088,9 @@ tbl.setInputFormatClass(crtTbl.getInputFormat()); tbl.setOutputFormatClass(crtTbl.getOutputFormat()); - tbl.getTTable().getSd().setInputFormat( - tbl.getInputFormatClass().getName()); + tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName()); tbl.getTTable().getSd().setOutputFormat( - tbl.getOutputFormatClass().getName()); + tbl.getOutputFormatClass().getName()); if (crtTbl.isExternal()) { tbl.setProperty("EXTERNAL", "TRUE"); @@ -2651,7 +3143,7 @@ /** * Create a new table like an existing table. - * + * * @param db * The database in question. * @param crtTbl @@ -2660,7 +3152,8 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveException { + private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) + throws HiveException { // Get the existing table Table tbl = db.getTable(crtTbl.getLikeTableName()); @@ -2678,7 +3171,8 @@ tbl.unsetDataLocation(); } - // we should reset table specific parameters including (stats, lastDDLTime etc.) + // we should reset table specific parameters including (stats, lastDDLTime + // etc.) Map params = tbl.getParameters(); params.clear(); @@ -2690,7 +3184,7 @@ /** * Create a new view. - * + * * @param db * The database in question. 
* @param crtView Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (working copy) @@ -174,7 +174,7 @@ .getTableName(), tbd.getReplace(), new Path(tbd.getTmpDir()), tbd.getHoldDDLTime()); if (work.getOutputs() != null) { - work.getOutputs().add(new WriteEntity(table)); + work.getOutputs().add(new WriteEntity(table, true)); } } else { LOG.info("Partition is: " + tbd.getPartitionSpec().toString()); @@ -216,7 +216,7 @@ for (LinkedHashMap partSpec: dp) { Partition partn = db.getPartition(table, partSpec, false); - WriteEntity enty = new WriteEntity(partn); + WriteEntity enty = new WriteEntity(partn, true); if (work.getOutputs() != null) { work.getOutputs().add(enty); } @@ -249,7 +249,7 @@ dc = new DataContainer(table.getTTable(), partn.getTPartition()); // add this partition to post-execution hook if (work.getOutputs() != null) { - work.getOutputs().add(new WriteEntity(partn)); + work.getOutputs().add(new WriteEntity(partn, true)); } } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java (working copy) @@ -233,7 +233,7 @@ reportProgress(); numMapRowsRead++; - + // the big table has reached a new key group. try to let the small tables // catch up with the big table. if (nextKeyGroup) { Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/AuthorizationException.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/AuthorizationException.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/AuthorizationException.java (revision 0) @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.metadata; + +public class AuthorizationException extends RuntimeException { + /** + */ + private static final long serialVersionUID = 1L; + + public AuthorizationException() { + super(); + } + + /** + * Constructs an {@link AuthorizationException} with the specified detail + * message. + * + * @param s + * the detail message. + */ + public AuthorizationException(String message) { + super(message); + } + + /** + * Constructs an {@link AuthorizationException} with the specified cause. 
+ * + * @param cause + * the cause + */ + public AuthorizationException(Throwable cause) { + super(cause); + } + + + public AuthorizationException(String message, Throwable cause) { + super(message, cause); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (working copy) @@ -57,14 +57,25 @@ import org.apache.hadoop.hive.metastore.api.Constants; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.HiveObjectType; import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.index.HiveIndexHandler; +import org.apache.hadoop.hive.ql.session.CreateTableAutomaticGrant; +import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; @@ -86,7 +97,7 @@ private HiveConf conf = null; private IMetaStoreClient metaStoreClient; private String currentDatabase; - + private static ThreadLocal hiveDB = new ThreadLocal() { @Override protected synchronized Object initialValue() { @@ -432,7 +443,17 @@ if (tbl.getParameters() != null) { tbl.getParameters().remove(Constants.DDL_TIME); } - getMSC().createTable(tbl.getTTable()); + org.apache.hadoop.hive.metastore.api.Table tTbl = tbl.getTTable(); + PrincipalPrivilegeSet principalPrivs = new PrincipalPrivilegeSet(); + SessionState ss = SessionState.get(); + CreateTableAutomaticGrant grants = ss.getCreateTableGrants(); + if (grants!= null) { + principalPrivs.setUserPrivileges(grants.getUserGrants()); + principalPrivs.setGroupPrivileges(grants.getGroupGrants()); + principalPrivs.setRolePrivileges(grants.getRoleGrants()); + tTbl.setPrivileges(principalPrivs); + } + getMSC().createTable(tTbl); } catch (AlreadyExistsException e) { if (!ifNotExists) { throw new HiveException(e); @@ -763,7 +784,7 @@ } catch (NoSuchObjectException e) { if (throwException) { LOG.error(StringUtils.stringifyException(e)); - throw new InvalidTableException("Table not found ", tableName); + throw new InvalidTableException("Table " + tableName + " not found ", tableName); } return null; } catch (Exception e) { @@ -903,6 +924,36 @@ } } + public boolean grantPrivileges(PrivilegeBag privileges) + throws HiveException { + try { + return getMSC().grant_privileges(privileges); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param userName 
+ * principal name + * @param isRole + * is the given principal name a role + * @param isGroup + * is the given principal name a group + * @param privileges + * a bag of privileges + * @return + * @throws HiveException + */ + public boolean revokePrivileges(PrivilegeBag privileges) + throws HiveException { + try { + return getMSC().revoke_privileges(privileges); + } catch (Exception e) { + throw new HiveException(e); + } + } + /** * Query metadata to see if a database with the given name already exists. * @@ -1180,7 +1231,8 @@ } org.apache.hadoop.hive.metastore.api.Partition tpart = null; try { - tpart = getMSC().getPartition(tbl.getDbName(), tbl.getTableName(), pvals); + tpart = getMSC().getPartitionWithAuthInfo(tbl.getDbName(), + tbl.getTableName(), pvals, getUserName(), getGroupNames()); } catch (NoSuchObjectException nsoe) { // this means no partition exists for the given partition // key value pairs - thrift cannot handle null return values, hence @@ -1272,8 +1324,8 @@ if (tbl.isPartitioned()) { List tParts; try { - tParts = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(), - (short) -1); + tParts = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(), + (short) -1, getUserName(), getGroupNames()); } catch (Exception e) { LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); @@ -1325,8 +1377,8 @@ List partitions = null; try { - partitions = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(), - partialPvals, (short) -1); + partitions = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(), + partialPvals, (short) -1, getUserName(), getGroupNames()); } catch (Exception e) { throw new HiveException(e); } @@ -1396,6 +1448,218 @@ public void setCurrentDatabase(String currentDatabase) { this.currentDatabase = currentDatabase; } + + public void createRole(String roleName) throws HiveException { + try { + getMSC().create_role(roleName, ""); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public void dropRole(String roleName) throws HiveException { + try { + getMSC().drop_role(roleName); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List showRoleGrant(String principalName, PrincipalType principalType) throws HiveException { + try { + return getMSC().list_roles(principalName, principalType); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public boolean addRoleMember(String roleName, String userName, + PrincipalType principalType) throws HiveException { + try { + return getMSC().add_role_member(roleName, userName, principalType); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public boolean removeRoleMember(String roleName, String userName, + PrincipalType principalType) throws HiveException { + try { + return getMSC().remove_role_member(roleName, userName, principalType); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List listRoles(String userName, PrincipalType principalType) + throws HiveException { + try { + return getMSC().list_roles(userName, principalType); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param user_name + * user name + * @param group_names + * group names + * @return + */ + public PrincipalPrivilegeSet get_user_privilege_set(String user_name, + List group_names) throws HiveException { + try { + HiveObjectRef hiveObj = new HiveObjectRef(HiveObjectType.GLOBAL, null, null, null, null); + return getMSC().get_privilege_set(hiveObj, user_name, 
group_names); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param db_name + * database name + * @param user_name + * user name + * @param group_names + * group names + * @return + */ + public PrincipalPrivilegeSet get_db_privilege_set(String db_name, + String user_name, List group_names) throws HiveException { + try { + HiveObjectRef hiveObj = new HiveObjectRef(HiveObjectType.DATABASE, db_name, null, null, null); + return getMSC().get_privilege_set(hiveObj, user_name, group_names); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param db_name + * db name + * @param table_name + * table name + * @param user_name + * user name + * @param group_names + * group names + * @return + */ + public PrincipalPrivilegeSet get_table_privilege_set(String db_name, + String table_name, String user_name, List group_names) + throws HiveException{ + try { + HiveObjectRef hiveObj = new HiveObjectRef(HiveObjectType.TABLE, db_name, table_name, null, null); + return getMSC().get_privilege_set(hiveObj, user_name, group_names); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param db_name + * db name + * @param table_name + * table name + * @param part_values + * partition values + * @param user_name + * user name + * @param group_names + * group names + * @return + */ + public PrincipalPrivilegeSet get_partition_privilege_set(String db_name, + String table_name, List part_values, String user_name, + List group_names) throws HiveException { + try { + HiveObjectRef hiveObj = new HiveObjectRef(HiveObjectType.PARTITION, db_name, table_name, part_values, null); + return getMSC().get_privilege_set(hiveObj, user_name, group_names); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** + * @param db_name + * database name + * @param table_name + * table name + * @param part_name + * partition name + * @param column_name + * column name + * @param user_name + * user name + * @param group_names + * group names + * @return + */ + public PrincipalPrivilegeSet get_column_privilege_set(String db_name, + String table_name, List part_values, String column_name, + String user_name, List group_names) throws HiveException { + try { + HiveObjectRef hiveObj = new HiveObjectRef(HiveObjectType.COLUMN, db_name, table_name, part_values, column_name); + return getMSC().get_privilege_set(hiveObj, user_name, group_names); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List showUserLevelGrant(String principalName, + PrincipalType principalType) throws HiveException { + try { + HiveObjectRef hiveObj = new HiveObjectRef(HiveObjectType.GLOBAL, null, null, null, null); + return getMSC().list_privileges(principalName, principalType, hiveObj); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List showDBLevelGrant(String principalName, + PrincipalType principalType, String dbName) throws HiveException { + try { + HiveObjectRef hiveObj = new HiveObjectRef(HiveObjectType.DATABASE, dbName, null, null, null); + return getMSC().list_privileges(principalName, principalType, hiveObj); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List showTableLevelGrant( + String principalName, PrincipalType principalType, String dbName, + String tableName) throws HiveException { + try { + HiveObjectRef hiveObj = new HiveObjectRef(HiveObjectType.TABLE, dbName, tableName, null, null); + return getMSC().list_privileges(principalName, principalType, hiveObj); + } catch (Exception e) { + 
throw new HiveException(e); + } + } + + public List showPartitionGrant( + String principalName, PrincipalType principalType, String dbName, + String tableName, List partValues) throws HiveException { + try { + HiveObjectRef hiveObj = new HiveObjectRef(HiveObjectType.PARTITION, dbName, tableName, partValues, null); + return getMSC().list_privileges(principalName, principalType, hiveObj); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public List showColumnGrant(String principalName, + PrincipalType principalType, String dbName, String tableName, + List partValues, String columnName) throws HiveException { + try { + HiveObjectRef hiveObj = new HiveObjectRef(HiveObjectType.COLUMN, dbName, tableName, partValues, columnName); + return getMSC().list_privileges(principalName, principalType, hiveObj); + } catch (Exception e) { + throw new HiveException(e); + } + } static private void checkPaths(FileSystem fs, FileStatus[] srcs, Path destf, boolean replace) throws HiveException { @@ -1636,6 +1900,22 @@ } return metaStoreClient; } + + private String getUserName() { + SessionState ss = SessionState.get(); + if (ss != null && ss.getAuthenticator() != null) { + return ss.getAuthenticator().getUserName(); + } + return null; + } + + private List getGroupNames() { + SessionState ss = SessionState.get(); + if (ss != null && ss.getAuthenticator() != null) { + return ss.getAuthenticator().getGroupNames(); + } + return null; + } public static List getFieldsFromDeserializer(String name, Deserializer serde) throws HiveException { Index: ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java (working copy) @@ -71,7 +71,7 @@ protected Context ctx; protected HashMap idToTableNameMap; - + public static int HIVE_COLUMN_ORDER_ASC = 1; public static int HIVE_COLUMN_ORDER_DESC = 0; @@ -719,4 +719,8 @@ } return partSpec; } + + public Hive getDb() { + return db; + } } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (working copy) @@ -49,6 +49,7 @@ import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -71,6 +72,13 @@ import org.apache.hadoop.hive.ql.plan.AlterTableSimpleDesc; import org.apache.hadoop.hive.ql.plan.CreateDatabaseDesc; import org.apache.hadoop.hive.ql.plan.CreateIndexDesc; +import org.apache.hadoop.hive.ql.plan.GrantDesc; +import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL; +import org.apache.hadoop.hive.ql.plan.PrincipalDesc; +import org.apache.hadoop.hive.ql.plan.PrivilegeDesc; +import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; +import org.apache.hadoop.hive.ql.plan.RevokeDesc; +import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DescDatabaseDesc; import 
org.apache.hadoop.hive.ql.plan.DescFunctionDesc; @@ -83,6 +91,7 @@ import org.apache.hadoop.hive.ql.plan.MsckDesc; import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc; import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; +import org.apache.hadoop.hive.ql.plan.ShowGrantDesc; import org.apache.hadoop.hive.ql.plan.ShowIndexesDesc; import org.apache.hadoop.hive.ql.plan.ShowLocksDesc; import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; @@ -91,6 +100,9 @@ import org.apache.hadoop.hive.ql.plan.SwitchDatabaseDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.UnlockTableDesc; +import org.apache.hadoop.hive.ql.security.authorization.Privilege; +import org.apache.hadoop.hive.ql.security.authorization.PrivilegeRegistry; +import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.mapred.TextInputFormat; @@ -254,11 +266,243 @@ analyzeDropDatabase(ast); } else if (ast.getToken().getType() == TOK_SWITCHDATABASE) { analyzeSwitchDatabase(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_CREATEROLE) { + analyzeCreateRole(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_DROPROLE) { + analyzeDropRole(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_SHOW_ROLE_GRANT) { + ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); + analyzeShowRoleGrant(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_GRANT_ROLE) { + analyzeGrantRevokeRole(true, ast); + } else if (ast.getToken().getType() == HiveParser.TOK_REVOKE_ROLE) { + analyzeGrantRevokeRole(false, ast); + } else if (ast.getToken().getType() == HiveParser.TOK_GRANT) { + analyzeGrant(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_SHOW_GRANT) { + ctx.setResFile(new Path(ctx.getLocalTmpFileURI())); + analyzeShowGrant(ast); + } else if (ast.getToken().getType() == HiveParser.TOK_REVOKE) { + analyzeRevoke(ast); } else { throw new SemanticException("Unsupported command."); } } + private void analyzeGrantRevokeRole(boolean grant, ASTNode ast) { + List principalDesc = analyzePrincipalListDef( + (ASTNode) ast.getChild(0)); + List roles = new ArrayList(); + for (int i = 1; i < ast.getChildCount(); i++) { + roles.add(unescapeIdentifier(ast.getChild(i).getText())); + } + + GrantRevokeRoleDDL grantRevokeRoleDDL = new GrantRevokeRoleDDL(grant, roles, principalDesc); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + grantRevokeRoleDDL), conf)); + } + + private void analyzeShowGrant(ASTNode ast) throws SemanticException { + PrivilegeObjectDesc privHiveObj = null; + + ASTNode principal = (ASTNode) ast.getChild(0); + PrincipalType type = PrincipalType.USER; + switch (principal.getType()) { + case HiveParser.TOK_USER: + type = PrincipalType.USER; + break; + case HiveParser.TOK_GROUP: + type = PrincipalType.GROUP; + break; + case HiveParser.TOK_ROLE: + type = PrincipalType.ROLE; + break; + } + String principalName = unescapeIdentifier(principal.getChild(0).getText()); + PrincipalDesc principalDesc = new PrincipalDesc(principalName, type); + List cols = null; + if (ast.getChildCount() > 1) { + ASTNode child = (ASTNode) ast.getChild(1); + if (child.getToken().getType() == HiveParser.TOK_PRIV_OBJECT_COL) { + privHiveObj = new PrivilegeObjectDesc(); + privHiveObj.setObject(unescapeIdentifier(child.getChild(0).getText())); + if (child.getChildCount() > 1) { + for (int i = 1; i < child.getChildCount(); i++) { + ASTNode 
grandChild = (ASTNode) child.getChild(i); + if (grandChild.getToken().getType() == HiveParser.TOK_PARTSPEC) { + privHiveObj.setPartSpec(DDLSemanticAnalyzer.getPartSpec(grandChild)); + } else if (grandChild.getToken().getType() == HiveParser.TOK_TABCOLNAME) { + cols = getColumnNames((ASTNode) grandChild); + } else { + privHiveObj.setTable(child.getChild(i) != null); + } + } + } + } + } + + if (privHiveObj == null && cols != null) { + throw new SemanticException( + "For user-level privileges, column sets should be null. columns=" + + cols.toString()); + } + + ShowGrantDesc showGrant = new ShowGrantDesc(ctx.getResFile().toString(), + principalDesc, privHiveObj, cols); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + showGrant), conf)); + } + + private void analyzeGrant(ASTNode ast) throws SemanticException { + List privilegeDesc = analyzePrivilegeListDef( + (ASTNode) ast.getChild(0)); + List principalDesc = analyzePrincipalListDef( + (ASTNode) ast.getChild(1)); + boolean grantOption = false; + PrivilegeObjectDesc subjectObj = null; + + if (ast.getChildCount() > 2) { + for (int i = 2; i < ast.getChildCount(); i++) { + ASTNode astChild = (ASTNode) ast.getChild(i); + if (astChild.getType() == HiveParser.TOK_GRANT_WITH_OPTION) { + grantOption = true; + } else if (astChild.getType() == HiveParser.TOK_PRIV_OBJECT) { + subjectObj = analyzePrivilegeObject(astChild); + } + } + } + + String userName = null; + if (SessionState.get() != null + && SessionState.get().getAuthenticator() != null) { + userName = SessionState.get().getAuthenticator().getUserName(); + } + + GrantDesc grantDesc = new GrantDesc(subjectObj, privilegeDesc, + principalDesc, userName, PrincipalType.USER, grantOption); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + grantDesc), conf)); + } + + private void analyzeRevoke(ASTNode ast) throws SemanticException { + List privilegeDesc = analyzePrivilegeListDef( + (ASTNode) ast.getChild(0)); + List principalDesc = analyzePrincipalListDef( + (ASTNode) ast.getChild(1)); + PrivilegeObjectDesc hiveObj = null; + if (ast.getChildCount() > 2) { + ASTNode astChild = (ASTNode) ast.getChild(2); + hiveObj = analyzePrivilegeObject(astChild); + } + + RevokeDesc revokeDesc = new RevokeDesc(privilegeDesc, principalDesc, hiveObj); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + revokeDesc), conf)); + } + + + private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast) + throws SemanticException { + PrivilegeObjectDesc subject = new PrivilegeObjectDesc(); + subject.setObject(unescapeIdentifier(ast.getChild(0).getText())); + if (ast.getChildCount() > 1) { + for (int i =0;i< ast.getChildCount();i++) { + ASTNode astChild = (ASTNode) ast.getChild(i); + if (astChild.getToken().getType() == HiveParser.TOK_PARTSPEC) { + subject.setPartSpec(DDLSemanticAnalyzer.getPartSpec(astChild)); + } else { + subject.setTable(ast.getChild(0) != null); + } + } + } + return subject; + } + + private List analyzePrincipalListDef(ASTNode node) { + List principalList = new ArrayList(); + + for (int i = 0; i < node.getChildCount(); i++) { + ASTNode child = (ASTNode) node.getChild(i); + PrincipalType type = null; + switch (child.getType()) { + case HiveParser.TOK_USER: + type = PrincipalType.USER; + break; + case HiveParser.TOK_GROUP: + type = PrincipalType.GROUP; + break; + case HiveParser.TOK_ROLE: + type = PrincipalType.ROLE; + break; + } + String principalName = unescapeIdentifier(child.getChild(0).getText()); + PrincipalDesc principalDesc = new 
PrincipalDesc(principalName, type); + principalList.add(principalDesc); + } + + return principalList; + } + + private List analyzePrivilegeListDef(ASTNode node) + throws SemanticException { + List ret = new ArrayList(); + for (int i = 0; i < node.getChildCount(); i++) { + ASTNode privilegeDef = (ASTNode) node.getChild(i); + + String privilegeStr = unescapeIdentifier(privilegeDef.getChild(0) + .getText()); + Privilege privObj = PrivilegeRegistry.getPrivilege(privilegeStr); + if (privObj == null) { + throw new SemanticException("undefined privilege " + privilegeStr); + } + List cols = null; + if (privilegeDef.getChildCount() > 1) { + cols = getColumnNames((ASTNode) privilegeDef.getChild(1)); + } + PrivilegeDesc privilegeDesc = new PrivilegeDesc(privObj, cols); + ret.add(privilegeDesc); + } + return ret; + } + + private void analyzeCreateRole(ASTNode ast) { + String roleName = unescapeIdentifier(ast.getChild(0).getText()); + RoleDDLDesc createRoleDesc = new RoleDDLDesc(roleName, + RoleDDLDesc.RoleOperation.CREATE_ROLE); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + createRoleDesc), conf)); + } + + private void analyzeDropRole(ASTNode ast) { + String roleName = unescapeIdentifier(ast.getChild(0).getText()); + RoleDDLDesc createRoleDesc = new RoleDDLDesc(roleName, + RoleDDLDesc.RoleOperation.DROP_ROLE); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + createRoleDesc), conf)); + } + + private void analyzeShowRoleGrant(ASTNode ast) { + ASTNode child = (ASTNode) ast.getChild(0); + PrincipalType principalType = PrincipalType.USER; + switch (child.getType()) { + case HiveParser.TOK_USER: + principalType = PrincipalType.USER; + break; + case HiveParser.TOK_GROUP: + principalType = PrincipalType.GROUP; + break; + case HiveParser.TOK_ROLE: + principalType = PrincipalType.ROLE; + break; + } + String principalName = unescapeIdentifier(child.getChild(0).getText()); + RoleDDLDesc createRoleDesc = new RoleDDLDesc(principalName, principalType, + RoleDDLDesc.RoleOperation.SHOW_ROLE_GRANT); + createRoleDesc.setResFile(ctx.getResFile().toString()); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + createRoleDesc), conf)); + } + private void analyzeCreateDatabase(ASTNode ast) throws SemanticException { String dbName = unescapeIdentifier(ast.getChild(0).getText()); boolean ifNotExists = false; Index: ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (working copy) @@ -189,6 +189,23 @@ TOK_LATERAL_VIEW; TOK_TABALIAS; TOK_ANALYZE; +TOK_CREATEROLE; +TOK_DROPROLE; +TOK_GRANT; +TOK_REVOKE; +TOK_SHOW_GRANT; +TOK_PRIVILEGE_LIST; +TOK_PRIVILEGE; +TOK_PRINCIPAL_NAME; +TOK_USER; +TOK_GROUP; +TOK_ROLE; +TOK_GRANT_WITH_OPTION; +TOK_PRIV_OBJECT; +TOK_PRIV_OBJECT_COL; +TOK_GRANT_ROLE; +TOK_REVOKE_ROLE; +TOK_SHOW_ROLE_GRANT; TOK_SHOWINDEXES; TOK_INDEXCOMMENT; TOK_DESCDATABASE; @@ -263,6 +280,14 @@ | analyzeStatement | lockStatement | unlockStatement + | createRoleStatement + | dropRoleStatement + | grantPrivileges + | revokePrivileges + | showGrants + | showRoleGrants + | grantRole + | revokeRole ; ifExists @@ -721,6 +746,114 @@ : KW_UNLOCK KW_TABLE Identifier partitionSpec? -> ^(TOK_UNLOCKTABLE Identifier partitionSpec?) 
; +createRoleStatement +@init { msgs.push("create role"); } +@after { msgs.pop(); } + : KW_CREATE KW_ROLE roleName=Identifier + -> ^(TOK_CREATEROLE $roleName) + ; + +dropRoleStatement +@init {msgs.push("drop role");} +@after {msgs.pop();} + : KW_DROP KW_ROLE roleName=Identifier + -> ^(TOK_DROPROLE $roleName) + ; + +grantPrivileges +@init {msgs.push("grant privileges");} +@after {msgs.pop();} + : KW_GRANT privList=privilegeList + privilegeObject? + KW_TO principalSpecification + (KW_WITH withOption)? + -> ^(TOK_GRANT $privList principalSpecification privilegeObject? withOption?) + ; + +revokePrivileges +@init {msgs.push("revoke privileges");} +@afer {msgs.pop();} + : KW_REVOKE privilegeList privilegeObject? KW_FROM principalSpecification + -> ^(TOK_REVOKE privilegeList principalSpecification privilegeObject?) + ; + +grantRole +@init {msgs.push("grant role");} +@after {msgs.pop();} + : KW_GRANT KW_ROLE Identifier (COMMA Identifier)* KW_TO principalSpecification + -> ^(TOK_GRANT_ROLE principalSpecification Identifier+) + ; + +revokeRole +@init {msgs.push("grant role");} +@after {msgs.pop();} + : KW_REVOKE KW_ROLE Identifier (COMMA Identifier)* KW_FROM principalSpecification + -> ^(TOK_REVOKE_ROLE principalSpecification Identifier+) + ; + +showRoleGrants +@init {msgs.push("show grants");} +@after {msgs.pop();} + : KW_SHOW KW_ROLE KW_GRANT principalName + -> ^(TOK_SHOW_ROLE_GRANT principalName) + ; + +showGrants +@init {msgs.push("show grants");} +@after {msgs.pop();} + : KW_SHOW KW_GRANT principalName privilegeIncludeColObject? + -> ^(TOK_SHOW_GRANT principalName privilegeIncludeColObject?) + ; + +privilegeIncludeColObject +@init {msgs.push("privilege object including columns");} +@after {msgs.pop();} + : KW_ON (table=KW_TABLE|KW_DATABASE) Identifier (LPAREN cols=columnNameList RPAREN)? partitionSpec? + -> ^(TOK_PRIV_OBJECT_COL Identifier $table? $cols? partitionSpec?) + ; + +privilegeObject +@init {msgs.push("privilege subject");} +@after {msgs.pop();} + : KW_ON (table=KW_TABLE|KW_DATABASE) Identifier partitionSpec? + -> ^(TOK_PRIV_OBJECT Identifier $table? partitionSpec?) + ; + +privilegeList +@init {msgs.push("grant privilege list");} +@after {msgs.pop();} + : privlegeDef (COMMA privlegeDef)* + -> ^(TOK_PRIVILEGE_LIST privlegeDef+) + ; + +privlegeDef +@init {msgs.push("grant privilege");} +@after {msgs.pop();} + : Identifier (LPAREN cols=columnNameList RPAREN)? + -> ^(TOK_PRIVILEGE Identifier $cols?) + ; + +principalSpecification +@init { msgs.push("user/group/role name list"); } +@after { msgs.pop(); } + : principalName (COMMA principalName)* -> ^(TOK_PRINCIPAL_NAME principalName+) + ; + +principalName +@init {msgs.push("user|group|role name");} +@after {msgs.pop();} + : KW_USER Identifier -> ^(TOK_USER Identifier) + | KW_GROUP Identifier -> ^(TOK_GROUP Identifier) + | KW_ROLE Identifier -> ^(TOK_ROLE Identifier) + ; + +withOption +@init {msgs.push("grant with option");} +@after {msgs.pop();} + : KW_GRANT KW_OPTION + -> ^(TOK_GRANT_WITH_OPTION) + ; + metastoreCheck @init { msgs.push("metastore check statement"); } @after { msgs.pop(); } @@ -1965,6 +2098,10 @@ KW_COMPUTE: 'COMPUTE'; KW_STATISTICS: 'STATISTICS'; KW_USE: 'USE'; +KW_USER: 'USER'; +KW_ROLE: 'ROLE'; +KW_OPTION: 'OPTION'; + // Operators // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work. 
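Taken together, the role and privilege rules above admit statements of the following shapes, written in the HiveQL that this grammar defines. Every table, database, user, group, and role name below is a hypothetical illustration, and priv1/priv2 are placeholder privilege identifiers: the concrete privilege names are validated later against PrivilegeRegistry, which this patch references but does not show.

-- grantPrivileges: the ON clause and WITH GRANT OPTION are both optional
GRANT priv1 ON TABLE page_views TO USER alice, GROUP engineering WITH GRANT OPTION;
-- privlegeDef also accepts a column list
GRANT priv1(ip, referrer) ON TABLE page_views TO ROLE analyst;
-- privilegeObject can name a database instead of a table
GRANT priv2 ON DATABASE analytics TO USER alice;
-- revokePrivileges
REVOKE priv1 ON TABLE page_views FROM GROUP engineering;
-- grantRole / revokeRole take one or more role identifiers
GRANT ROLE analyst, auditor TO USER bob;
REVOKE ROLE analyst FROM USER bob;
-- showRoleGrants
SHOW ROLE GRANT USER bob;
-- showGrants: privilegeIncludeColObject optionally adds columns and a partition spec
SHOW GRANT USER alice ON TABLE page_views(ip) PARTITION (ds='2010-12-01');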
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy) @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.ql.parse; -import static org.apache.hadoop.util.StringUtils.stringifyException; - import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; @@ -125,6 +123,7 @@ import org.apache.hadoop.hive.ql.plan.FilterDesc; import org.apache.hadoop.hive.ql.plan.ForwardDesc; import org.apache.hadoop.hive.ql.plan.GroupByDesc; +import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.plan.JoinCondDesc; import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.ql.plan.LateralViewForwardDesc; @@ -888,7 +887,7 @@ } catch (HiveException e) { // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils - LOG.error(stringifyException(e)); + LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); throw new SemanticException(e.getMessage(), e); } } @@ -924,7 +923,7 @@ // an old SQL construct which has been eliminated in a later Hive // version, so we need to provide full debugging info to help // with fixing the view definition. - LOG.error(stringifyException(e)); + LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); StringBuilder sb = new StringBuilder(); sb.append(e.getMessage()); ErrorMsg.renderOrigin(sb, viewOrigin); @@ -5876,7 +5875,7 @@ tsDesc.setStatsAggPrefix(k); // set up WritenEntity for replication - outputs.add(new WriteEntity(tab)); + outputs.add(new WriteEntity(tab, true)); // add WriteEntity for each matching partition if (tab.isPartitioned()) { @@ -5887,7 +5886,7 @@ if (partitions != null) { for (Partition partn : partitions) { // inputs.add(new ReadEntity(partn)); // is this needed at all? - outputs.add(new WriteEntity(partn)); + outputs.add(new WriteEntity(partn, true)); } } } @@ -6144,7 +6143,7 @@ } catch (HiveException e) { // Has to use full name to make sure it does not conflict with // org.apache.commons.lang.StringUtils - LOG.error(stringifyException(e)); + LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e)); throw new SemanticException(e.getMessage(), e); } @@ -6500,6 +6499,9 @@ ASTNode child = ast; LOG.info("Starting Semantic Analysis"); + + //overwrite this if needed. + SessionState.get().setCommandType(HiveOperation.QUERY); // analyze create table command if (ast.getToken().getType() == HiveParser.TOK_CREATETABLE) { @@ -6512,12 +6514,13 @@ // analyze create view command if (ast.getToken().getType() == HiveParser.TOK_CREATEVIEW) { child = analyzeCreateView(ast, qb); + SessionState.get().setCommandType(HiveOperation.CREATEVIEW); if (child == null) { return; } viewSelect = child; } - + // continue analyzing from the child ASTNode. doPhase1(child, qb, initPhase1Ctx()); LOG.info("Completed phase 1 of Semantic Analysis"); @@ -7060,6 +7063,9 @@ storageFormat.storageHandler, shared.serdeProps, tblProps, ifNotExists); validateCreateTable(crtTblDesc); + // outputs is empty, which means this create table happens in the current + // database. 
+ SessionState.get().setCommandType(HiveOperation.CREATETABLE); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblDesc), conf)); break; @@ -7067,6 +7073,7 @@ case CTLT: // create table like CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(tableName, isExt, location, ifNotExists, likeTableName); + SessionState.get().setCommandType(HiveOperation.CREATETABLE); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblLikeDesc), conf)); break; @@ -7091,6 +7098,8 @@ tblProps, ifNotExists); qb.setTableDesc(crtTblDesc); + SessionState.get().setCommandType(HiveOperation.CREATETABLE_AS_SELECT); + return selectStmt; default: throw new SemanticException("Unrecognized command."); Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (working copy) @@ -21,6 +21,7 @@ import java.util.HashMap; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.session.SessionState; /** @@ -29,63 +30,74 @@ */ public final class SemanticAnalyzerFactory { - static HashMap commandType = new HashMap(); - static HashMap tablePartitionCommandType = new HashMap(); + static HashMap commandType = new HashMap(); + static HashMap tablePartitionCommandType = new HashMap(); static { - commandType.put(HiveParser.TOK_EXPLAIN, "EXPLAIN"); - commandType.put(HiveParser.TOK_LOAD, "LOAD"); - commandType.put(HiveParser.TOK_CREATEDATABASE, "CREATEDATABASE"); - commandType.put(HiveParser.TOK_DROPDATABASE, "DROPDATABASE"); - commandType.put(HiveParser.TOK_SWITCHDATABASE, "SWITCHDATABASE"); - commandType.put(HiveParser.TOK_CREATETABLE, "CREATETABLE"); - commandType.put(HiveParser.TOK_DROPTABLE, "DROPTABLE"); - commandType.put(HiveParser.TOK_DESCTABLE, "DESCTABLE"); - commandType.put(HiveParser.TOK_DESCFUNCTION, "DESCFUNCTION"); - commandType.put(HiveParser.TOK_MSCK, "MSCK"); - commandType.put(HiveParser.TOK_ALTERTABLE_ADDCOLS, "ALTERTABLE_ADDCOLS"); - commandType.put(HiveParser.TOK_ALTERTABLE_REPLACECOLS, "ALTERTABLE_REPLACECOLS"); - commandType.put(HiveParser.TOK_ALTERTABLE_RENAMECOL, "ALTERTABLE_RENAMECOL"); - commandType.put(HiveParser.TOK_ALTERTABLE_RENAME, "ALTERTABLE_RENAME"); - commandType.put(HiveParser.TOK_ALTERTABLE_DROPPARTS, "ALTERTABLE_DROPPARTS"); - commandType.put(HiveParser.TOK_ALTERTABLE_ADDPARTS, "ALTERTABLE_ADDPARTS"); - commandType.put(HiveParser.TOK_ALTERTABLE_TOUCH, "ALTERTABLE_TOUCH"); - commandType.put(HiveParser.TOK_ALTERTABLE_ARCHIVE, "ALTERTABLE_ARCHIVE"); - commandType.put(HiveParser.TOK_ALTERTABLE_UNARCHIVE, "ALTERTABLE_UNARCHIVE"); - commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, "ALTERTABLE_PROPERTIES"); - commandType.put(HiveParser.TOK_ALTERTABLE_SERIALIZER, "ALTERTABLE_SERIALIZER"); - commandType.put(HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES, "ALTERTABLE_SERDEPROPERTIES"); - commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, "ALTERINDEX_REBUILD"); - commandType.put(HiveParser.TOK_ALTERINDEX_PROPERTIES, "ALTERINDEX_PROPS"); - commandType.put(HiveParser.TOK_SHOWDATABASES, "SHOWDATABASES"); - commandType.put(HiveParser.TOK_SHOWTABLES, "SHOWTABLES"); - commandType.put(HiveParser.TOK_SHOW_TABLESTATUS, "SHOW_TABLESTATUS"); - commandType.put(HiveParser.TOK_SHOWFUNCTIONS, "SHOWFUNCTIONS"); - 
commandType.put(HiveParser.TOK_SHOWPARTITIONS, "SHOWPARTITIONS"); - commandType.put(HiveParser.TOK_SHOWINDEXES, "SHOWINDEXES"); - commandType.put(HiveParser.TOK_SHOWLOCKS, "SHOWLOCKS"); - commandType.put(HiveParser.TOK_CREATEFUNCTION, "CREATEFUNCTION"); - commandType.put(HiveParser.TOK_DROPFUNCTION, "DROPFUNCTION"); - commandType.put(HiveParser.TOK_CREATEVIEW, "CREATEVIEW"); - commandType.put(HiveParser.TOK_DROPVIEW, "DROPVIEW"); - commandType.put(HiveParser.TOK_CREATEINDEX, "CREATEINDEX"); - commandType.put(HiveParser.TOK_DROPINDEX, "DROPINDEX"); - commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, "ALTERVIEW_PROPERTIES"); - commandType.put(HiveParser.TOK_QUERY, "QUERY"); - commandType.put(HiveParser.TOK_LOCKTABLE, "LOCKTABLE"); - commandType.put(HiveParser.TOK_UNLOCKTABLE, "UNLOCKTABLE"); + commandType.put(HiveParser.TOK_EXPLAIN, HiveOperation.EXPLAIN); + commandType.put(HiveParser.TOK_LOAD, HiveOperation.LOAD); + commandType.put(HiveParser.TOK_CREATEDATABASE, HiveOperation.CREATEDATABASE); + commandType.put(HiveParser.TOK_DROPDATABASE, HiveOperation.DROPDATABASE); + commandType.put(HiveParser.TOK_SWITCHDATABASE, HiveOperation.SWITCHDATABASE); + commandType.put(HiveParser.TOK_CREATETABLE, HiveOperation.CREATETABLE); + commandType.put(HiveParser.TOK_DROPTABLE, HiveOperation.DROPTABLE); + commandType.put(HiveParser.TOK_DESCTABLE, HiveOperation.DESCTABLE); + commandType.put(HiveParser.TOK_DESCFUNCTION, HiveOperation.DESCFUNCTION); + commandType.put(HiveParser.TOK_MSCK, HiveOperation.MSCK); + commandType.put(HiveParser.TOK_ALTERTABLE_ADDCOLS, HiveOperation.ALTERTABLE_ADDCOLS); + commandType.put(HiveParser.TOK_ALTERTABLE_REPLACECOLS, HiveOperation.ALTERTABLE_REPLACECOLS); + commandType.put(HiveParser.TOK_ALTERTABLE_RENAMECOL, HiveOperation.ALTERTABLE_RENAMECOL); + commandType.put(HiveParser.TOK_ALTERTABLE_RENAME, HiveOperation.ALTERTABLE_RENAME); + commandType.put(HiveParser.TOK_ALTERTABLE_DROPPARTS, HiveOperation.ALTERTABLE_DROPPARTS); + commandType.put(HiveParser.TOK_ALTERTABLE_ADDPARTS, HiveOperation.ALTERTABLE_ADDPARTS); + commandType.put(HiveParser.TOK_ALTERTABLE_TOUCH, HiveOperation.ALTERTABLE_TOUCH); + commandType.put(HiveParser.TOK_ALTERTABLE_ARCHIVE, HiveOperation.ALTERTABLE_ARCHIVE); + commandType.put(HiveParser.TOK_ALTERTABLE_UNARCHIVE, HiveOperation.ALTERTABLE_UNARCHIVE); + commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES); + commandType.put(HiveParser.TOK_ALTERTABLE_SERIALIZER, HiveOperation.ALTERTABLE_SERIALIZER); + commandType.put(HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES, HiveOperation.ALTERTABLE_SERDEPROPERTIES); + commandType.put(HiveParser.TOK_ALTERTABLE_CLUSTER_SORT, HiveOperation.ALTERTABLE_CLUSTER_SORT); + commandType.put(HiveParser.TOK_SHOWDATABASES, HiveOperation.SHOWDATABASES); + commandType.put(HiveParser.TOK_SHOWTABLES, HiveOperation.SHOWTABLES); + commandType.put(HiveParser.TOK_SHOW_TABLESTATUS, HiveOperation.SHOW_TABLESTATUS); + commandType.put(HiveParser.TOK_SHOWFUNCTIONS, HiveOperation.SHOWFUNCTIONS); + commandType.put(HiveParser.TOK_SHOWINDEXES, HiveOperation.SHOWINDEXES); + commandType.put(HiveParser.TOK_SHOWPARTITIONS, HiveOperation.SHOWPARTITIONS); + commandType.put(HiveParser.TOK_SHOWLOCKS, HiveOperation.SHOWLOCKS); + commandType.put(HiveParser.TOK_CREATEFUNCTION, HiveOperation.CREATEFUNCTION); + commandType.put(HiveParser.TOK_DROPFUNCTION, HiveOperation.DROPFUNCTION); + commandType.put(HiveParser.TOK_CREATEVIEW, HiveOperation.CREATEVIEW); + commandType.put(HiveParser.TOK_DROPVIEW, HiveOperation.DROPVIEW); + 
commandType.put(HiveParser.TOK_CREATEINDEX, HiveOperation.CREATEINDEX); + commandType.put(HiveParser.TOK_DROPINDEX, HiveOperation.DROPINDEX); + commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, HiveOperation.ALTERINDEX_REBUILD); + commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES); + commandType.put(HiveParser.TOK_QUERY, HiveOperation.QUERY); + commandType.put(HiveParser.TOK_LOCKTABLE, HiveOperation.LOCKTABLE); + commandType.put(HiveParser.TOK_UNLOCKTABLE, HiveOperation.UNLOCKTABLE); + commandType.put(HiveParser.TOK_CREATEROLE, HiveOperation.CREATEROLE); + commandType.put(HiveParser.TOK_DROPROLE, HiveOperation.DROPROLE); + commandType.put(HiveParser.TOK_GRANT, HiveOperation.GRANT_PRIVILEGE); + commandType.put(HiveParser.TOK_REVOKE, HiveOperation.REVOKE_PRIVILEGE); + commandType.put(HiveParser.TOK_SHOW_GRANT, HiveOperation.SHOW_GRANT); + commandType.put(HiveParser.TOK_GRANT_ROLE, HiveOperation.GRANT_ROLE); + commandType.put(HiveParser.TOK_REVOKE_ROLE, HiveOperation.REVOKE_ROLE); + commandType.put(HiveParser.TOK_SHOW_ROLE_GRANT, HiveOperation.SHOW_ROLE_GRANT); } static { - tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE, - new String[] { "ALTERTABLE_PROTECTMODE", "ALTERPARTITION_PROTECTMODE" }); + tablePartitionCommandType.put( + HiveParser.TOK_ALTERTABLE_ALTERPARTS_PROTECTMODE, + new HiveOperation[] { HiveOperation.ALTERTABLE_PROTECTMODE, + HiveOperation.ALTERPARTITION_PROTECTMODE }); tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_FILEFORMAT, - new String[] { "ALTERTABLE_FILEFORMAT", "ALTERPARTITION_FILEFORMAT" }); + new HiveOperation[] { HiveOperation.ALTERTABLE_FILEFORMAT, + HiveOperation.ALTERPARTITION_FILEFORMAT }); tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_LOCATION, - new String[] { "ALTERTABLE_LOCATION", "ALTERPARTITION_LOCATION" }); + new HiveOperation[] { HiveOperation.ALTERTABLE_LOCATION, + HiveOperation.ALTERPARTITION_LOCATION }); } - public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) throws SemanticException { if (tree.getToken() == null) { @@ -134,9 +146,17 @@ case HiveParser.TOK_ALTERTABLE_UNARCHIVE: case HiveParser.TOK_LOCKTABLE: case HiveParser.TOK_UNLOCKTABLE: + case HiveParser.TOK_CREATEROLE: + case HiveParser.TOK_DROPROLE: + case HiveParser.TOK_GRANT: + case HiveParser.TOK_REVOKE: + case HiveParser.TOK_SHOW_GRANT: + case HiveParser.TOK_GRANT_ROLE: + case HiveParser.TOK_REVOKE_ROLE: + case HiveParser.TOK_SHOW_ROLE_GRANT: return new DDLSemanticAnalyzer(conf); case HiveParser.TOK_ALTERTABLE_PARTITION: - String commandType = null; + HiveOperation commandType = null; Integer type = ((ASTNode) tree.getChild(1)).getToken().getType(); if (tree.getChild(0).getChildCount() > 1) { commandType = tablePartitionCommandType.get(type)[1]; @@ -154,7 +174,7 @@ } } - private static void setSessionCommandType(String commandType) { + private static void setSessionCommandType(HiveOperation commandType) { if (SessionState.get() != null) { SessionState.get().setCommandType(commandType); } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (working copy) @@ -58,6 +58,12 @@ private ShowIndexesDesc showIndexesDesc; private DescDatabaseDesc descDbDesc; + private RoleDDLDesc roleDDLDesc; + private GrantDesc grantDesc; + private ShowGrantDesc showGrantDesc; + private RevokeDesc 
revokeDesc; + private GrantRevokeRoleDDL grantRevokeRoleDDL; + /** * ReadEntities that are passed to the hooks. */ @@ -328,6 +334,36 @@ } public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, + RoleDDLDesc roleDDLDesc) { + this(inputs, outputs); + this.roleDDLDesc = roleDDLDesc; + } + + public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, + GrantDesc grantDesc) { + this(inputs, outputs); + this.grantDesc = grantDesc; + } + + public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, + ShowGrantDesc showGrant) { + this(inputs, outputs); + this.showGrantDesc = showGrant; + } + + public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, + RevokeDesc revokeDesc) { + this(inputs, outputs); + this.revokeDesc = revokeDesc; + } + + public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, + GrantRevokeRoleDDL grantRevokeRoleDDL) { + this(inputs, outputs); + this.grantRevokeRoleDDL = grantRevokeRoleDDL; + } + + public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs, ShowIndexesDesc showIndexesDesc) { this(inputs, outputs); this.showIndexesDesc = showIndexesDesc; @@ -740,4 +776,68 @@ this.dropIdxDesc = dropIdxDesc; } + /** + * @return role ddl desc + */ + public RoleDDLDesc getRoleDDLDesc() { + return roleDDLDesc; + } + + /** + * @param roleDDLDesc role ddl desc + */ + public void setRoleDDLDesc(RoleDDLDesc roleDDLDesc) { + this.roleDDLDesc = roleDDLDesc; + } + + /** + * @return grant desc + */ + public GrantDesc getGrantDesc() { + return grantDesc; + } + + /** + * @param grantDesc grant desc + */ + public void setGrantDesc(GrantDesc grantDesc) { + this.grantDesc = grantDesc; + } + + /** + * @return show grant desc + */ + public ShowGrantDesc getShowGrantDesc() { + return showGrantDesc; + } + + /** + * @param showGrantDesc show grant desc + */ + public void setShowGrantDesc(ShowGrantDesc showGrantDesc) { + this.showGrantDesc = showGrantDesc; + } + + public RevokeDesc getRevokeDesc() { + return revokeDesc; + } + + public void setRevokeDesc(RevokeDesc revokeDesc) { + this.revokeDesc = revokeDesc; + } + + /** + * @return grant or revoke role ddl operation descriptor + */ + public GrantRevokeRoleDDL getGrantRevokeRoleDDL() { + return grantRevokeRoleDDL; + } + + /** + * @param grantRevokeRoleDDL grant or revoke role ddl operation descriptor + */ + public void setGrantRevokeRoleDDL(GrantRevokeRoleDDL grantRevokeRoleDDL) { + this.grantRevokeRoleDDL = grantRevokeRoleDDL; + } + } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/GrantDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/GrantDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/GrantDesc.java (revision 0) @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.PrincipalType; + +@Explain(displayName = "Grant") +public class GrantDesc extends DDLDesc implements Serializable, Cloneable { + + private static final long serialVersionUID = 1L; + + private List<PrivilegeDesc> privileges; + + private List<PrincipalDesc> principals; + + private boolean grantOption; + + private String grantor; + + private PrincipalType grantorType; + + private PrivilegeObjectDesc privilegeSubjectDesc; + + public GrantDesc(PrivilegeObjectDesc privilegeSubject, + List<PrivilegeDesc> privilegeDesc, List<PrincipalDesc> principalDesc, + String grantor, PrincipalType grantorType, boolean grantOption) { + super(); + this.privilegeSubjectDesc = privilegeSubject; + this.privileges = privilegeDesc; + this.principals = principalDesc; + this.grantor = grantor; + this.grantorType = grantorType; + this.grantOption = grantOption; + } + + /** + * @return privileges + */ + @Explain(displayName = "Privileges") + public List<PrivilegeDesc> getPrivileges() { + return privileges; + } + + /** + * @param privileges + */ + public void setPrivileges(List<PrivilegeDesc> privileges) { + this.privileges = privileges; + } + + /** + * @return principals + */ + @Explain(displayName = "Principals") + public List<PrincipalDesc> getPrincipals() { + return principals; + } + + /** + * @param principals + */ + public void setPrincipals(List<PrincipalDesc> principals) { + this.principals = principals; + } + + /** + * @return grant option + */ + @Explain(displayName = "grant option") + public boolean isGrantOption() { + return grantOption; + } + + /** + * @param grantOption + */ + public void setGrantOption(boolean grantOption) { + this.grantOption = grantOption; + } + + /** + * @return privilege subject + */ + @Explain(displayName="privilege subject") + public PrivilegeObjectDesc getPrivilegeSubjectDesc() { + return privilegeSubjectDesc; + } + + /** + * @param privilegeSubjectDesc + */ + public void setPrivilegeSubjectDesc(PrivilegeObjectDesc privilegeSubjectDesc) { + this.privilegeSubjectDesc = privilegeSubjectDesc; + } + + public String getGrantor() { + return grantor; + } + + public void setGrantor(String grantor) { + this.grantor = grantor; + } + + public PrincipalType getGrantorType() { + return grantorType; + } + + public void setGrantorType(PrincipalType grantorType) { + this.grantorType = grantorType; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/GrantRevokeRoleDDL.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/GrantRevokeRoleDDL.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/GrantRevokeRoleDDL.java (revision 0) @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.util.List; + +@Explain(displayName="grant or revoke roles") +public class GrantRevokeRoleDDL { + + private boolean grant; + + private List<PrincipalDesc> principalDesc; + + private List<String> roles; + + public GrantRevokeRoleDDL() { + } + + public GrantRevokeRoleDDL(boolean grant, List<String> roles, List<PrincipalDesc> principalDesc) { + super(); + this.grant = grant; + this.principalDesc = principalDesc; + this.roles = roles; + } + + /** + * @return true if this DDL grants roles, false if it revokes them + */ + @Explain(displayName="grant (or revoke)") + public boolean getGrant() { + return grant; + } + + public void setGrant(boolean grant) { + this.grant = grant; + } + + /** + * @return a list of principals + */ + @Explain(displayName="principals") + public List<PrincipalDesc> getPrincipalDesc() { + return principalDesc; + } + + public void setPrincipalDesc(List<PrincipalDesc> principalDesc) { + this.principalDesc = principalDesc; + } + + /** + * @return a list of roles + */ + @Explain(displayName="roles") + public List<String> getRoles() { + return roles; + } + + public void setRoles(List<String> roles) { + this.roles = roles; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (revision 0) @@ -0,0 +1,182 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import org.apache.hadoop.hive.ql.security.authorization.Privilege; + +public enum HiveOperation { + + EXPLAIN("EXPLAIN", null, null), + LOAD("LOAD", null, new Privilege[]{Privilege.ALTER_DATA}), + CREATEDATABASE("CREATEDATABASE", null, null), + DROPDATABASE("DROPDATABASE", null, null), + SWITCHDATABASE("SWITCHDATABASE", null, null), + DROPTABLE ("DROPTABLE", null, new Privilege[]{Privilege.DROP}), + DESCTABLE("DESCTABLE", null, null), + DESCFUNCTION("DESCFUNCTION", null, null), + MSCK("MSCK", null, null), + ALTERTABLE_ADDCOLS("ALTERTABLE_ADDCOLS", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_REPLACECOLS("ALTERTABLE_REPLACECOLS", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_RENAMECOL("ALTERTABLE_RENAMECOL", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_RENAME("ALTERTABLE_RENAME", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_DROPPARTS("ALTERTABLE_DROPPARTS", new Privilege[]{Privilege.DROP}, null), + ALTERTABLE_ADDPARTS("ALTERTABLE_ADDPARTS", new Privilege[]{Privilege.CREATE}, null), + ALTERTABLE_TOUCH("ALTERTABLE_TOUCH", null, null), + ALTERTABLE_ARCHIVE("ALTERTABLE_ARCHIVE", new Privilege[]{Privilege.ALTER_DATA}, null), + ALTERTABLE_UNARCHIVE("ALTERTABLE_UNARCHIVE", new Privilege[]{Privilege.ALTER_DATA}, null), + ALTERTABLE_PROPERTIES("ALTERTABLE_PROPERTIES", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_SERIALIZER("ALTERTABLE_SERIALIZER", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_SERDEPROPERTIES("ALTERTABLE_SERDEPROPERTIES", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_CLUSTER_SORT("ALTERTABLE_CLUSTER_SORT", new Privilege[]{Privilege.ALTER_METADATA}, null), + SHOWDATABASES("SHOWDATABASES", new Privilege[]{Privilege.SHOW_DATABASE}, null), + SHOWTABLES("SHOWTABLES", null, null), + SHOW_TABLESTATUS("SHOW_TABLESTATUS", null, null), + SHOWFUNCTIONS("SHOWFUNCTIONS", null, null), + SHOWINDEXES("SHOWINDEXES", null, null), + SHOWPARTITIONS("SHOWPARTITIONS", null, null), + SHOWLOCKS("SHOWLOCKS", null, null), + CREATEFUNCTION("CREATEFUNCTION", null, null), + DROPFUNCTION("DROPFUNCTION", null, null), + CREATEVIEW("CREATEVIEW", null, null), + DROPVIEW("DROPVIEW", null, null), + CREATEINDEX("CREATEINDEX", null, null), + DROPINDEX("DROPINDEX", null, null), + ALTERINDEX_REBUILD("ALTERINDEX_REBUILD", null, null), + ALTERVIEW_PROPERTIES("ALTERVIEW_PROPERTIES", null, null), + LOCKTABLE("LOCKTABLE", new Privilege[]{Privilege.LOCK}, null), + UNLOCKTABLE("UNLOCKTABLE", new Privilege[]{Privilege.LOCK}, null), + CREATEROLE("CREATEROLE", null, null), + DROPROLE("DROPROLE", null, null), + GRANT_PRIVILEGE("GRANT_PRIVILEGE", null, null), + REVOKE_PRIVILEGE("REVOKE_PRIVILEGE", null, null), + SHOW_GRANT("SHOW_GRANT", null, null), + GRANT_ROLE("GRANT_ROLE", null, null), + REVOKE_ROLE("REVOKE_ROLE", null, null), + SHOW_ROLE_GRANT("SHOW_ROLE_GRANT", null, null), + ALTERTABLE_PROTECTMODE("ALTERTABLE_PROTECTMODE", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERPARTITION_PROTECTMODE("ALTERPARTITION_PROTECTMODE", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_FILEFORMAT("ALTERTABLE_FILEFORMAT", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERPARTITION_FILEFORMAT("ALTERPARTITION_FILEFORMAT", new Privilege[]{Privilege.ALTER_METADATA}, null), + ALTERTABLE_LOCATION("ALTERTABLE_LOCATION", new Privilege[]{Privilege.ALTER_DATA}, null), + ALTERPARTITION_LOCATION("ALTERPARTITION_LOCATION", new 
Privilege[]{Privilege.ALTER_DATA}, null), + CREATETABLE("CREATETABLE", null, new Privilege[]{Privilege.CREATE}), + CREATETABLE_AS_SELECT("CREATETABLE_AS_SELECT", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.CREATE}), + QUERY("QUERY", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA, Privilege.CREATE}), + ; + + private String operationName; + + private Privilege[] inputRequiredPrivileges; + + private Privilege[] outputRequiredPrivileges; + + public Privilege[] getInputRequiredPrivileges() { + return inputRequiredPrivileges; + } + + public Privilege[] getOutputRequiredPrivileges() { + return outputRequiredPrivileges; + } + + public String getOperationName() { + return operationName; + } + + private HiveOperation(String operationName, + Privilege[] inputRequiredPrivileges, Privilege[] outputRequiredPrivileges) { + this.operationName = operationName; + this.inputRequiredPrivileges = inputRequiredPrivileges; + this.outputRequiredPrivileges = outputRequiredPrivileges; + } + + public static class PrivilegeAgreement { + + private Privilege[] inputUserLevelRequiredPriv; + private Privilege[] inputDBLevelRequiredPriv; + private Privilege[] inputTableLevelRequiredPriv; + private Privilege[] inputColumnLevelRequiredPriv; + private Privilege[] outputUserLevelRequiredPriv; + private Privilege[] outputDBLevelRequiredPriv; + private Privilege[] outputTableLevelRequiredPriv; + private Privilege[] outputColumnLevelRequiredPriv; + + public PrivilegeAgreement putUserLevelRequiredPriv( + Privilege[] inputUserLevelRequiredPriv, + Privilege[] outputUserLevelRequiredPriv) { + this.inputUserLevelRequiredPriv = inputUserLevelRequiredPriv; + this.outputUserLevelRequiredPriv = outputUserLevelRequiredPriv; + return this; + } + + public PrivilegeAgreement putDBLevelRequiredPriv( + Privilege[] inputDBLevelRequiredPriv, + Privilege[] outputDBLevelRequiredPriv) { + this.inputDBLevelRequiredPriv = inputDBLevelRequiredPriv; + this.outputDBLevelRequiredPriv = outputDBLevelRequiredPriv; + return this; + } + + public PrivilegeAgreement putTableLevelRequiredPriv( + Privilege[] inputTableLevelRequiredPriv, + Privilege[] outputTableLevelRequiredPriv) { + this.inputTableLevelRequiredPriv = inputTableLevelRequiredPriv; + this.outputTableLevelRequiredPriv = outputTableLevelRequiredPriv; + return this; + } + + public PrivilegeAgreement putColumnLevelRequiredPriv( + Privilege[] inputColumnLevelPriv, Privilege[] outputColumnLevelPriv) { + this.inputColumnLevelRequiredPriv = inputColumnLevelPriv; + this.outputColumnLevelRequiredPriv = outputColumnLevelPriv; + return this; + } + + public Privilege[] getInputUserLevelRequiredPriv() { + return inputUserLevelRequiredPriv; + } + + public Privilege[] getInputDBLevelRequiredPriv() { + return inputDBLevelRequiredPriv; + } + + public Privilege[] getInputTableLevelRequiredPriv() { + return inputTableLevelRequiredPriv; + } + + public Privilege[] getInputColumnLevelRequiredPriv() { + return inputColumnLevelRequiredPriv; + } + + public Privilege[] getOutputUserLevelRequiredPriv() { + return outputUserLevelRequiredPriv; + } + + public Privilege[] getOutputDBLevelRequiredPriv() { + return outputDBLevelRequiredPriv; + } + + public Privilege[] getOutputTableLevelRequiredPriv() { + return outputTableLevelRequiredPriv; + } + + public Privilege[] getOutputColumnLevelRequiredPriv() { + return outputColumnLevelRequiredPriv; + } + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PrincipalDesc.java =================================================================== 
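A small, self-contained usage sketch (editor's illustration; only HiveOperation and Privilege come from this patch, while the class name and main() harness are invented for the example):

import org.apache.hadoop.hive.ql.plan.HiveOperation;
import org.apache.hadoop.hive.ql.security.authorization.Privilege;

public class HiveOperationPrivilegeSketch {
  public static void main(String[] args) {
    // Per the enum above, QUERY inputs need SELECT and QUERY outputs need
    // ALTER_DATA and CREATE; EXPLAIN declares no requirements at all.
    dump(HiveOperation.QUERY);
    dump(HiveOperation.EXPLAIN);
  }

  private static void dump(HiveOperation op) {
    System.out.println(op.getOperationName());
    print("  inputs", op.getInputRequiredPrivileges());
    print("  outputs", op.getOutputRequiredPrivileges());
  }

  private static void print(String side, Privilege[] privs) {
    if (privs == null) {
      // a null array means no privileges are required on this side
      System.out.println(side + ": no privileges required");
      return;
    }
    for (Privilege p : privs) {
      System.out.println(side + ": requires " + p.getPriv());
    }
  }
}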
--- ql/src/java/org/apache/hadoop/hive/ql/plan/PrincipalDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PrincipalDesc.java (revision 0) @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.metastore.api.PrincipalType; + +@Explain(displayName = "Principal") +public class PrincipalDesc implements Serializable, Cloneable { + + private static final long serialVersionUID = 1L; + + private String name; + + private PrincipalType type; + + public PrincipalDesc(String name, PrincipalType type) { + super(); + this.name = name; + this.type = type; + } + + public PrincipalDesc() { + super(); + } + + @Explain(displayName="name") + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Explain(displayName="type") + public PrincipalType getType() { + return type; + } + + public void setType(PrincipalType type) { + this.type = type; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeDesc.java (revision 0) @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; + +import org.apache.hadoop.hive.ql.security.authorization.Privilege; + +@Explain(displayName = "Privilege") +public class PrivilegeDesc implements Serializable, Cloneable { + private static final long serialVersionUID = 1L; + + private Privilege privilege; + + private List<String> columns; + + public PrivilegeDesc(Privilege privilege, List<String> columns) { + super(); + this.privilege = privilege; + this.columns = columns; + } + + public PrivilegeDesc() { + super(); + } + + /** + * @return privilege definition + */ + @Explain(displayName = "privilege") + public Privilege getPrivilege() { + return privilege; + } + + /** + * @param privilege + */ + public void setPrivilege(Privilege privilege) { + this.privilege = privilege; + } + + /** + * @return columns on which the given privilege takes effect. + */ + @Explain(displayName = "columns") + public List<String> getColumns() { + return columns; + } + + /** + * @param columns + */ + public void setColumns(List<String> columns) { + this.columns = columns; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PrivilegeObjectDesc.java (revision 0) @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.util.HashMap; + +@Explain(displayName="privilege subject") +public class PrivilegeObjectDesc { + + private boolean table; + + private String object; + + private HashMap<String, String> partSpec; + + public PrivilegeObjectDesc(boolean isTable, String object, + HashMap<String, String> partSpec) { + super(); + this.table = isTable; + this.object = object; + this.partSpec = partSpec; + } + + public PrivilegeObjectDesc() { + } + + @Explain(displayName="is table") + public boolean getTable() { + return table; + } + + public void setTable(boolean isTable) { + this.table = isTable; + } + + @Explain(displayName="object") + public String getObject() { + return object; + } + + public void setObject(String object) { + this.object = object; + } + + @Explain(displayName="partition spec") + public HashMap<String, String> getPartSpec() { + return partSpec; + } + + public void setPartSpec(HashMap<String, String> partSpec) { + this.partSpec = partSpec; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/RevokeDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/RevokeDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/RevokeDesc.java (revision 0) @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; + +@Explain(displayName="Revoke") +public class RevokeDesc extends DDLDesc implements Serializable, Cloneable { + + private static final long serialVersionUID = 1L; + + private List<PrivilegeDesc> privileges; + + private List<PrincipalDesc> principals; + + private PrivilegeObjectDesc privilegeSubjectDesc; + + public RevokeDesc() { + } + + public RevokeDesc(List<PrivilegeDesc> privileges, + List<PrincipalDesc> principals, PrivilegeObjectDesc privilegeSubjectDesc) { + super(); + this.privileges = privileges; + this.principals = principals; + this.privilegeSubjectDesc = privilegeSubjectDesc; + } + + public List<PrivilegeDesc> getPrivileges() { + return privileges; + } + + public void setPrivileges(List<PrivilegeDesc> privileges) { + this.privileges = privileges; + } + + public List<PrincipalDesc> getPrincipals() { + return principals; + } + + public void setPrincipals(List<PrincipalDesc> principals) { + this.principals = principals; + } + + public PrivilegeObjectDesc getPrivilegeSubjectDesc() { + return privilegeSubjectDesc; + } + + public void setPrivilegeSubjectDesc(PrivilegeObjectDesc privilegeSubjectDesc) { + this.privilegeSubjectDesc = privilegeSubjectDesc; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java (revision 0) @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.metastore.api.PrincipalType; + +@Explain(displayName = "Create Role") +public class RoleDDLDesc extends DDLDesc implements Serializable { + + private static final long serialVersionUID = 1L; + + private String name; + + private PrincipalType principalType; + + private boolean group; + + private RoleOperation operation; + + private String resFile; + + public static enum RoleOperation { + DROP_ROLE("drop_role"), CREATE_ROLE("create_role"), SHOW_ROLE_GRANT("show_roles"); + private String operationName; + + private RoleOperation() { + } + + private RoleOperation(String operationName) { + this.operationName = operationName; + } + + public String getOperationName() { + return operationName; + } + + public String toString () { + return this.operationName; + } + } + + public RoleDDLDesc(){ + } + + public RoleDDLDesc(String roleName, RoleOperation operation) { + this(roleName, PrincipalType.USER, operation); + } + + public RoleDDLDesc(String principalName, PrincipalType principalType, + RoleOperation operation) { + this.name = principalName; + this.principalType = principalType; + this.operation = operation; + } + + @Explain(displayName = "name") + public String getName() { + return name; + } + + public void setName(String roleName) { + this.name = roleName; + } + + @Explain(displayName = "role operation") + public RoleOperation getOperation() { + return operation; + } + + public void setOperation(RoleOperation operation) { + this.operation = operation; + } + + public PrincipalType getPrincipalType() { + return principalType; + } + + public void setPrincipalType(PrincipalType principalType) { + this.principalType = principalType; + } + + public boolean getGroup() { + return group; + } + + public void setGroup(boolean group) { + this.group = group; + } + + public String getResFile() { + return resFile; + } + + public void setResFile(String resFile) { + this.resFile = resFile; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowGrantDesc.java (revision 0) @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.plan; + +import java.util.List; + +@Explain(displayName="show grant desc") +public class ShowGrantDesc { + + private PrincipalDesc principalDesc; + + private PrivilegeObjectDesc hiveObj; + + private List<String> columns; + + private String resFile; + + public ShowGrantDesc() { + } + + public ShowGrantDesc(String resFile, PrincipalDesc principalDesc, + PrivilegeObjectDesc subjectObj, List<String> columns) { + this.resFile = resFile; + this.principalDesc = principalDesc; + this.hiveObj = subjectObj; + this.columns = columns; + } + + @Explain(displayName="principal desc") + public PrincipalDesc getPrincipalDesc() { + return principalDesc; + } + + public void setPrincipalDesc(PrincipalDesc principalDesc) { + this.principalDesc = principalDesc; + } + + @Explain(displayName="object") + public PrivilegeObjectDesc getHiveObj() { + return hiveObj; + } + + public void setHiveObj(PrivilegeObjectDesc subjectObj) { + this.hiveObj = subjectObj; + } + + public String getResFile() { + return resFile; + } + + public void setResFile(String resFile) { + this.resFile = resFile; + } + + public List<String> getColumns() { + return columns; + } + + public void setColumns(List<String> columns) { + this.columns = columns; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/Authenticator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/Authenticator.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/Authenticator.java (revision 0) @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.security; + +import java.util.List; + +import org.apache.hadoop.conf.Configuration; + +public interface Authenticator { + + public String getUserName(); + + public List<String> getGroupNames(); + + public boolean destroy(); + + public void init(Configuration conf); + +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/AuthenticatorFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/AuthenticatorFactory.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/AuthenticatorFactory.java (revision 0) @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.security; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.metadata.HiveException; + +public class AuthenticatorFactory { + + @SuppressWarnings("unchecked") + public static Authenticator getAuthenticator(Configuration conf) + throws HiveException { + + String clsStr = HiveConf.getVar(conf, + HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER); + + Authenticator ret = null; + try { + Class<? extends Authenticator> cls = null; + if (clsStr == null || clsStr.trim().equals("")) { + cls = HadoopDefaultAuthenticator.class; + } else { + cls = (Class<? extends Authenticator>) Class + .forName(clsStr); + } + if (cls != null) { + ret = cls.newInstance(); + ret.init(conf); + } + } catch (Exception e) { + throw new HiveException(e); + } + + return ret; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/HadoopDefaultAuthenticator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/HadoopDefaultAuthenticator.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/HadoopDefaultAuthenticator.java (revision 0) @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.security; + +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.security.UserGroupInformation; + +public class HadoopDefaultAuthenticator implements Authenticator { + + private String userName; + private List<String> groupNames; + + @Override + public List<String> getGroupNames() { + return groupNames; + } + + @Override + public String getUserName() { + return userName; + } + + @Override + public void init(Configuration conf) { + UserGroupInformation ugi = null; + try { + ugi = ShimLoader.getHadoopShims().getUGIForConf(conf); + } catch (Exception e) { + throw new RuntimeException(e); + } + + if (ugi == null) { + throw new RuntimeException( + "Can not initialize HadoopDefaultAuthenticator."); + } + + this.userName = ugi.getUserName(); + if (ugi.getGroupNames() != null) { + this.groupNames = Arrays.asList(ugi.getGroupNames()); + } + + System.out.println("User Name is :" + this.getUserName()); + System.out.println("Group Names are :" + this.getGroupNames()); + } + + @Override + public boolean destroy() { + return true; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationManagerFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationManagerFactory.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationManagerFactory.java (revision 0) @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.security.authorization; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.security.Authenticator; + +public class AuthorizationManagerFactory { + + @SuppressWarnings("unchecked") + public static HiveAuthorizationProvider getAuthorizeProviderManager( + Configuration conf, Authenticator authenticator) throws HiveException { + + String clsStr = HiveConf.getVar(conf, + HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER); + + HiveAuthorizationProvider ret = null; + try { + Class<? extends HiveAuthorizationProvider> cls = null; + if (clsStr == null || clsStr.trim().equals("")) { + cls = DefaultHiveAuthorizationProvider.class; + } else { + cls = (Class<? extends HiveAuthorizationProvider>) Class + .forName(clsStr); + } + if (cls != null) { + ret = cls.newInstance(); + ret.init(conf); + } + } catch (Exception e) { + throw new HiveException(e); + } + + ret.setAuthenticator(authenticator); + return ret; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationProvider.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationProvider.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationProvider.java (revision 0) @@ -0,0 +1,455 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.security.authorization; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.ql.metadata.AuthorizationException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.util.StringUtils; + +public class DefaultHiveAuthorizationProvider extends + HiveAuthorizationProviderBase { + + static class BitSetChecker { + + boolean[] inputCheck = null; + boolean[] outputCheck = null; + + public static BitSetChecker getBitSetChecker(Privilege[] inputRequiredPriv, + Privilege[] outputRequiredPriv) { + BitSetChecker checker = new BitSetChecker(); + if (inputRequiredPriv != null) { + checker.inputCheck = new boolean[inputRequiredPriv.length]; + for (int i = 0; i < checker.inputCheck.length; i++) { + checker.inputCheck[i] = false; + } + } + if (outputRequiredPriv != null) { + checker.outputCheck = new boolean[outputRequiredPriv.length]; + for (int i = 0; i < checker.outputCheck.length; i++) { + checker.outputCheck[i] = false; + } + } + + return checker; + } + + } + + @Override + public boolean authorize(Privilege[] inputRequiredPriv, + Privilege[] outputRequiredPriv) throws HiveException, AuthorizationException { + + BitSetChecker checker = BitSetChecker.getBitSetChecker(inputRequiredPriv, + outputRequiredPriv); + boolean[] inputCheck = checker.inputCheck; + boolean[] outputCheck = checker.outputCheck; + + boolean pass = authorizeUserPriv(inputRequiredPriv, inputCheck, outputRequiredPriv, + outputCheck); + checkAndThrowAuthorizationException(inputRequiredPriv, outputRequiredPriv, + inputCheck, outputCheck, null, null, null, null); + return pass; + } + + @Override + public boolean authorize(Database db, Privilege[] inputRequiredPriv, + Privilege[] outputRequiredPriv) throws HiveException, AuthorizationException { + + BitSetChecker checker = BitSetChecker.getBitSetChecker(inputRequiredPriv, + outputRequiredPriv); + boolean[] inputCheck = checker.inputCheck; + boolean[] outputCheck = checker.outputCheck; + + boolean pass = authorizeUserAndDBPriv(db, inputRequiredPriv, outputRequiredPriv, + inputCheck, outputCheck); + + checkAndThrowAuthorizationException(inputRequiredPriv, outputRequiredPriv, + inputCheck, outputCheck, db.getName(), null, null, null); + + return pass; + } + + @Override + public boolean authorize(Table table, Privilege[] inputRequiredPriv, + Privilege[] outputRequiredPriv) throws HiveException { + BitSetChecker checker = BitSetChecker.getBitSetChecker(inputRequiredPriv, + outputRequiredPriv); + boolean[] inputCheck = checker.inputCheck; + boolean[] outputCheck = checker.outputCheck; + + boolean pass = authorizeUserDBAndTable(table, inputRequiredPriv, + outputRequiredPriv, inputCheck, outputCheck); + checkAndThrowAuthorizationException(inputRequiredPriv, outputRequiredPriv, + inputCheck, outputCheck, table.getDbName(), table.getTableName(), + null, null); + + return pass; + } + + @Override + public boolean authorize(Partition part, Privilege[] inputRequiredPriv, + Privilege[] outputRequiredPriv) throws HiveException { + BitSetChecker checker = BitSetChecker.getBitSetChecker(inputRequiredPriv, + outputRequiredPriv); + 
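// Editor's illustration, not patch content: BitSetChecker (defined above)
// mirrors each required-privilege array with a same-length boolean array
// that starts all-false; e.g. inputRequiredPriv = {SELECT, LOCK} yields
// inputCheck = {false, false}. The authorize* helpers flip entries to true
// as matching user, group, or role grants are found, and
// checkAndThrowAuthorizationException raises AuthorizationException for
// the first entry left false.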
boolean[] inputCheck = checker.inputCheck; + boolean[] outputCheck = checker.outputCheck; + + if (authorizeUserDbAndPartition(part, inputRequiredPriv, outputRequiredPriv, + inputCheck, outputCheck)) { + return true; + } + + checkAndThrowAuthorizationException(inputRequiredPriv, outputRequiredPriv, + inputCheck, outputCheck, part.getTable().getDbName(), part + .getTable().getTableName(), part.getName(), null); + + return false; + } + + @Override + public boolean authorize(Table table, Partition part, List<String> columns, + Privilege[] inputRequiredPriv, Privilege[] outputRequiredPriv) + throws HiveException { + BitSetChecker checker = BitSetChecker.getBitSetChecker(inputRequiredPriv, + outputRequiredPriv); + boolean[] inputCheck = checker.inputCheck; + boolean[] outputCheck = checker.outputCheck; + + String partName = null; + List<String> partValues = null; + if (part != null + && (table.getParameters().get("PARTITION_LEVEL_PRIVILEGE") != null && ("TRUE" + .equalsIgnoreCase(table.getParameters().get( + "PARTITION_LEVEL_PRIVILEGE"))))) { + partName = part.getName(); + partValues = part.getValues(); + } + + if (partValues == null) { + if (authorizeUserDBAndTable(table, inputRequiredPriv, outputRequiredPriv, + inputCheck, outputCheck)) { + return true; + } + } else { + if (authorizeUserDbAndPartition(part, inputRequiredPriv, + outputRequiredPriv, inputCheck, outputCheck)) { + return true; + } + } + + for (String col : columns) { + + BitSetChecker checker2 = BitSetChecker.getBitSetChecker( + inputRequiredPriv, outputRequiredPriv); + boolean[] inputCheck2 = checker2.inputCheck; + boolean[] outputCheck2 = checker2.outputCheck; + + PrincipalPrivilegeSet partColumnPrivileges = hive_db + .get_column_privilege_set(table.getDbName(), table.getTableName(), + partValues, col, this.getAuthenticator().getUserName(), this + .getAuthenticator().getGroupNames()); + + authorizePrivileges(partColumnPrivileges, inputRequiredPriv, inputCheck2, + outputRequiredPriv, outputCheck2); + + if (inputCheck2 != null) { + booleanArrayOr(inputCheck2, inputCheck); + } + if (outputCheck2 != null) { + booleanArrayOr(outputCheck2, outputCheck); + } + + checkAndThrowAuthorizationException(inputRequiredPriv, + outputRequiredPriv, inputCheck2, outputCheck2, table.getDbName(), + table.getTableName(), partName, col); + } + + return true; + } + + protected boolean authorizeUserPriv(Privilege[] inputRequiredPriv, + boolean[] inputCheck, Privilege[] outputRequiredPriv, + boolean[] outputCheck) throws HiveException { + PrincipalPrivilegeSet privileges = hive_db.get_user_privilege_set(this + .getAuthenticator().getUserName(), this.getAuthenticator() + .getGroupNames()); + return authorizePrivileges(privileges, inputRequiredPriv, inputCheck, + outputRequiredPriv, outputCheck); + } + + private boolean authorizeUserAndDBPriv(Database db, + Privilege[] inputRequiredPriv, Privilege[] outputRequiredPriv, + boolean[] inputCheck, boolean[] outputCheck) throws HiveException { + if (authorizeUserPriv(inputRequiredPriv, inputCheck, outputRequiredPriv, + outputCheck)) { + return true; + } + + PrincipalPrivilegeSet dbPrivileges = hive_db.get_db_privilege_set(db + .getName(), this.getAuthenticator().getUserName(), this + .getAuthenticator().getGroupNames()); + + if (authorizePrivileges(dbPrivileges, inputRequiredPriv, inputCheck, + outputRequiredPriv, outputCheck)) { + return true; + } + + return false; + } + + private boolean authorizeUserDBAndTable(Table table, + Privilege[] inputRequiredPriv, Privilege[] outputRequiredPriv, + boolean[] inputCheck, boolean[]
outputCheck) throws HiveException { + + if (authorizeUserAndDBPriv(hive_db.getDatabase(table.getDbName()), + inputRequiredPriv, outputRequiredPriv, inputCheck, outputCheck)) { + return true; + } + + PrincipalPrivilegeSet tablePrivileges = hive_db.get_table_privilege_set( + table.getDbName(), table.getTableName(), this.getAuthenticator() + .getUserName(), this.getAuthenticator().getGroupNames()); + + if (authorizePrivileges(tablePrivileges, inputRequiredPriv, inputCheck, + outputRequiredPriv, outputCheck)) { + return true; + } + + return false; + } + + private boolean authorizeUserDbAndPartition(Partition part, + Privilege[] inputRequiredPriv, Privilege[] outputRequiredPriv, + boolean[] inputCheck, boolean[] outputCheck) throws HiveException { + + if (authorizeUserAndDBPriv( + hive_db.getDatabase(part.getTable().getDbName()), inputRequiredPriv, + outputRequiredPriv, inputCheck, outputCheck)) { + return true; + } + + PrincipalPrivilegeSet partPrivileges = part.getTPartition().getPrivileges(); + if(partPrivileges == null) { + partPrivileges = hive_db + .get_partition_privilege_set(part.getTable().getDbName(), part + .getTable().getTableName(), part.getValues(), this.getAuthenticator() + .getUserName(), this.getAuthenticator().getGroupNames()); + } + + if (authorizePrivileges(partPrivileges, inputRequiredPriv, inputCheck, + outputRequiredPriv, outputCheck)) { + return true; + } + + return false; + } + + protected boolean authorizePrivileges(PrincipalPrivilegeSet privileges, + Privilege[] inputPriv, boolean[] inputCheck, Privilege[] outputPriv, + boolean[] outputCheck) throws HiveException { + + boolean pass = true; + if (inputPriv != null) { + pass = pass && matchPrivs(inputPriv, privileges, inputCheck); + } + if (outputPriv != null) { + pass = pass && matchPrivs(outputPriv, privileges, outputCheck); + } + return pass; + } + + /** + * try to match an array of privileges from user/groups/roles grants. 
+ * + * @param inputPriv the privileges required + * @param privileges the user, group, and role grants to match against + * @param check flags marking which required privileges have been satisfied + */ + private boolean matchPrivs(Privilege[] inputPriv, + PrincipalPrivilegeSet privileges, boolean[] check) { + + if (inputPriv == null) + return true; + + if (privileges == null) + return false; + + /* + * user grants + */ + Set<String> privSet = new HashSet<String>(); + if (privileges.getUserPrivileges() != null + && privileges.getUserPrivileges().size() > 0) { + Collection<List<PrivilegeGrantInfo>> privCollection = privileges.getUserPrivileges().values(); + + List<String> userPrivs = getPrivilegeStringList(privCollection); + if (userPrivs != null && userPrivs.size() > 0) { + for (String priv : userPrivs) { + if (priv == null || priv.trim().equals("")) + continue; + if (priv.equalsIgnoreCase(Privilege.ALL.toString())) { + setBooleanArray(check, true); + return true; + } + privSet.add(priv.toLowerCase()); + } + } + } + + /* + * group grants + */ + if (privileges.getGroupPrivileges() != null + && privileges.getGroupPrivileges().size() > 0) { + Collection<List<PrivilegeGrantInfo>> groupPrivCollection = privileges + .getGroupPrivileges().values(); + List<String> groupPrivs = getPrivilegeStringList(groupPrivCollection); + if (groupPrivs != null && groupPrivs.size() > 0) { + for (String priv : groupPrivs) { + if (priv == null || priv.trim().equals("")) + continue; + if (priv.equalsIgnoreCase(Privilege.ALL.toString())) { + setBooleanArray(check, true); + return true; + } + privSet.add(priv.toLowerCase()); + } + } + } + + /* + * role grants + */ + if (privileges.getRolePrivileges() != null + && privileges.getRolePrivileges().size() > 0) { + Collection<List<PrivilegeGrantInfo>> rolePrivsCollection = privileges + .getRolePrivileges().values(); + List<String> rolePrivs = getPrivilegeStringList(rolePrivsCollection); + if (rolePrivs != null && rolePrivs.size() > 0) { + for (String priv : rolePrivs) { + if (priv == null || priv.trim().equals("")) + continue; + if (priv.equalsIgnoreCase(Privilege.ALL.toString())) { + setBooleanArray(check, true); + return true; + } + privSet.add(priv.toLowerCase()); + } + } + } + + for (int i = 0; i < inputPriv.length; i++) { + String toMatch = inputPriv[i].getPriv(); + if (!check[i]) { + check[i] = privSet.contains(toMatch.toLowerCase()); + } + } + + return firstFalseIndex(check) < 0; + } + + private List<String> getPrivilegeStringList( + Collection<List<PrivilegeGrantInfo>> privCollection) { + List<String> userPrivs = new ArrayList<String>(); + if (privCollection != null && privCollection.size() > 0) { + for (List<PrivilegeGrantInfo> grantList : privCollection) { + if (grantList == null) { + continue; + } + for (int i = 0; i < grantList.size(); i++) { + PrivilegeGrantInfo grant = grantList.get(i); + userPrivs.add(grant.getPrivilege()); + } + } + } + return userPrivs; + } + + private static void setBooleanArray(boolean[] check, boolean b) { + for (int i = 0; i < check.length; i++) { + check[i] = b; + } + } + + private static void booleanArrayOr(boolean[] output, boolean[] input) { + for (int i = 0; i < output.length && i < input.length; i++) { + output[i] = output[i] || input[i]; + } + } + + private void checkAndThrowAuthorizationException( + Privilege[] inputRequiredPriv, Privilege[] outputRequiredPriv, + boolean[] inputCheck, boolean[] outputCheck, String dbName, + String tableName, String partitionName, String columnName) { + + String hiveObject = "{ "; + if (dbName != null) { + hiveObject = hiveObject + "database:" + dbName; + } + if (tableName != null) { + hiveObject = hiveObject + ", table:" + tableName; + } + if (partitionName != null) { + hiveObject = hiveObject + ", partitionName:" + partitionName; + } + if (columnName != null) { + hiveObject = hiveObject + ", columnName:" + columnName; + } + hiveObject = 
hiveObject + "}"; + + if (inputCheck != null) { + int input = this.firstFalseIndex(inputCheck); + if (input >= 0) { + throw new AuthorizationException("No privilege '" + + inputRequiredPriv[input].getPriv() + "' found for inputs " + + hiveObject); + } + } + + if (outputCheck != null) { + int output = this.firstFalseIndex(outputCheck); + if (output >= 0) { + throw new AuthorizationException("No privilege '" + + outputRequiredPriv[output].getPriv() + "' found for outputs " + + hiveObject); + } + } + } + + private int firstFalseIndex(boolean[] inputCheck) { + if (inputCheck != null) { + for (int i = 0; i < inputCheck.length; i++) { + if (!inputCheck[i]) { + return i; + } + } + } + return -1; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProvider.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProvider.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProvider.java (revision 0) @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.security.authorization; + +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.ql.metadata.AuthorizationException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.security.Authenticator; + +/** + * Hive's authorization provider manager's interface. + */ +public interface HiveAuthorizationProvider { + + public void init(Configuration conf) throws HiveException; + + public Authenticator getAuthenticator(); + + public void setAuthenticator(Authenticator authenticator); + + /** + * Authorization user level privileges. + * + * @param readRequiredPriv + * a list of privileges needed for inputs. + * @param writeRequiredPriv + * a list of privileges needed for outputs. + * @return + * @throws HiveException + * @throws AuthorizationException + */ + public boolean authorize(Privilege[] readRequiredPriv, + Privilege[] writeRequiredPriv) throws HiveException, + AuthorizationException; + + /** + * Authorization privileges against a database object. + * + * @param db + * database + * @param readRequiredPriv + * a list of privileges needed for inputs. + * @param writeRequiredPriv + * a list of privileges needed for outputs. 
+ * @return true if the principal holds all required privileges + * @throws HiveException + * @throws AuthorizationException + */ + public boolean authorize(Database db, Privilege[] readRequiredPriv, + Privilege[] writeRequiredPriv) throws HiveException, + AuthorizationException; + + /** + * Authorize privileges against a Hive table object. + * + * @param table + * table object + * @param readRequiredPriv + * a list of privileges needed for inputs. + * @param writeRequiredPriv + * a list of privileges needed for outputs. + * @return true if the principal holds all required privileges + * @throws HiveException + * @throws AuthorizationException + */ + public boolean authorize(Table table, Privilege[] readRequiredPriv, + Privilege[] writeRequiredPriv) throws HiveException, + AuthorizationException; + + /** + * Authorize privileges against a Hive partition object. + * + * @param part + * partition object + * @param readRequiredPriv + * a list of privileges needed for inputs. + * @param writeRequiredPriv + * a list of privileges needed for outputs. + * @return true if the principal holds all required privileges + * @throws HiveException + * @throws AuthorizationException + */ + public boolean authorize(Partition part, Privilege[] readRequiredPriv, + Privilege[] writeRequiredPriv) throws HiveException, + AuthorizationException; + + /** + * Authorize privileges against a list of columns. If the partition object + * is not null, look at the column grants for the given partition. Otherwise + * look at the table column grants. + * + * @param table + * table object + * @param part + * partition object + * @param columns + * a list of columns + * @param readRequiredPriv + * a list of privileges needed for inputs. + * @param writeRequiredPriv + * a list of privileges needed for outputs. + * @return true if the principal holds all required privileges + * @throws HiveException + * @throws AuthorizationException + */ + public boolean authorize(Table table, Partition part, List<String> columns, + Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + throws HiveException, AuthorizationException; + +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java (revision 0) @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.security.authorization; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.security.Authenticator; + +/** + * Convenience base class for authorization providers: holds the session's + * authenticator and a Hive handle used to read grant information. + */ +public abstract class HiveAuthorizationProviderBase implements + HiveAuthorizationProvider { + protected Authenticator authenticator; + + protected Hive hive_db; + + public void init(Configuration conf) throws HiveException { + hive_db = Hive.get(new HiveConf(conf, HiveAuthorizationProvider.class)); + } + + public Authenticator getAuthenticator() { + return authenticator; + } + + public void setAuthenticator(Authenticator authenticator) { + this.authenticator = authenticator; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/Privilege.java (revision 0) @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.security.authorization; + +import java.util.EnumSet; + +public class Privilege { + + private String priv; + + private EnumSet<PrivilegeScope> supportedScopeSet; + + private Privilege(String priv, EnumSet<PrivilegeScope> scopeSet) { + this.priv = priv; + this.supportedScopeSet = scopeSet; + } + + public Privilege(String priv) { + this.priv = priv; + } + + public String getPriv() { + return priv; + } + + public void setPriv(String priv) { + this.priv = priv; + } + + public boolean supportColumnLevel() { + return supportedScopeSet != null + && supportedScopeSet.contains(PrivilegeScope.COLUMN_LEVEL_SCOPE); + } + + public boolean supportDBLevel() { + return supportedScopeSet != null + && supportedScopeSet.contains(PrivilegeScope.DB_LEVEL_SCOPE); + } + + public boolean supportTableLevel() { + return supportedScopeSet != null + && supportedScopeSet.contains(PrivilegeScope.TABLE_LEVEL_SCOPE); + } + + @Override + public String toString() { + return this.priv; + } + + public Privilege() { + } + + public static Privilege ALL = new Privilege("All", + PrivilegeScope.ALLSCOPE_EXCEPT_COLUMN); + + public static Privilege ALTER_METADATA = new Privilege("Alter", + PrivilegeScope.ALLSCOPE_EXCEPT_COLUMN); + + public static Privilege ALTER_DATA = new Privilege("Update", + PrivilegeScope.ALLSCOPE_EXCEPT_COLUMN); + + public static Privilege CREATE = new Privilege("Create", + PrivilegeScope.ALLSCOPE_EXCEPT_COLUMN); + + public static Privilege DROP = new Privilege("Drop", + PrivilegeScope.ALLSCOPE_EXCEPT_COLUMN); + + public static Privilege INDEX = new Privilege("Index", + PrivilegeScope.ALLSCOPE); + + public static Privilege LOCK = new Privilege("Lock", + PrivilegeScope.ALLSCOPE_EXCEPT_COLUMN); + + public static Privilege SELECT = new Privilege("Select", + PrivilegeScope.ALLSCOPE); + + public static Privilege SHOW_DATABASE = new Privilege("Show_Database", + EnumSet.of(PrivilegeScope.USER_LEVEL_SCOPE)); + +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeRegistry.java (revision 0) @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.security.authorization; + +import java.util.HashMap; +import java.util.Map; + +public class PrivilegeRegistry { + + protected static Map<String, Privilege> Registry = new HashMap<String, Privilege>(); + + static { + Registry.put(Privilege.ALL.getPriv().toLowerCase(), Privilege.ALL); + Registry.put(Privilege.ALTER_DATA.getPriv().toLowerCase(), + Privilege.ALTER_DATA); + Registry.put(Privilege.ALTER_METADATA.getPriv().toLowerCase(), + Privilege.ALTER_METADATA); + Registry.put(Privilege.CREATE.getPriv().toLowerCase(), Privilege.CREATE); + Registry.put(Privilege.DROP.getPriv().toLowerCase(), Privilege.DROP); + Registry.put(Privilege.INDEX.getPriv().toLowerCase(), Privilege.INDEX); + Registry.put(Privilege.LOCK.getPriv().toLowerCase(), Privilege.LOCK); + Registry.put(Privilege.SELECT.getPriv().toLowerCase(), Privilege.SELECT); + Registry.put(Privilege.SHOW_DATABASE.getPriv().toLowerCase(), + Privilege.SHOW_DATABASE); + } + + public static Privilege getPrivilege(String privilegeName) { + return Registry.get(privilegeName.toLowerCase()); + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeScope.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeScope.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeScope.java (revision 0) @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.security.authorization; + +import java.util.EnumSet; + +/** + * PrivilegeScope describes the scope of a Hive-defined privilege + * (global/database/table/column). For example, some Hive privileges are + * DB-level only, some are global, and some apply only to tables.
+ */ +public enum PrivilegeScope { + + USER_LEVEL_SCOPE((short) 0x01), + DB_LEVEL_SCOPE((short) 0x02), + TABLE_LEVEL_SCOPE((short) 0x04), + COLUMN_LEVEL_SCOPE((short) 0x08); + + private short mode; + + private PrivilegeScope(short mode) { + this.mode = mode; + } + + public short getMode() { + return mode; + } + + public void setMode(short mode) { + this.mode = mode; + } + + public static EnumSet<PrivilegeScope> ALLSCOPE = EnumSet.of( + PrivilegeScope.USER_LEVEL_SCOPE, PrivilegeScope.DB_LEVEL_SCOPE, + PrivilegeScope.TABLE_LEVEL_SCOPE, PrivilegeScope.COLUMN_LEVEL_SCOPE); + + public static EnumSet<PrivilegeScope> ALLSCOPE_EXCEPT_COLUMN = EnumSet.of( + PrivilegeScope.USER_LEVEL_SCOPE, PrivilegeScope.DB_LEVEL_SCOPE, + PrivilegeScope.TABLE_LEVEL_SCOPE); + +} Index: ql/src/java/org/apache/hadoop/hive/ql/session/CreateTableAutomaticGrant.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/session/CreateTableAutomaticGrant.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/session/CreateTableAutomaticGrant.java (revision 0) @@ -0,0 +1,124 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.session; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.security.authorization.Privilege; +import org.apache.hadoop.hive.ql.security.authorization.PrivilegeRegistry; + +public class CreateTableAutomaticGrant { + private Map<String, List<PrivilegeGrantInfo>> userGrants; + private Map<String, List<PrivilegeGrantInfo>> groupGrants; + private Map<String, List<PrivilegeGrantInfo>> roleGrants; + + public static CreateTableAutomaticGrant create(HiveConf conf) + throws HiveException { + CreateTableAutomaticGrant grants = new CreateTableAutomaticGrant(); + grants.userGrants = getGrantMap(HiveConf.getVar(conf, + HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_USER_GRANTS)); + grants.groupGrants = getGrantMap(HiveConf.getVar(conf, + HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS)); + grants.roleGrants = getGrantMap(HiveConf.getVar(conf, + HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS)); + + List<PrivilegeGrantInfo> ownerGrantInfoList = new ArrayList<PrivilegeGrantInfo>(); + String grantor = null; + if (SessionState.get() != null + && SessionState.get().getAuthenticator() != null) { + grantor = SessionState.get().getAuthenticator().getUserName(); + ownerGrantInfoList.add(new PrivilegeGrantInfo(Privilege.ALL.getPriv(), -1, grantor, + PrincipalType.USER, true)); + if (grants.userGrants == null) { + grants.userGrants = new HashMap<String, List<PrivilegeGrantInfo>>(); + } + grants.userGrants.put(grantor, ownerGrantInfoList); + } + return grants; + } + + private static Map<String, List<PrivilegeGrantInfo>> getGrantMap(String grantMapStr) + throws HiveException { + if (grantMapStr != null && !grantMapStr.trim().equals("")) { + String[] grantArrayStr = grantMapStr.split(";"); + Map<String, List<PrivilegeGrantInfo>> grantsMap = new HashMap<String, List<PrivilegeGrantInfo>>(); + for (String grantStr : grantArrayStr) { + String[] principalListAndPrivList = grantStr.split(":"); + if (principalListAndPrivList.length != 2 + || principalListAndPrivList[0] == null + || principalListAndPrivList[0].trim().equals("")) { + throw new HiveException( + "Cannot understand the config privilege definition " + grantStr); + } + String userList = principalListAndPrivList[0]; + String privList = principalListAndPrivList[1]; + checkPrivilege(privList); + + String[] grantArray = privList.split(","); + List<PrivilegeGrantInfo> grantInfoList = new ArrayList<PrivilegeGrantInfo>(); + String grantor = null; + if (SessionState.get() != null + && SessionState.get().getAuthenticator() != null) { + grantor = SessionState.get().getAuthenticator().getUserName(); + } + for (String grant : grantArray) { + grantInfoList.add(new PrivilegeGrantInfo(grant, -1, grantor, + PrincipalType.USER, true)); + } + + String[] users = userList.split(","); + for (String user : users) { + grantsMap.put(user, grantInfoList); + } + } + return grantsMap; + } + return null; + } + + private static void checkPrivilege(String ownerGrantsInConfig) + throws HiveException { + String[] ownerGrantArray = ownerGrantsInConfig.split(","); + // verify that every privilege named in the config actually exists + for (String ownerGrant : ownerGrantArray) { + Privilege priv = PrivilegeRegistry.getPrivilege(ownerGrant); + if (priv == null) { + throw new HiveException("Privilege " + ownerGrant + " is not found."); + } + } + } + + public Map<String, List<PrivilegeGrantInfo>> getUserGrants() { + return userGrants; + } + + public Map<String, List<PrivilegeGrantInfo>> getGroupGrants() { + return groupGrants; + } + + public Map<String, List<PrivilegeGrantInfo>> getRoleGrants() { + return roleGrants; + } +} \ No newline at end of file Index:
ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (revision 1050266) +++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (working copy) @@ -40,6 +40,12 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.history.HiveHistory; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.HiveOperation; +import org.apache.hadoop.hive.ql.security.Authenticator; +import org.apache.hadoop.hive.ql.security.AuthenticatorFactory; +import org.apache.hadoop.hive.ql.security.authorization.AuthorizationManagerFactory; +import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider; import org.apache.hadoop.hive.ql.util.DosToUnix; import org.apache.log4j.LogManager; import org.apache.log4j.PropertyConfigurator; @@ -77,8 +83,14 @@ /** * type of the command. */ - private String commandType; - + private HiveOperation commandType; + + private HiveAuthorizationProvider authorizer; + + private Authenticator authenticator; + + private CreateTableAutomaticGrant createTableGrants; + /** * Lineage state. */ @@ -150,11 +162,16 @@ /** * start a new session and set it to current session. + * @throws HiveException */ - public static SessionState start(HiveConf conf) { + public static SessionState start(HiveConf conf) throws HiveException { SessionState ss = new SessionState(conf); ss.getConf().setVar(HiveConf.ConfVars.HIVESESSIONID, makeSessionId()); ss.hiveHist = new HiveHistory(ss); + ss.authenticator = AuthenticatorFactory.getAuthenticator(conf); + ss.authorizer = AuthorizationManagerFactory.getAuthorizeProviderManager( + conf, ss.authenticator); + ss.createTableGrants = CreateTableAutomaticGrant.create(conf); tss.set(ss); return (ss); } @@ -163,6 +180,7 @@ * set current session to existing session object if a thread is running * multiple sessions - it must call this method with the new session object * when switching from one session to another. 
 */ public static SessionState start(SessionState startSs) { @@ -176,6 +194,18 @@ if (startSs.hiveHist == null) { startSs.hiveHist = new HiveHistory(startSs); } + + try { + startSs.authenticator = AuthenticatorFactory.getAuthenticator(startSs + .getConf()); + startSs.authorizer = AuthorizationManagerFactory + .getAuthorizeProviderManager(startSs.getConf(), startSs.authenticator); + startSs.createTableGrants = CreateTableAutomaticGrant.create(startSs + .getConf()); + } catch (HiveException e) { + throw new RuntimeException(e); + } + return startSs; } @@ -539,10 +569,38 @@ } public String getCommandType() { + if (commandType == null) { + return null; + } + return commandType.getOperationName(); + } + + public HiveOperation getHiveOperation() { return commandType; } - public void setCommandType(String commandType) { + public void setCommandType(HiveOperation commandType) { this.commandType = commandType; } + + public HiveAuthorizationProvider getAuthorizer() { + return authorizer; + } + + public void setAuthorizer(HiveAuthorizationProvider authorizer) { + this.authorizer = authorizer; + } + + public Authenticator getAuthenticator() { + return authenticator; + } + + public void setAuthenticator(Authenticator authenticator) { + this.authenticator = authenticator; + } + + public CreateTableAutomaticGrant getCreateTableGrants() { + return createTableGrants; + } + + public void setCreateTableGrants(CreateTableAutomaticGrant createTableGrants) { + this.createTableGrants = createTableGrants; + } } Index: ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java (revision 1050266) +++ ql/src/test/org/apache/hadoop/hive/ql/QTestUtil.java (working copy) @@ -24,10 +24,12 @@ import java.io.DataInputStream; import java.io.File; import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.FileWriter; import java.io.PrintStream; import java.io.Serializable; +import java.io.UnsupportedEncodingException; import java.util.ArrayList; import java.util.Arrays; import java.util.Deque; @@ -389,6 +391,9 @@ } public void createSources() throws Exception { + + startSessionState(); + // Create a bunch of tables with columns key and value LinkedList<String> cols = new LinkedList<String>(); cols.add("key"); @@ -490,7 +495,8 @@ testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE); // conf.logVars(System.out); // System.out.flush(); - + + SessionState.start(conf); db = Hive.get(conf); fs = FileSystem.get(conf); drv = new Driver(conf); @@ -541,6 +547,8 @@ createSources(); } + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER, + "org.apache.hadoop.hive.ql.security.DummyAuthenticator"); CliSessionState ss = new CliSessionState(conf); assert ss != null; ss.in = System.in; @@ -554,7 +562,7 @@ ss.err = ss.out; ss.setIsSilent(true); SessionState oldSs = SessionState.get(); - if (oldSs != null) { + if (oldSs != null && oldSs.out != null && oldSs.out != System.out) { oldSs.out.close(); } SessionState.start(ss); @@ -566,6 +574,19 @@ cliDriver.processInitFiles(ss); } + private CliSessionState startSessionState() + throws FileNotFoundException, UnsupportedEncodingException { + + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER, + "org.apache.hadoop.hive.ql.security.DummyAuthenticator"); + + CliSessionState ss = new CliSessionState(conf); + assert ss != null; + + SessionState.start(ss); + return ss; + } + public int executeOne(String tname) { String q
= qMap.get(tname); @@ -898,6 +919,7 @@ "-I", "at junit", "-I", "Caused by:", "-I", "LOCK_QUERYID:", + "-I", "grantTime", "-I", "[.][.][.] [0-9]* more", (new File(logDir, tname + ".out")).getPath(), outFileName }; Index: ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java (revision 1050266) +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSemanticAnalyzerHookLoading.java (working copy) @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.session.SessionState; public class TestSemanticAnalyzerHookLoading extends TestCase { @@ -35,6 +36,7 @@ HiveConf conf = new HiveConf(this.getClass()); conf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, DummySemanticAnalyzerHook.class.getName()); conf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + SessionState.start(conf); Driver driver = new Driver(conf); driver.run("drop table testDL"); Index: ql/src/test/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java (revision 0) +++ ql/src/test/org/apache/hadoop/hive/ql/security/DummyAuthenticator.java (revision 0) @@ -0,0 +1,39 @@ +package org.apache.hadoop.hive.ql.security; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; + +public class DummyAuthenticator implements Authenticator { + + private List<String> groupNames; + private String userName; + + public DummyAuthenticator() { + this.groupNames = new ArrayList<String>(); + groupNames.add("hive_test_group1"); + groupNames.add("hive_test_group2"); + userName = "hive_test_user"; + } + + @Override + public boolean destroy() { + return true; + } + + @Override + public List<String> getGroupNames() { + return groupNames; + } + + @Override + public String getUserName() { + return userName; + } + + @Override + public void init(Configuration conf) { + } + +} Index: ql/src/test/queries/clientnegative/authorization_fail_1.q =================================================================== --- ql/src/test/queries/clientnegative/authorization_fail_1.q (revision 0) +++ ql/src/test/queries/clientnegative/authorization_fail_1.q (revision 0) @@ -0,0 +1,9 @@ +create table authorization_fail_1 (key int, value string); +set hive.security.authorization.enabled=true; + +revoke `ALL` on table authorization_fail_1 from user hive_test_user; + +grant `Create` on table authorization_fail_1 to user hive_test_user; +grant `Create` on table authorization_fail_1 to user hive_test_user; + + Index: ql/src/test/queries/clientnegative/authorization_fail_2.q =================================================================== --- ql/src/test/queries/clientnegative/authorization_fail_2.q (revision 0) +++ ql/src/test/queries/clientnegative/authorization_fail_2.q (revision 0) @@ -0,0 +1,9 @@ +create table authorization_fail_2 (key int, value string) partitioned by (ds string); + +revoke `ALL` on table authorization_fail_2 from user hive_test_user; + +set hive.security.authorization.enabled=true; + +alter table authorization_fail_2 add partition (ds='2010'); + + Index: ql/src/test/queries/clientnegative/authorization_fail_3.q
=================================================================== --- ql/src/test/queries/clientnegative/authorization_fail_3.q (revision 0) +++ ql/src/test/queries/clientnegative/authorization_fail_3.q (revision 0) @@ -0,0 +1,12 @@ +create table authorization_fail_3 (key int, value string) partitioned by (ds string); +set hive.security.authorization.enabled=true; + +revoke `ALL` on table authorization_fail_3 from user hive_test_user; + +grant `Create` on table authorization_fail_3 to user hive_test_user; +alter table authorization_fail_3 add partition (ds='2010'); + +show grant user hive_test_user on table authorization_fail_3; +show grant user hive_test_user on table authorization_fail_3 partition (ds='2010'); + +select key from authorization_fail_3 where ds='2010'; Index: ql/src/test/queries/clientnegative/authorization_fail_4.q =================================================================== --- ql/src/test/queries/clientnegative/authorization_fail_4.q (revision 0) +++ ql/src/test/queries/clientnegative/authorization_fail_4.q (revision 0) @@ -0,0 +1,15 @@ +create table authorization_fail_4 (key int, value string) partitioned by (ds string); + +revoke `ALL` on table authorization_fail_4 from user hive_test_user; + +set hive.security.authorization.enabled=true; +grant `Alter` on table authorization_fail_4 to user hive_test_user; +ALTER TABLE authorization_fail_4 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE"); + +grant `Create` on table authorization_fail_4 to user hive_test_user; +alter table authorization_fail_4 add partition (ds='2010'); + +show grant user hive_test_user on table authorization_fail_4; +show grant user hive_test_user on table authorization_fail_4 partition (ds='2010'); + +select key from authorization_fail_4 where ds='2010'; Index: ql/src/test/queries/clientnegative/authorization_fail_5.q =================================================================== --- ql/src/test/queries/clientnegative/authorization_fail_5.q (revision 0) +++ ql/src/test/queries/clientnegative/authorization_fail_5.q (revision 0) @@ -0,0 +1,20 @@ +create table authorization_fail (key int, value string) partitioned by (ds string); +set hive.security.authorization.enabled=true; + +revoke `ALL` on table authorization_fail from user hive_test_user; + +grant `Alter` on table authorization_fail to user hive_test_user; +ALTER TABLE authorization_fail SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE"); + +grant `Create` on table authorization_fail to user hive_test_user; +grant `Select` on table authorization_fail to user hive_test_user; +alter table authorization_fail add partition (ds='2010'); + +show grant user hive_test_user on table authorization_fail; +show grant user hive_test_user on table authorization_fail partition (ds='2010'); + +revoke `Select` on table authorization_fail partition (ds='2010') from user hive_test_user; + +show grant user hive_test_user on table authorization_fail partition (ds='2010'); + +select key from authorization_fail where ds='2010'; \ No newline at end of file Index: ql/src/test/queries/clientnegative/authorization_fail_6.q =================================================================== --- ql/src/test/queries/clientnegative/authorization_fail_6.q (revision 0) +++ ql/src/test/queries/clientnegative/authorization_fail_6.q (revision 0) @@ -0,0 +1,5 @@ +create table authorization_part_fail (key int, value string) partitioned by (ds string); +revoke `ALL` on table authorization_part_fail from user hive_test_user; +set 
hive.security.authorization.enabled=true; + +ALTER TABLE authorization_part_fail SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE"); Index: ql/src/test/queries/clientnegative/authorization_fail_7.q =================================================================== --- ql/src/test/queries/clientnegative/authorization_fail_7.q (revision 0) +++ ql/src/test/queries/clientnegative/authorization_fail_7.q (revision 0) @@ -0,0 +1,16 @@ +create table authorization_fail (key int, value string); +revoke `ALL` on table authorization_fail from user hive_test_user; + +set hive.security.authorization.enabled=true; + +create role hive_test_role_fail; + +grant role hive_test_role_fail to user hive_test_user; +grant `select` on table authorization_fail to role hive_test_role_fail; +show role grant user hive_test_user; + +show grant role hive_test_role_fail on table authorization_fail; + +drop role hive_test_role_fail; + +select key from authorization_fail; \ No newline at end of file Index: ql/src/test/queries/clientnegative/authorization_part.q =================================================================== --- ql/src/test/queries/clientnegative/authorization_part.q (revision 0) +++ ql/src/test/queries/clientnegative/authorization_part.q (revision 0) @@ -0,0 +1,35 @@ +create table authorization_part_fail (key int, value string) partitioned by (ds string); +ALTER TABLE authorization_part_fail SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE"); +set hive.security.authorization.enabled=true; + +revoke `ALL` on table authorization_part_fail from user hive_test_user; + +grant `Create` on table authorization_part_fail to user hive_test_user; +grant `Update` on table authorization_part_fail to user hive_test_user; +grant `Drop` on table authorization_part_fail to user hive_test_user; +grant `select` on table src to user hive_test_user; + +-- column grant to group + +grant `select`(key) on table authorization_part_fail to group hive_test_group1; +grant `select` on table authorization_part_fail to group hive_test_group1; + +show grant group hive_test_group1 on table authorization_part_fail; + +insert overwrite table authorization_part_fail partition (ds='2010') select key, value from src; +show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2010'); +show grant group hive_test_group1 on table authorization_part_fail partition (ds='2010'); +select key, value from authorization_part_fail where ds='2010' order by key limit 20; + +insert overwrite table authorization_part_fail partition (ds='2011') select key, value from src; +show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2011'); +show grant group hive_test_group1 on table authorization_part_fail partition (ds='2011'); +select key, value from authorization_part_fail where ds='2011' order by key limit 20; + +select key,value, ds from authorization_part_fail where ds>='2010' order by key, ds limit 20; + +revoke `select` on table authorization_part_fail partition (ds='2010') from group hive_test_group1; + +select key,value, ds from authorization_part_fail where ds>='2010' order by key, ds limit 20; + +drop table authorization_part_fail; \ No newline at end of file Index: ql/src/test/queries/clientpositive/authorization_1.q =================================================================== --- ql/src/test/queries/clientpositive/authorization_1.q (revision 0) +++ ql/src/test/queries/clientpositive/authorization_1.q (revision 0) @@ -0,0 +1,89 @@ +create table src_autho_test as select * 
from src; + +revoke `ALL` on table src_autho_test from user hive_test_user; + +set hive.security.authorization.enabled=true; + +--table grant to user + +grant `select` on table src_autho_test to user hive_test_user; + +show grant user hive_test_user on table src_autho_test; +show grant user hive_test_user on table src_autho_test(key); + +select key from src_autho_test order by key limit 20; + +revoke `select` on table src_autho_test from user hive_test_user; +show grant user hive_test_user on table src_autho_test; +show grant user hive_test_user on table src_autho_test(key); + +--column grant to user + +grant `select`(key) on table src_autho_test to user hive_test_user; + +show grant user hive_test_user on table src_autho_test; +show grant user hive_test_user on table src_autho_test(key); + +select key from src_autho_test order by key limit 20; + +revoke `select`(key) on table src_autho_test from user hive_test_user; +show grant user hive_test_user on table src_autho_test; +show grant user hive_test_user on table src_autho_test(key); + +--table grant to group + +grant `select` on table src_autho_test to group hive_test_group1; + +show grant group hive_test_group1 on table src_autho_test; +show grant group hive_test_group1 on table src_autho_test(key); + +select key from src_autho_test order by key limit 20; + +revoke `select` on table src_autho_test from group hive_test_group1; +show grant group hive_test_group1 on table src_autho_test; +show grant group hive_test_group1 on table src_autho_test(key); + +--column grant to group + +grant `select`(key) on table src_autho_test to group hive_test_group1; + +show grant group hive_test_group1 on table src_autho_test; +show grant group hive_test_group1 on table src_autho_test(key); + +select key from src_autho_test order by key limit 20; + +revoke `select`(key) on table src_autho_test from group hive_test_group1; +show grant group hive_test_group1 on table src_autho_test; +show grant group hive_test_group1 on table src_autho_test(key); + +--role +create role src_role; +grant role src_role to user hive_test_user; +show role grant user hive_test_user; + +--column grant to role + +grant `select`(key) on table src_autho_test to role src_role; + +show grant role src_role on table src_autho_test; +show grant role src_role on table src_autho_test(key); + +select key from src_autho_test order by key limit 20; + +revoke `select`(key) on table src_autho_test from role src_role; + +--table grant to role + +grant `select` on table src_autho_test to role src_role; + +select key from src_autho_test order by key limit 20; + +show grant role src_role on table src_autho_test; +show grant role src_role on table src_autho_test(key); +revoke `select` on table src_autho_test from role src_role; + +-- drop role +drop role src_role; + +set hive.security.authorization.enabled=false; +drop table src_autho_test; \ No newline at end of file Index: ql/src/test/queries/clientpositive/authorization_2.q =================================================================== --- ql/src/test/queries/clientpositive/authorization_2.q (revision 0) +++ ql/src/test/queries/clientpositive/authorization_2.q (revision 0) @@ -0,0 +1,111 @@ +create table authorization_part (key int, value string) partitioned by (ds string); + +revoke `ALL` on table authorization_part from user hive_test_user; + +ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE"); +set hive.security.authorization.enabled=true; + +-- column grant to user +grant `Create` on table 
authorization_part to user hive_test_user; +grant `Update` on table authorization_part to user hive_test_user; +grant `Drop` on table authorization_part to user hive_test_user; +grant `select` on table src to user hive_test_user; + +show grant user hive_test_user on table authorization_part; + +alter table authorization_part add partition (ds='2010'); +show grant user hive_test_user on table authorization_part partition (ds='2010'); + +grant `select`(key) on table authorization_part to user hive_test_user; +alter table authorization_part drop partition (ds='2010'); +insert overwrite table authorization_part partition (ds='2010') select key, value from src; +show grant user hive_test_user on table authorization_part(key) partition (ds='2010'); +show grant user hive_test_user on table authorization_part(key); +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select`(key) on table authorization_part from user hive_test_user; +show grant user hive_test_user on table authorization_part(key); +show grant user hive_test_user on table authorization_part(key) partition (ds='2010'); + +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select`(key) on table authorization_part partition (ds='2010') from user hive_test_user; +show grant user hive_test_user on table authorization_part(key) partition (ds='2010'); + +alter table authorization_part drop partition (ds='2010'); + +-- table grant to user +show grant user hive_test_user on table authorization_part; + +alter table authorization_part add partition (ds='2010'); +show grant user hive_test_user on table authorization_part partition (ds='2010'); + +grant `select` on table authorization_part to user hive_test_user; +alter table authorization_part drop partition (ds='2010'); +insert overwrite table authorization_part partition (ds='2010') select key, value from src; +show grant user hive_test_user on table authorization_part partition (ds='2010'); +show grant user hive_test_user on table authorization_part; +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select` on table authorization_part from user hive_test_user; +show grant user hive_test_user on table authorization_part; +show grant user hive_test_user on table authorization_part partition (ds='2010'); + +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select` on table authorization_part partition (ds='2010') from user hive_test_user; +show grant user hive_test_user on table authorization_part partition (ds='2010'); + +alter table authorization_part drop partition (ds='2010'); + +-- column grant to group + +show grant group hive_test_group1 on table authorization_part; + +alter table authorization_part add partition (ds='2010'); +show grant group hive_test_group1 on table authorization_part partition (ds='2010'); + +grant `select`(key) on table authorization_part to group hive_test_group1; +alter table authorization_part drop partition (ds='2010'); +insert overwrite table authorization_part partition (ds='2010') select key, value from src; +show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010'); +show grant group hive_test_group1 on table authorization_part(key); +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select`(key) on table authorization_part from group hive_test_group1; +show grant group hive_test_group1 on table authorization_part(key); +show grant group hive_test_group1 on 
table authorization_part(key) partition (ds='2010'); + +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select`(key) on table authorization_part partition (ds='2010') from group hive_test_group1; +show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010'); + +alter table authorization_part drop partition (ds='2010'); + +-- table grant to group +show grant group hive_test_group1 on table authorization_part; + +alter table authorization_part add partition (ds='2010'); +show grant group hive_test_group1 on table authorization_part partition (ds='2010'); + +grant `select` on table authorization_part to group hive_test_group1; +alter table authorization_part drop partition (ds='2010'); +insert overwrite table authorization_part partition (ds='2010') select key, value from src; +show grant group hive_test_group1 on table authorization_part partition (ds='2010'); +show grant group hive_test_group1 on table authorization_part; +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select` on table authorization_part from group hive_test_group1; +show grant group hive_test_group1 on table authorization_part; +show grant group hive_test_group1 on table authorization_part partition (ds='2010'); + +select key from authorization_part where ds='2010' order by key limit 20; + +revoke `select` on table authorization_part partition (ds='2010') from group hive_test_group1; +show grant group hive_test_group1 on table authorization_part partition (ds='2010'); + + +revoke `select` on table src from user hive_test_user; +set hive.security.authorization.enabled=false; +drop table authorization_part; \ No newline at end of file Index: ql/src/test/queries/clientpositive/authorization_3.q =================================================================== --- ql/src/test/queries/clientpositive/authorization_3.q (revision 0) +++ ql/src/test/queries/clientpositive/authorization_3.q (revision 0) @@ -0,0 +1,19 @@ +create table src_autho_test as select * from src; + +revoke `ALL` on table src_autho_test from user hive_test_user; + +grant `drop` on table src_autho_test to user hive_test_user; +grant `select` on table src_autho_test to user hive_test_user; + +show grant user hive_test_user on table src_autho_test; + +revoke `select` on table src_autho_test from user hive_test_user; +revoke `drop` on table src_autho_test from user hive_test_user; + +grant `drop`,`select` on table src_autho_test to user hive_test_user; +show grant user hive_test_user on table src_autho_test; +revoke `drop`,`select` on table src_autho_test from user hive_test_user; + +grant `drop`,`select`(key), `select`(value) on table src to user hive_test_user; +show grant user hive_test_user on table src_autho_test; +revoke `drop`,`select`(key), `select`(value) on table src from user hive_test_user; \ No newline at end of file Index: ql/src/test/queries/clientpositive/authorization_4.q =================================================================== --- ql/src/test/queries/clientpositive/authorization_4.q (revision 0) +++ ql/src/test/queries/clientpositive/authorization_4.q (revision 0) @@ -0,0 +1,9 @@ +create table src_autho_test as select * from src; + +set hive.security.authorization.enabled=true; + +show grant user hive_test_user on table src_autho_test; + +select key from src_autho_test order by key limit 20; + +drop table src_autho_test; \ No newline at end of file Index: ql/src/test/queries/clientpositive/input19.q 
=================================================================== --- ql/src/test/queries/clientpositive/input19.q (revision 1050266) +++ ql/src/test/queries/clientpositive/input19.q (working copy) @@ -1,5 +1,5 @@ -create table apachelog(ipaddress STRING,identd STRING,user STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE; +create table apachelog(ipaddress STRING,identd STRING,user_name STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE; LOAD DATA LOCAL INPATH '../data/files/apache.access.log' INTO TABLE apachelog; SELECT a.* FROM apachelog a; Index: ql/src/test/queries/clientpositive/show_indexes_edge_cases.q =================================================================== --- ql/src/test/queries/clientpositive/show_indexes_edge_cases.q (revision 1050266) +++ ql/src/test/queries/clientpositive/show_indexes_edge_cases.q (working copy) @@ -21,5 +21,7 @@ EXPLAIN SHOW INDEXES ON show_idx_empty; SHOW INDEXES ON show_idx_empty; +DROP INDEX idx_1 on show_idx_full; +DROP INDEX idx_2 on show_idx_full; DROP TABLE show_idx_empty; DROP TABLE show_idx_full; \ No newline at end of file Index: ql/src/test/results/clientnegative/authorization_fail_1.q.out =================================================================== --- ql/src/test/results/clientnegative/authorization_fail_1.q.out (revision 0) +++ ql/src/test/results/clientnegative/authorization_fail_1.q.out (revision 0) @@ -0,0 +1,17 @@ +PREHOOK: query: create table authorization_fail_1 (key int, value string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_fail_1 (key int, value string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_fail_1 +PREHOOK: query: revoke `ALL` on table authorization_fail_1 from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `ALL` on table authorization_fail_1 from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: grant `Create` on table authorization_fail_1 to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Create` on table authorization_fail_1 to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `Create` on table authorization_fail_1 to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +Error: java.lang.RuntimeException: InvalidObjectException(message:Create is already granted on table [default,authorization_fail_1] by hive_test_user) +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask Index: ql/src/test/results/clientnegative/authorization_fail_2.q.out =================================================================== --- ql/src/test/results/clientnegative/authorization_fail_2.q.out (revision 0) +++ ql/src/test/results/clientnegative/authorization_fail_2.q.out (revision 0) @@ -0,0 +1,10 @@ +PREHOOK: query: create table authorization_fail_2 (key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: 
query: create table authorization_fail_2 (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_fail_2 +PREHOOK: query: revoke `ALL` on table authorization_fail_2 from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `ALL` on table authorization_fail_2 from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +Authorization failed:No privilege 'Create' found for inputs { database:default, table:authorization_fail_2}. Use show grant to get more details. Index: ql/src/test/results/clientnegative/authorization_fail_3.q.out =================================================================== --- ql/src/test/results/clientnegative/authorization_fail_3.q.out (revision 0) +++ ql/src/test/results/clientnegative/authorization_fail_3.q.out (revision 0) @@ -0,0 +1,37 @@ +PREHOOK: query: create table authorization_fail_3 (key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_fail_3 (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_fail_3 +PREHOOK: query: revoke `ALL` on table authorization_fail_3 from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `ALL` on table authorization_fail_3 from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: grant `Create` on table authorization_fail_3 to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Create` on table authorization_fail_3 to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: alter table authorization_fail_3 add partition (ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@authorization_fail_3 +POSTHOOK: query: alter table authorization_fail_3 add partition (ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@authorization_fail_3 +POSTHOOK: Output: default@authorization_fail_3@ds=2010 +PREHOOK: query: show grant user hive_test_user on table authorization_fail_3 +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_fail_3 +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_fail_3 +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292569774 +grantor hive_test_user +PREHOOK: query: show grant user hive_test_user on table authorization_fail_3 partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_fail_3 partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +Authorization failed:No privilege 'Select' found for inputs { database:default, table:authorization_fail_3, columnName:key}. Use show grant to get more details. 
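(For reference: the "Authorization failed:No privilege ... found" lines in these expected outputs come from the inputCheck/outputCheck scan at the top of this patch; each required privilege maps to one slot in a boolean array, and the first unsatisfied slot raises AuthorizationException. A condensed, standalone restatement of that path follows; AuthCheckSketch and its method names are illustrative only, not patch code:)

public class AuthCheckSketch {

  static class AuthorizationException extends RuntimeException {
    AuthorizationException(String msg) {
      super(msg);
    }
  }

  // Mirrors firstFalseIndex() above: index of the first failed check, or -1.
  static int firstFalseIndex(boolean[] checks) {
    if (checks != null) {
      for (int i = 0; i < checks.length; i++) {
        if (!checks[i]) {
          return i;
        }
      }
    }
    return -1;
  }

  static void assertPrivileges(String[] requiredPrivs, boolean[] checks,
      String hiveObject) {
    int failed = firstFalseIndex(checks);
    if (failed >= 0) {
      throw new AuthorizationException("No privilege '" + requiredPrivs[failed]
          + "' found for inputs " + hiveObject);
    }
  }

  public static void main(String[] args) {
    try {
      // authorization_fail_2.q revoked ALL, so the 'Create' slot is false.
      assertPrivileges(new String[] { "Create" }, new boolean[] { false },
          "{ database:default, table:authorization_fail_2}");
    } catch (AuthorizationException e) {
      System.out.println("Authorization failed:" + e.getMessage());
    }
  }
}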
Index: ql/src/test/results/clientnegative/authorization_fail_4.q.out =================================================================== --- ql/src/test/results/clientnegative/authorization_fail_4.q.out (revision 0) +++ ql/src/test/results/clientnegative/authorization_fail_4.q.out (revision 0) @@ -0,0 +1,75 @@ +PREHOOK: query: create table authorization_fail_4 (key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_fail_4 (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_fail_4 +PREHOOK: query: revoke `ALL` on table authorization_fail_4 from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `ALL` on table authorization_fail_4 from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: grant `Alter` on table authorization_fail_4 to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Alter` on table authorization_fail_4 to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: ALTER TABLE authorization_fail_4 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +PREHOOK: type: ALTERTABLE_PROPERTIES +PREHOOK: Input: default@authorization_fail_4 +PREHOOK: Output: default@authorization_fail_4 +POSTHOOK: query: ALTER TABLE authorization_fail_4 SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: default@authorization_fail_4 +POSTHOOK: Output: default@authorization_fail_4 +PREHOOK: query: grant `Create` on table authorization_fail_4 to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Create` on table authorization_fail_4 to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: alter table authorization_fail_4 add partition (ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@authorization_fail_4 +POSTHOOK: query: alter table authorization_fail_4 add partition (ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@authorization_fail_4 +POSTHOOK: Output: default@authorization_fail_4@ds=2010 +PREHOOK: query: show grant user hive_test_user on table authorization_fail_4 +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_fail_4 +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_fail_4 +principalName hive_test_user +principalType USER +privilege Alter +grantTime 1292569775 +grantor hive_test_user + +database default +table authorization_fail_4 +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292569776 +grantor hive_test_user +PREHOOK: query: show grant user hive_test_user on table authorization_fail_4 partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_fail_4 partition (ds='2010') +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_fail_4 +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Alter +grantTime 1292569776 +grantor hive_test_user + +database default +table authorization_fail_4 +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292569776 +grantor hive_test_user +Authorization failed:No privilege 'Select' found for inputs { database:default, table:authorization_fail_4, partitionName:ds=2010, columnName:key}. Use show grant to get more details. 
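(Besides explicit grant statements, grant rows like the ones above can be seeded at table creation from the HIVE_AUTHORIZATION_TABLE_USER/GROUP/ROLE_GRANTS settings, which CreateTableAutomaticGrant.getGrantMap earlier in this patch parses. A minimal standalone sketch of that parse, assuming only the "principal1,principal2:priv1,priv2;..." format implied by the splits in getGrantMap; GrantMapSketch is an illustrative name, not a patch class:)

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GrantMapSketch {

  // Each ';'-separated entry is "principals:privileges"; every listed
  // principal receives every listed privilege.
  static Map<String, List<String>> parse(String grantMapStr) {
    Map<String, List<String>> grants = new HashMap<String, List<String>>();
    if (grantMapStr == null || grantMapStr.trim().isEmpty()) {
      return grants;
    }
    for (String entry : grantMapStr.split(";")) {
      String[] principalsAndPrivs = entry.split(":");
      if (principalsAndPrivs.length != 2) {
        throw new IllegalArgumentException(
            "Cannot understand the config privilege definition " + entry);
      }
      List<String> privs = new ArrayList<String>();
      for (String priv : principalsAndPrivs[1].split(",")) {
        // The real code also validates each name via PrivilegeRegistry.
        privs.add(priv);
      }
      for (String principal : principalsAndPrivs[0].split(",")) {
        grants.put(principal, privs);
      }
    }
    return grants;
  }

  public static void main(String[] args) {
    // Prints {alice=[select], bob=[select], carol=[create]} (order may vary).
    System.out.println(parse("alice,bob:select;carol:create"));
  }
}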
Index: ql/src/test/results/clientnegative/authorization_fail_5.q.out =================================================================== --- ql/src/test/results/clientnegative/authorization_fail_5.q.out (revision 0) +++ ql/src/test/results/clientnegative/authorization_fail_5.q.out (revision 0) @@ -0,0 +1,122 @@ +PREHOOK: query: create table authorization_fail (key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_fail (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_fail +PREHOOK: query: revoke `ALL` on table authorization_fail from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `ALL` on table authorization_fail from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: grant `Alter` on table authorization_fail to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Alter` on table authorization_fail to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: ALTER TABLE authorization_fail SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +PREHOOK: type: ALTERTABLE_PROPERTIES +PREHOOK: Input: default@authorization_fail +PREHOOK: Output: default@authorization_fail +POSTHOOK: query: ALTER TABLE authorization_fail SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: default@authorization_fail +POSTHOOK: Output: default@authorization_fail +PREHOOK: query: grant `Create` on table authorization_fail to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Create` on table authorization_fail to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `Select` on table authorization_fail to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Select` on table authorization_fail to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: alter table authorization_fail add partition (ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@authorization_fail +POSTHOOK: query: alter table authorization_fail add partition (ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@authorization_fail +POSTHOOK: Output: default@authorization_fail@ds=2010 +PREHOOK: query: show grant user hive_test_user on table authorization_fail +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_fail +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_fail +principalName hive_test_user +principalType USER +privilege Alter +grantTime 1292570198 +grantor hive_test_user + +database default +table authorization_fail +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292570198 +grantor hive_test_user + +database default +table authorization_fail +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292570198 +grantor hive_test_user +PREHOOK: query: show grant user hive_test_user on table authorization_fail partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_fail partition (ds='2010') +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_fail +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Alter +grantTime 1292570198 +grantor hive_test_user + +database default +table authorization_fail +partition 
ds=2010 +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292570198 +grantor hive_test_user + +database default +table authorization_fail +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292570198 +grantor hive_test_user +PREHOOK: query: revoke `Select` on table authorization_fail partition (ds='2010') from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `Select` on table authorization_fail partition (ds='2010') from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table authorization_fail partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_fail partition (ds='2010') +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_fail +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Alter +grantTime 1292570198 +grantor hive_test_user + +database default +table authorization_fail +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292570198 +grantor hive_test_user +Authorization failed:No privilege 'Select' found for inputs { database:default, table:authorization_fail, partitionName:ds=2010, columnName:key}. Use show grant to get more details. Index: ql/src/test/results/clientnegative/authorization_fail_6.q.out =================================================================== --- ql/src/test/results/clientnegative/authorization_fail_6.q.out (revision 0) +++ ql/src/test/results/clientnegative/authorization_fail_6.q.out (revision 0) @@ -0,0 +1,10 @@ +PREHOOK: query: create table authorization_part_fail (key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_part_fail (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_part_fail +PREHOOK: query: revoke `ALL` on table authorization_part_fail from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `ALL` on table authorization_part_fail from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +Authorization failed:No privilege 'Alter' found for inputs { database:default, table:authorization_part_fail}. Use show grant to get more details. 
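(Column-level statements such as grant `select`(key) in these tests are accepted because SELECT is registered with PrivilegeScope.ALLSCOPE, whereas a privilege registered with ALLSCOPE_EXCEPT_COLUMN cannot be granted on a column. A standalone sketch of the supportColumnLevel() test from the Privilege class above, using a simplified illustrative enum:)

import java.util.EnumSet;

public class ScopeSketch {

  enum Scope { USER, DB, TABLE, COLUMN }

  // Mirrors Privilege.supportColumnLevel(): a null scope set supports nothing.
  static boolean supportsColumnLevel(EnumSet<Scope> scopes) {
    return scopes != null && scopes.contains(Scope.COLUMN);
  }

  public static void main(String[] args) {
    EnumSet<Scope> selectScopes = EnumSet.allOf(Scope.class); // like ALLSCOPE
    EnumSet<Scope> dropScopes =
        EnumSet.of(Scope.USER, Scope.DB, Scope.TABLE); // like ALLSCOPE_EXCEPT_COLUMN

    System.out.println(supportsColumnLevel(selectScopes)); // true: select(key) is legal
    System.out.println(supportsColumnLevel(dropScopes));   // false: drop(key) would be rejected
  }
}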
Index: ql/src/test/results/clientnegative/authorization_fail_7.q.out =================================================================== --- ql/src/test/results/clientnegative/authorization_fail_7.q.out (revision 0) +++ ql/src/test/results/clientnegative/authorization_fail_7.q.out (revision 0) @@ -0,0 +1,44 @@ +PREHOOK: query: create table authorization_fail (key int, value string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_fail (key int, value string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_fail +PREHOOK: query: revoke `ALL` on table authorization_fail from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `ALL` on table authorization_fail from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: create role hive_test_role_fail +PREHOOK: type: CREATEROLE +POSTHOOK: query: create role hive_test_role_fail +POSTHOOK: type: CREATEROLE +PREHOOK: query: grant role hive_test_role_fail to user hive_test_user +PREHOOK: type: GRANT_ROLE +POSTHOOK: query: grant role hive_test_role_fail to user hive_test_user +POSTHOOK: type: GRANT_ROLE +PREHOOK: query: grant `select` on table authorization_fail to role hive_test_role_fail +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table authorization_fail to role hive_test_role_fail +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show role grant user hive_test_user +PREHOOK: type: SHOW_ROLE_GRANT +POSTHOOK: query: show role grant user hive_test_user +POSTHOOK: type: SHOW_ROLE_GRANT +role name:hive_test_role_fail +role name:hive_test_role_fail +PREHOOK: query: show grant role hive_test_role_fail on table authorization_fail +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant role hive_test_role_fail on table authorization_fail +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_fail +principalName hive_test_role_fail +principalType ROLE +privilege Select +grantTime 1292570201 +grantor hive_test_user +PREHOOK: query: drop role hive_test_role_fail +PREHOOK: type: DROPROLE +POSTHOOK: query: drop role hive_test_role_fail +POSTHOOK: type: DROPROLE +Authorization failed:No privilege 'Select' found for inputs { database:default, table:authorization_fail, columnName:key}. Use show grant to get more details. 
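(All of these grant/revoke tests run inside a session wired up by the SessionState changes earlier in this patch: QTestUtil points HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER at DummyAuthenticator before calling SessionState.start(conf), which then builds the authenticator and authorization provider through their factories. A sketch of that wiring as a tiny driver, assuming the classes from this patch are on the classpath; SessionWiringSketch is an illustrative name:)

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.session.SessionState;

public class SessionWiringSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf(SessionWiringSketch.class);
    // Same override QTestUtil applies before starting a test session:
    HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
        "org.apache.hadoop.hive.ql.security.DummyAuthenticator");

    // start() instantiates the authenticator and authorization provider
    // via their factories and registers the session on the current thread.
    SessionState ss = SessionState.start(conf);

    // DummyAuthenticator always reports the fixed test principal:
    System.out.println(ss.getAuthenticator().getUserName());   // hive_test_user
    System.out.println(ss.getAuthenticator().getGroupNames()); // [hive_test_group1, hive_test_group2]
  }
}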
Index: ql/src/test/results/clientnegative/authorization_part.q.out =================================================================== --- ql/src/test/results/clientnegative/authorization_part.q.out (revision 0) +++ ql/src/test/results/clientnegative/authorization_part.q.out (revision 0) @@ -0,0 +1,250 @@ +PREHOOK: query: create table authorization_part_fail (key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_part_fail (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_part_fail +PREHOOK: query: ALTER TABLE authorization_part_fail SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +PREHOOK: type: ALTERTABLE_PROPERTIES +PREHOOK: Input: default@authorization_part_fail +PREHOOK: Output: default@authorization_part_fail +POSTHOOK: query: ALTER TABLE authorization_part_fail SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: default@authorization_part_fail +POSTHOOK: Output: default@authorization_part_fail +PREHOOK: query: revoke `ALL` on table authorization_part_fail from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `ALL` on table authorization_part_fail from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: grant `Create` on table authorization_part_fail to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Create` on table authorization_part_fail to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `Update` on table authorization_part_fail to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Update` on table authorization_part_fail to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `Drop` on table authorization_part_fail to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Drop` on table authorization_part_fail to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `select` on table src to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table src to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: -- column grant to group + +grant `select`(key) on table authorization_part_fail to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: -- column grant to group + +grant `select`(key) on table authorization_part_fail to group hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `select` on table authorization_part_fail to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table authorization_part_fail to group hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant group hive_test_group1 on table authorization_part_fail +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part_fail +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_part_fail +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292570477 +grantor hive_test_user +PREHOOK: query: insert overwrite table authorization_part_fail partition (ds='2010') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part_fail@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part_fail partition (ds='2010') 
select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@authorization_part_fail@ds=2010 +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part_fail +partition ds=2010 +columnName key +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292570485 +grantor hive_test_user +PREHOOK: query: show grant group hive_test_group1 on table authorization_part_fail partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part_fail partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part_fail +partition ds=2010 +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292570485 +grantor hive_test_user +PREHOOK: query: select key, value from authorization_part_fail where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part_fail@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_23-21-26_341_6045427400960012192/-mr-10000 +POSTHOOK: query: select key, value from authorization_part_fail where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part_fail@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_23-21-26_341_6045427400960012192/-mr-10000 +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 +0 val_0 +0 val_0 +2 val_2 +4 val_4 +5 val_5 +5 val_5 +5 val_5 +8 val_8 +9 val_9 +10 val_10 +11 val_11 +12 val_12 +12 val_12 +15 val_15 +15 val_15 +17 val_17 +18 val_18 +18 val_18 +19 val_19 +PREHOOK: query: insert overwrite table authorization_part_fail partition (ds='2011') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part_fail@ds=2011 +POSTHOOK: query: insert overwrite table authorization_part_fail partition (ds='2011') select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@authorization_part_fail@ds=2011 +POSTHOOK: Lineage: 
authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2011') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part_fail(key) partition (ds='2011') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part_fail +partition ds=2011 +columnName key +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292570500 +grantor hive_test_user +PREHOOK: query: show grant group hive_test_group1 on table authorization_part_fail partition (ds='2011') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part_fail partition (ds='2011') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part_fail +partition ds=2011 +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292570500 +grantor hive_test_user +PREHOOK: query: select key, value from authorization_part_fail where ds='2011' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part_fail@ds=2011 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_23-21-40_573_6139158379776569501/-mr-10000 +POSTHOOK: query: select key, value from authorization_part_fail where ds='2011' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part_fail@ds=2011 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_23-21-40_573_6139158379776569501/-mr-10000 +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 +0 val_0 +0 val_0 +2 val_2 +4 val_4 +5 val_5 +5 val_5 +5 val_5 +8 val_8 +9 val_9 +10 val_10 +11 val_11 +12 val_12 +12 val_12 +15 val_15 +15 val_15 +17 val_17 +18 val_18 +18 val_18 +19 val_19 +PREHOOK: query: select key,value, ds from authorization_part_fail where ds>='2010' order by key, ds limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part_fail@ds=2010 +PREHOOK: Input: default@authorization_part_fail@ds=2011 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_23-21-46_703_8547570686064698117/-mr-10000 +POSTHOOK: query: select key,value, ds from authorization_part_fail where ds>='2010' order by key, ds limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part_fail@ds=2010 +POSTHOOK: Input: default@authorization_part_fail@ds=2011 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_23-21-46_703_8547570686064698117/-mr-10000 +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 val_0 2010 +0 val_0 2010 +0 val_0 2010 +0 val_0 2011 +0 val_0 2011 +0 val_0 2011 +2 val_2 2010 +2 val_2 2011 +4 val_4 2010 +4 val_4 2011 +5 val_5 2010 +5 val_5 2010 +5 val_5 2010 +5 val_5 2011 +5 val_5 2011 +5 val_5 2011 +8 val_8 2010 +8 val_8 2011 +9 val_9 2010 +9 val_9 2011 +PREHOOK: query: revoke `select` on table authorization_part_fail partition (ds='2010') from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table authorization_part_fail partition (ds='2010') from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part_fail PARTITION(ds=2011).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +Authorization failed:No privilege 'Select' found for inputs { database:default, table:authorization_part_fail, partitionName:ds=2010, columnName:value}. Use show grant to get more details. 
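Reviewer note: authorization_part.q.out is the negative test for partition-level privileges, which are opt-in per table through the PARTITION_LEVEL_PRIVILEGE table property. Condensed from the golden output above: while the group holds the table-level Select grant, both partitions are readable; after the partition-scoped revoke, only the column-level grant on key survives for ds=2010, so the final query fails on column 'value'. The key statements, as they appear above:

    create table authorization_part_fail (key int, value string) partitioned by (ds string);
    ALTER TABLE authorization_part_fail SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE");
    grant `select` on table authorization_part_fail to group hive_test_group1;
    show grant group hive_test_group1 on table authorization_part_fail partition (ds='2010');
    revoke `select` on table authorization_part_fail partition (ds='2010') from group hive_test_group1;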
Index: ql/src/test/results/clientpositive/alter4.q.out =================================================================== --- ql/src/test/results/clientpositive/alter4.q.out (revision 1050266) +++ ql/src/test/results/clientpositive/alter4.q.out (working copy) @@ -10,13 +10,13 @@ key int value string -Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:thiruvel, createTime:1286800231, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1286800231}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:heyongqiang, createTime:1290068480, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290068480}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@set_bucketing_test PREHOOK: Output: default@set_bucketing_test POSTHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@set_bucketing_test POSTHOOK: Output: default@set_bucketing_test PREHOOK: query: DESCRIBE EXTENDED set_bucketing_test @@ -26,7 +26,7 @@ key int value string -Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, owner:thiruvel, createTime:1286800231, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=thiruvel, last_modified_time=1286800231, transient_lastDdlTime=1286800231}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:set_bucketing_test, dbName:default, 
owner:heyongqiang, createTime:1290068480, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=heyongqiang, last_modified_time=1290068480, transient_lastDdlTime=1290068480}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: -- Cleanup DROP TABLE set_bucketing_test PREHOOK: type: DROPTABLE @@ -77,13 +77,13 @@ key int value string -Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:thiruvel, createTime:1286800232, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1286800232}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:heyongqiang, createTime:1290068481, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:10, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290068481}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: alter4_db@set_bucketing_test PREHOOK: Output: alter4_db@set_bucketing_test POSTHOOK: query: ALTER TABLE set_bucketing_test NOT CLUSTERED -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: alter4_db@set_bucketing_test POSTHOOK: Output: alter4_db@set_bucketing_test PREHOOK: query: DESCRIBE EXTENDED set_bucketing_test @@ -93,7 +93,7 @@ key int value string -Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:thiruvel, createTime:1286800232, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], 
location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=thiruvel, last_modified_time=1286800232, transient_lastDdlTime=1286800232}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:set_bucketing_test, dbName:alter4_db, owner:heyongqiang, createTime:1290068481, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/alter4_db.db/set_bucketing_test, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{last_modified_by=heyongqiang, last_modified_time=1290068481, transient_lastDdlTime=1290068481}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: DROP TABLE set_bucketing_test PREHOOK: type: DROPTABLE PREHOOK: Input: alter4_db@set_bucketing_test Index: ql/src/test/results/clientpositive/authorization_1.q.out =================================================================== --- ql/src/test/results/clientpositive/authorization_1.q.out (revision 0) +++ ql/src/test/results/clientpositive/authorization_1.q.out (revision 0) @@ -0,0 +1,412 @@ +PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create table src_autho_test as select * from src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_autho_test +PREHOOK: query: revoke `ALL` on table src_autho_test from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `ALL` on table src_autho_test from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: --table grant to user + +grant `select` on table src_autho_test to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --table grant to user + +grant `select` on table src_autho_test to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292567447 +grantor hive_test_user +PREHOOK: query: show grant user hive_test_user on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: 
file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-30-47_550_1512695429257033493/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-30-47_550_1512695429257033493/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: revoke `select` on table src_autho_test from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table src_autho_test from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: show grant user hive_test_user on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: --column grant to user + +grant `select`(key) on table src_autho_test to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --column grant to user + +grant `select`(key) on table src_autho_test to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: show grant user hive_test_user on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +columnName key +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292567455 +grantor hive_test_user +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-30-55_772_6274083496243050072/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-30-55_772_6274083496243050072/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: revoke `select`(key) on table src_autho_test from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table src_autho_test from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: show grant user hive_test_user on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: --table grant to group + +grant `select` on table src_autho_test to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --table grant to group + +grant `select` on table src_autho_test to group 
hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292567463 +grantor hive_test_user +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-03_700_7275195249200178582/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-03_700_7275195249200178582/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: revoke `select` on table src_autho_test from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table src_autho_test from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: --column grant to group + +grant `select`(key) on table src_autho_test to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --column grant to group + +grant `select`(key) on table src_autho_test to group hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +columnName key +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292567471 +grantor hive_test_user +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-11_544_6787651330159557242/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-11_544_6787651330159557242/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: revoke `select`(key) on table src_autho_test from group 
hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table src_autho_test from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: --role +create role src_role +PREHOOK: type: CREATEROLE +POSTHOOK: query: --role +create role src_role +POSTHOOK: type: CREATEROLE +PREHOOK: query: grant role src_role to user hive_test_user +PREHOOK: type: GRANT_ROLE +POSTHOOK: query: grant role src_role to user hive_test_user +POSTHOOK: type: GRANT_ROLE +PREHOOK: query: show role grant user hive_test_user +PREHOOK: type: SHOW_ROLE_GRANT +POSTHOOK: query: show role grant user hive_test_user +POSTHOOK: type: SHOW_ROLE_GRANT +role name:src_role +role name:src_role +PREHOOK: query: --column grant to role + +grant `select`(key) on table src_autho_test to role src_role +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --column grant to role + +grant `select`(key) on table src_autho_test to role src_role +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant role src_role on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant role src_role on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: show grant role src_role on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant role src_role on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +columnName key +principalName src_role +principalType ROLE +privilege Select +grantTime 1292567480 +grantor hive_test_user +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-20_263_1898304116616741455/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-20_263_1898304116616741455/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: revoke `select`(key) on table src_autho_test from role src_role +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table src_autho_test from role src_role +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: --table grant to role + +grant `select` on table src_autho_test to role src_role +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: --table grant to role + +grant `select` on table src_autho_test to role src_role +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-26_813_7645696827575089587/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: 
file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-26_813_7645696827575089587/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: show grant role src_role on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant role src_role on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +principalName src_role +principalType ROLE +privilege Select +grantTime 1292567486 +grantor hive_test_user +PREHOOK: query: show grant role src_role on table src_autho_test(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant role src_role on table src_autho_test(key) +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: revoke `select` on table src_autho_test from role src_role +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table src_autho_test from role src_role +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: -- drop role +drop role src_role +PREHOOK: type: DROPROLE +POSTHOOK: query: -- drop role +drop role src_role +POSTHOOK: type: DROPROLE +PREHOOK: query: drop table src_autho_test +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: default@src_autho_test +POSTHOOK: query: drop table src_autho_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: default@src_autho_test Index: ql/src/test/results/clientpositive/authorization_2.q.out =================================================================== --- ql/src/test/results/clientpositive/authorization_2.q.out (revision 0) +++ ql/src/test/results/clientpositive/authorization_2.q.out (revision 0) @@ -0,0 +1,1184 @@ +PREHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +PREHOOK: type: CREATETABLE +POSTHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: default@authorization_part +PREHOOK: query: revoke `ALL` on table authorization_part from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `ALL` on table authorization_part from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +PREHOOK: type: ALTERTABLE_PROPERTIES +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part +POSTHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") +POSTHOOK: type: ALTERTABLE_PROPERTIES +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part +PREHOOK: query: -- column grant to user +grant `Create` on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: -- column grant to user +grant `Create` on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `Update` on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Update` on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `Drop` on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `Drop` on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `select` on table src to user hive_test_user +PREHOOK: 
type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table src to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292567495 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Update +grantTime 1292567495 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Drop +grantTime 1292567495 +grantor hive_test_user +PREHOOK: query: alter table authorization_part add partition (ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@authorization_part +POSTHOOK: query: alter table authorization_part add partition (ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292567495 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Update +grantTime 1292567495 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Drop +grantTime 1292567495 +grantor hive_test_user +PREHOOK: query: grant `select`(key) on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select`(key) on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition 
(ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +columnName key +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292567504 +grantor hive_test_user +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +columnName key +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292567495 +grantor hive_test_user +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-44_729_6159445007917571115/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-44_729_6159445007917571115/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select`(key) on table authorization_part from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table authorization_part from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +columnName key +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292567504 +grantor hive_test_user +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-51_427_8272622381150203232/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-31-51_427_8272622381150203232/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select`(key) on table authorization_part partition (ds='2010') from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table authorization_part partition (ds='2010') from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- table grant to user +show grant user hive_test_user on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: -- table grant to user +show grant user hive_test_user on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: 
authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292567495 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Update +grantTime 1292567495 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Drop +grantTime 1292567495 +grantor hive_test_user +PREHOOK: query: alter table authorization_part add partition (ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@authorization_part +POSTHOOK: query: alter table authorization_part add partition (ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292567519 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Update +grantTime 1292567519 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Drop +grantTime 1292567519 +grantor hive_test_user +PREHOOK: query: grant `select` on table authorization_part to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table authorization_part to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table 
authorization_part partition (ds='2010') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292567529 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Update +grantTime 1292567529 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Drop +grantTime 1292567529 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292567529 +grantor hive_test_user +PREHOOK: query: show grant user hive_test_user on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292567495 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Update +grantTime 1292567495 +grantor hive_test_user + +database 
default +table authorization_part +principalName hive_test_user +principalType USER +privilege Drop +grantTime 1292567495 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292567519 +grantor hive_test_user +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-32-09_525_5244658946776244269/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-32-09_525_5244658946776244269/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select` on table authorization_part from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table authorization_part from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292567495 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Update +grantTime 1292567495 +grantor hive_test_user + +database default +table authorization_part +principalName hive_test_user +principalType USER +privilege Drop +grantTime 1292567495 
+grantor hive_test_user +PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292567529 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Update +grantTime 1292567529 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Drop +grantTime 1292567529 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292567529 +grantor hive_test_user +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-32-15_947_8100424728454115880/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-32-15_947_8100424728454115880/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select` on table authorization_part partition (ds='2010') from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table authorization_part partition (ds='2010') from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Create +grantTime 1292567529 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Update +grantTime 1292567529 +grantor hive_test_user + +database default +table authorization_part +partition ds=2010 +principalName hive_test_user +principalType USER +privilege Drop +grantTime 1292567529 +grantor hive_test_user +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- column grant to group + +show grant group hive_test_group1 on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: -- column grant to group + +show grant group hive_test_group1 on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part add partition (ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@authorization_part +POSTHOOK: query: alter table authorization_part add partition (ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: 
default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: grant `select`(key) on table authorization_part to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select`(key) on table authorization_part to group hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src 
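For readability, the column-level cycle this scenario exercises, collected in one place (a sketch assembled from the PREHOOK/POSTHOOK statements surrounding it; the table, column, and group names come from the test setup):

grant `select`(key) on table authorization_part to group hive_test_group1;
show grant group hive_test_group1 on table authorization_part(key);
show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010');
revoke `select`(key) on table authorization_part from group hive_test_group1;

The backquotes appear to keep privilege names such as `select` and `drop` from colliding with the HiveQL keywords of the same spelling.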
+POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +columnName key +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292567551 +grantor hive_test_user +PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +columnName key +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292567543 +grantor hive_test_user +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: 
default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-32-32_179_1859237138227238635/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-32-32_179_1859237138227238635/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select`(key) on table authorization_part from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table authorization_part from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant 
group hive_test_group1 on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +columnName key +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292567551 +grantor hive_test_user +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-32-38_657_7188861192410002773/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-32-38_657_7188861192410002773/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select`(key) on table authorization_part partition (ds='2010') from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select`(key) on table authorization_part partition (ds='2010') from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part 
PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part(key) partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- table grant to group +show grant group hive_test_group1 on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: -- table grant to group +show grant group hive_test_group1 on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: 
Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part add partition (ds='2010') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Input: default@authorization_part +POSTHOOK: query: alter table authorization_part add partition (ds='2010') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: grant `select` on table authorization_part to group hive_test_group1 +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table authorization_part to group hive_test_group1 +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: alter table authorization_part drop partition (ds='2010') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: alter table authorization_part drop partition (ds='2010') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: query: insert overwrite table authorization_part partition (ds='2010') select key, value from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@authorization_part@ds=2010 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: 
authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292567574 +grantor hive_test_user +PREHOOK: query: show grant group hive_test_group1 on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292567566 +grantor hive_test_user +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-32-55_104_6462994010195124243/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-32-55_104_6462994010195124243/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part 
PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select` on table authorization_part from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table authorization_part from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part 
PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] + +database default +table authorization_part +partition ds=2010 +principalName hive_test_group1 +principalType GROUP +privilege Select +grantTime 1292567574 +grantor hive_test_user +PREHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@authorization_part@ds=2010 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-33-01_749_5408406852569793238/-mr-10000 +POSTHOOK: query: select key from authorization_part where ds='2010' order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@authorization_part@ds=2010 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-33-01_749_5408406852569793238/-mr-10000 +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +0 +0 +0 +2 +4 +5 +5 +5 +8 +9 +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +PREHOOK: query: revoke `select` on table authorization_part partition (ds='2010') from group hive_test_group1 +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table authorization_part partition (ds='2010') from group hive_test_group1 +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part partition (ds='2010') +POSTHOOK: type: SHOW_GRANT +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: revoke `select` on table src from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table src from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] +PREHOOK: query: drop table authorization_part +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@authorization_part +PREHOOK: Output: default@authorization_part +POSTHOOK: query: drop table authorization_part +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@authorization_part +POSTHOOK: Output: default@authorization_part +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: authorization_part PARTITION(ds=2010).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/authorization_3.q.out =================================================================== --- ql/src/test/results/clientpositive/authorization_3.q.out (revision 0) +++ ql/src/test/results/clientpositive/authorization_3.q.out (revision 0) @@ -0,0 +1,87 @@ +PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create table src_autho_test as select * from src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_autho_test +PREHOOK: query: revoke `ALL` on table src_autho_test from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `ALL` on table src_autho_test from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: grant `drop` on table src_autho_test to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `drop` on table src_autho_test to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: grant `select` on table src_autho_test to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `select` on table src_autho_test to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +principalName hive_test_user +principalType USER +privilege Drop +grantTime 1292567595 +grantor hive_test_user + +database default +table src_autho_test +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292567595 +grantor hive_test_user +PREHOOK: query: revoke `select` on table src_autho_test from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `select` on table src_autho_test from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: revoke `drop` on table src_autho_test from user hive_test_user 
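The remainder of authorization_3.q.out extends the single-privilege cases above with comma-separated privilege lists, granting and revoking several privileges, including column-qualified ones, in one statement; the golden output for these follows. The forms exercised, as a sketch (names taken from the test):

grant `drop`,`select` on table src_autho_test to user hive_test_user;
revoke `drop`,`select` on table src_autho_test from user hive_test_user;
grant `drop`,`select`(key), `select`(value) on table src to user hive_test_user;
revoke `drop`,`select`(key), `select`(value) on table src from user hive_test_user;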
+PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `drop` on table src_autho_test from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: grant `drop`,`select` on table src_autho_test to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `drop`,`select` on table src_autho_test to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +principalName hive_test_user +principalType USER +privilege Drop +grantTime 1292567595 +grantor hive_test_user + +database default +table src_autho_test +principalName hive_test_user +principalType USER +privilege Select +grantTime 1292567595 +grantor hive_test_user +PREHOOK: query: revoke `drop`,`select` on table src_autho_test from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `drop`,`select` on table src_autho_test from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE +PREHOOK: query: grant `drop`,`select`(key), `select`(value) on table src to user hive_test_user +PREHOOK: type: GRANT_PRIVILEGE +POSTHOOK: query: grant `drop`,`select`(key), `select`(value) on table src to user hive_test_user +POSTHOOK: type: GRANT_PRIVILEGE +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT +PREHOOK: query: revoke `drop`,`select`(key), `select`(value) on table src from user hive_test_user +PREHOOK: type: REVOKE_PRIVILEGE +POSTHOOK: query: revoke `drop`,`select`(key), `select`(value) on table src from user hive_test_user +POSTHOOK: type: REVOKE_PRIVILEGE Index: ql/src/test/results/clientpositive/authorization_4.q.out =================================================================== --- ql/src/test/results/clientpositive/authorization_4.q.out (revision 0) +++ ql/src/test/results/clientpositive/authorization_4.q.out (revision 0) @@ -0,0 +1,55 @@ +PREHOOK: query: create table src_autho_test as select * from src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +POSTHOOK: query: create table src_autho_test as select * from src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_autho_test +PREHOOK: query: show grant user hive_test_user on table src_autho_test +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant user hive_test_user on table src_autho_test +POSTHOOK: type: SHOW_GRANT + +database default +table src_autho_test +principalName hive_test_user +principalType USER +privilege All +grantTime 1292567601 +grantor hive_test_user +PREHOOK: query: select key from src_autho_test order by key limit 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src_autho_test +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-33-22_016_7487510540844272228/-mr-10000 +POSTHOOK: query: select key from src_autho_test order by key limit 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-12-16_22-33-22_016_7487510540844272228/-mr-10000 +0 +0 +0 +10 +100 +100 +103 +103 +104 +104 +105 +11 +111 +113 +113 +114 +116 +118 +118 +119 +PREHOOK: query: drop table src_autho_test +PREHOOK: type: DROPTABLE 
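In summary, authorization_4.q.out checks the implicit grant a table creator receives: immediately after CREATE TABLE ... AS SELECT, SHOW GRANT already lists an All privilege for the creating user, so the subsequent SELECT needs no explicit grant. Condensed (table and user names from the test):

create table src_autho_test as select * from src;
show grant user hive_test_user on table src_autho_test;  -- lists: privilege All, grantor hive_test_user
select key from src_autho_test order by key limit 20;    -- allowed by the implicit All grant
drop table src_autho_test;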
+PREHOOK: Input: default@src_autho_test +PREHOOK: Output: default@src_autho_test +POSTHOOK: query: drop table src_autho_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@src_autho_test +POSTHOOK: Output: default@src_autho_test Index: ql/src/test/results/clientpositive/bucket_groupby.q.out =================================================================== --- ql/src/test/results/clientpositive/bucket_groupby.q.out (revision 1050266) +++ ql/src/test/results/clientpositive/bucket_groupby.q.out (working copy) @@ -11,7 +11,7 @@ value string ds string -Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:sdong, createTime:1288389460, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/data/users/sdong/www/hive-trunk/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{transient_lastDdlTime=1288389460}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:heyongqiang, createTime:1290108934, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{transient_lastDdlTime=1290108934}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: insert overwrite table clustergroupby partition (ds='100') select key, value from src sort by key PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -107,11 +107,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='100' group by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=100 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-57-45_369_7380463323239974897/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-35-47_282_4113045751057786056/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='100' group by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=100 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-57-45_369_7380463323239974897/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-35-47_282_4113045751057786056/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] 0 3 @@ -125,11 +125,11 @@ 113 2 114 1 PREHOOK: query: alter table clustergroupby clustered by (key) into 1 buckets -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@clustergroupby PREHOOK: Output: default@clustergroupby POSTHOOK: query: alter table clustergroupby clustered by (key) into 1 buckets -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@clustergroupby POSTHOOK: Output: default@clustergroupby POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -144,7 +144,7 @@ value string ds string -Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:sdong, createTime:1288389460, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/data/users/sdong/www/hive-trunk/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, last_modified_by=sdong, last_modified_time=1288389468, transient_lastDdlTime=1288389468, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:heyongqiang, createTime:1290108934, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, last_modified_by=heyongqiang, last_modified_time=1290108955, transient_lastDdlTime=1290108955, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: insert overwrite table clustergroupby partition (ds='101') select key, value from src distribute by key PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -246,11 +246,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='101' group by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-57-53_750_4694546524307257085/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-06_455_5652519335128156482/-mr-10000 POSTHOOK: query: select key, 
count(1) from clustergroupby where ds='101' group by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-57-53_750_4694546524307257085/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-06_455_5652519335128156482/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -354,11 +354,11 @@ PREHOOK: query: select length(key), count(1) from clustergroupby where ds='101' group by length(key) limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-57-57_183_3467407082757519286/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-13_777_4853958758753332533/-mr-10000 POSTHOOK: query: select length(key), count(1) from clustergroupby where ds='101' group by length(key) limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-57-57_183_3467407082757519286/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-13_777_4853958758753332533/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -453,11 +453,11 @@ PREHOOK: query: select abs(length(key)), count(1) from clustergroupby where ds='101' group by abs(length(key)) limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-00_556_1225036933831928400/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-22_353_1148410217887255773/-mr-10000 POSTHOOK: query: select abs(length(key)), count(1) from clustergroupby where ds='101' group by abs(length(key)) limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-00_556_1225036933831928400/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-22_353_1148410217887255773/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -562,11 +562,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='101' group by key,3 limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-03_994_2628288731314011109/-mr-10000 
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-29_836_2726844280049537094/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='101' group by key,3 limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-03_994_2628288731314011109/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-29_836_2726844280049537094/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -675,11 +675,11 @@ PREHOOK: query: select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-07_367_3684028022557451628/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-36_869_6506961229190926826/-mr-10000 POSTHOOK: query: select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-07_367_3684028022557451628/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-36_869_6506961229190926826/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -777,12 +777,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=100 PREHOOK: Input: default@clustergroupby@ds=101 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-10_796_7851925785141685773/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-45_158_7554312247658259364/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby group by key POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=100 POSTHOOK: Input: default@clustergroupby@ds=101 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-10_796_7851925785141685773/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-36-45_158_7554312247658259364/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1185,12 +1185,12 @@ PREHOOK: query: --sort columns-- alter table clustergroupby clustered by (value) sorted by (key, value) 
into 1 buckets -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@clustergroupby PREHOOK: Output: default@clustergroupby POSTHOOK: query: --sort columns-- alter table clustergroupby clustered by (value) sorted by (key, value) into 1 buckets -POSTHOOK: type: null +POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@clustergroupby POSTHOOK: Output: default@clustergroupby POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1209,7 +1209,7 @@ value string ds string -Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:sdong, createTime:1288389460, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/data/users/sdong/www/hive-trunk/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[value], sortCols:[Order(col:key, order:1), Order(col:value, order:1)], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, last_modified_by=sdong, last_modified_time=1288389494, transient_lastDdlTime=1288389494, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:heyongqiang, createTime:1290108934, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[value], sortCols:[Order(col:key, order:1), Order(col:value, order:1)], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, last_modified_by=heyongqiang, last_modified_time=1290109014, transient_lastDdlTime=1290109014, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: insert overwrite table clustergroupby partition (ds='102') select key, value from src distribute by value sort by key, value PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -1313,11 +1313,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='102' group by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=102 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-19_617_397682497046947245/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-04_173_5662986070533466247/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='102' group by key limit 10 POSTHOOK: 
type: QUERY POSTHOOK: Input: default@clustergroupby@ds=102 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-19_617_397682497046947245/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-04_173_5662986070533466247/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1423,11 +1423,11 @@ PREHOOK: query: select value, count(1) from clustergroupby where ds='102' group by value limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=102 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-23_046_1077430162048304187/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-11_984_9094618456601035974/-mr-10000 POSTHOOK: query: select value, count(1) from clustergroupby where ds='102' group by value limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=102 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-23_046_1077430162048304187/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-11_984_9094618456601035974/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1543,11 +1543,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='102' group by key, value limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=102 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-28_476_3643193095660435074/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-18_617_5093814006573436611/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='102' group by key, value limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=102 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-28_476_3643193095660435074/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-18_617_5093814006573436611/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1565,11 +1565,11 @@ 113 2 114 1 PREHOOK: query: alter table clustergroupby clustered by (value, key) sorted by (key) into 1 buckets -PREHOOK: type: null +PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@clustergroupby PREHOOK: Output: default@clustergroupby POSTHOOK: query: alter table clustergroupby clustered by (value, key) sorted by (key) into 1 buckets -POSTHOOK: type: null +POSTHOOK: type: 
ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@clustergroupby POSTHOOK: Output: default@clustergroupby POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1592,7 +1592,7 @@ value string ds string -Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:sdong, createTime:1288389460, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/data/users/sdong/www/hive-trunk/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[value, key], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=3, numFiles=3, last_modified_by=sdong, last_modified_time=1288389511, transient_lastDdlTime=1288389511, numRows=1500, totalSize=17436}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:clustergroupby, dbName:default, owner:heyongqiang, createTime:1290108934, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/clustergroupby, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[value, key], sortCols:[Order(col:key, order:1)], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=3, numFiles=3, last_modified_by=heyongqiang, last_modified_time=1290109047, transient_lastDdlTime=1290109047, numRows=1500, totalSize=17436}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: insert overwrite table clustergroupby partition (ds='103') select key, value from src distribute by value, key sort by key PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -1700,11 +1700,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='103' group by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=103 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-36_974_789554075807114106/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-38_066_2359801468488363119/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='103' group by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=103 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-36_974_789554075807114106/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-38_066_2359801468488363119/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE 
[(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -1824,11 +1824,11 @@ PREHOOK: query: select key, count(1) from clustergroupby where ds='103' group by value, key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@clustergroupby@ds=103 -PREHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-40_621_2070392858793462231/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-45_975_5803139738369049064/-mr-10000 POSTHOOK: query: select key, count(1) from clustergroupby where ds='103' group by value, key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby@ds=103 -POSTHOOK: Output: file:/tmp/sdong/hive_2010-10-29_14-58-40_621_2070392858793462231/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-37-45_975_5803139738369049064/-mr-10000 POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=100).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/create_default_prop.q.out =================================================================== --- ql/src/test/results/clientpositive/create_default_prop.q.out (revision 1050266) +++ ql/src/test/results/clientpositive/create_default_prop.q.out (working copy) @@ -9,7 +9,7 @@ POSTHOOK: type: DESCTABLE a string -Detailed Table Information Table(tableName:table_p1, dbName:default, owner:thiruvel, createTime:1286825949, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/table_p1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{p1=v1, transient_lastDdlTime=1286825949, P2=v21=v22=v23}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:table_p1, dbName:default, owner:heyongqiang, createTime:1290111690, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/table_p1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{p1=v1, transient_lastDdlTime=1290111690, P2=v21=v22=v23}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: 
CREATE TABLE table_p2 LIKE table_p1 PREHOOK: type: CREATETABLE POSTHOOK: query: CREATE TABLE table_p2 LIKE table_p1 @@ -21,12 +21,12 @@ POSTHOOK: type: DESCTABLE a string -Detailed Table Information Table(tableName:table_p2, dbName:default, owner:thiruvel, createTime:1286825949, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/table_p2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{EXTERNAL=FALSE, p1=v1, transient_lastDdlTime=1286825949, P2=v21=v22=v23}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:table_p2, dbName:default, owner:heyongqiang, createTime:1290111690, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/table_p2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290111690}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: CREATE TABLE table_p3 AS SELECT * FROM table_p1 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@table_p1 POSTHOOK: query: CREATE TABLE table_p3 AS SELECT * FROM table_p1 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@table_p1 POSTHOOK: Output: default@table_p3 PREHOOK: query: DESC EXTENDED table_p3 @@ -35,4 +35,4 @@ POSTHOOK: type: DESCTABLE a string -Detailed Table Information Table(tableName:table_p3, dbName:default, owner:thiruvel, createTime:1286825953, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/table_p3, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{p3=v3, transient_lastDdlTime=1286825953}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:table_p3, dbName:default, owner:heyongqiang, createTime:1290111696, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:a, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/table_p3, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, 
serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{p3=v3, transient_lastDdlTime=1290111696}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/ctas.q.out =================================================================== --- ql/src/test/results/clientpositive/ctas.q.out (revision 1050266) +++ ql/src/test/results/clientpositive/ctas.q.out (working copy) @@ -6,15 +6,15 @@ PREHOOK: query: select * from nzhang_Tmp PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_tmp -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-08-50_551_5274699533452501897/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-38_152_6607893365576187007/-mr-10000 POSTHOOK: query: select * from nzhang_Tmp POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_tmp -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-08-50_551_5274699533452501897/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-38_152_6607893365576187007/-mr-10000 PREHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT ABSTRACT SYNTAX TREE: (TOK_CREATETABLE nzhang_CTAS1 TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key) k) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL k)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) @@ -64,7 +64,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/nzhang/hive_2010-09-14_16-08-50_746_7813189274106710723/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-38_294_2534653608830924093/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -92,7 +92,7 @@ Move Operator files: hdfs directory: true - destination: pfile:///data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/nzhang_ctas1 + destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/nzhang_ctas1 Stage: Stage-3 Create Table Operator: @@ -107,20 +107,20 @@ PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@nzhang_CTAS1 PREHOOK: query: select * from nzhang_CTAS1 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_ctas1 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-08-57_963_8224718269640555492/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-54_890_340708639379092873/-mr-10000 POSTHOOK: query: select * from nzhang_CTAS1 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_ctas1 -POSTHOOK: 
Output: file:/tmp/nzhang/hive_2010-09-14_16-08-57_963_8224718269640555492/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-54_890_340708639379092873/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -132,9 +132,9 @@ 104 val_104 104 val_104 PREHOOK: query: explain create table nzhang_ctas2 as select * from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain create table nzhang_ctas2 as select * from src sort by key, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT ABSTRACT SYNTAX TREE: (TOK_CREATETABLE nzhang_ctas2 TOK_LIKETABLE (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) @@ -184,7 +184,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/nzhang/hive_2010-09-14_16-08-58_369_8941154562114122989/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-21-55_113_2975775360233211417/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -212,7 +212,7 @@ Move Operator files: hdfs directory: true - destination: pfile:///data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/nzhang_ctas2 + destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/nzhang_ctas2 Stage: Stage-3 Create Table Operator: @@ -227,20 +227,20 @@ PREHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@nzhang_ctas2 PREHOOK: query: select * from nzhang_ctas2 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_ctas2 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-06_188_7275645807830573530/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-10_092_5599426394139957980/-mr-10000 POSTHOOK: query: select * from nzhang_ctas2 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_ctas2 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-06_188_7275645807830573530/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-10_092_5599426394139957980/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -252,9 +252,9 @@ 104 val_104 104 val_104 PREHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT ABSTRACT SYNTAX TREE: (TOK_CREATETABLE nzhang_ctas3 TOK_LIKETABLE (TOK_TABLESERIALIZER (TOK_SERDENAME "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe")) 
TOK_TBLRCFILE (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (/ (TOK_TABLE_OR_COL key) 2) half_key) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_TABLE_OR_COL value) "_con") conb)) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL half_key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL conb))) (TOK_LIMIT 10)))) @@ -304,7 +304,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/nzhang/hive_2010-09-14_16-09-06_591_311725779652798393/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-10_269_5024522055775704015/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -332,7 +332,7 @@ Move Operator files: hdfs directory: true - destination: pfile:///data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/nzhang_ctas3 + destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/nzhang_ctas3 Stage: Stage-3 Create Table Operator: @@ -348,20 +348,20 @@ PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@nzhang_ctas3 PREHOOK: query: select * from nzhang_ctas3 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_ctas3 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-14_409_3860619873030897976/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-26_253_5950487784027845463/-mr-10000 POSTHOOK: query: select * from nzhang_ctas3 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_ctas3 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-14_409_3860619873030897976/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-26_253_5950487784027845463/-mr-10000 0.0 val_0_con 0.0 val_0_con 0.0 val_0_con @@ -390,11 +390,11 @@ PREHOOK: query: select * from nzhang_ctas3 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_ctas3 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-15_055_2704821239289766796/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-26_587_8435204750824479169/-mr-10000 POSTHOOK: query: select * from nzhang_ctas3 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_ctas3 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-15_055_2704821239289766796/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-26_587_8435204750824479169/-mr-10000 0.0 val_0_con 0.0 val_0_con 0.0 val_0_con @@ -406,9 +406,9 @@ 4.0 val_8_con 4.5 val_9_con PREHOOK: query: explain create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain create table nzhang_ctas4 row format 
delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT ABSTRACT SYNTAX TREE: (TOK_CREATETABLE nzhang_ctas4 TOK_LIKETABLE (TOK_TABLEROWFORMAT (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD ','))) TOK_TBLTEXTFILE (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) @@ -458,7 +458,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/nzhang/hive_2010-09-14_16-09-15_571_515417720676742183/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-26_831_6665219081002226800/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -486,7 +486,7 @@ Move Operator files: hdfs directory: true - destination: pfile:///data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/nzhang_ctas4 + destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/nzhang_ctas4 Stage: Stage-3 Create Table Operator: @@ -502,20 +502,20 @@ PREHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@nzhang_ctas4 PREHOOK: query: select * from nzhang_ctas4 PREHOOK: type: QUERY PREHOOK: Input: default@nzhang_ctas4 -PREHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-22_874_9020816893750253212/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_599_6843607487274708798/-mr-10000 POSTHOOK: query: select * from nzhang_ctas4 POSTHOOK: type: QUERY POSTHOOK: Input: default@nzhang_ctas4 -POSTHOOK: Output: file:/tmp/nzhang/hive_2010-09-14_16-09-22_874_9020816893750253212/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_599_6843607487274708798/-mr-10000 0 val_0 0 val_0 0 val_0 @@ -527,9 +527,9 @@ 104 val_104 104 val_104 PREHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain extended create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT ABSTRACT SYNTAX TREE: (TOK_CREATETABLE nzhang_ctas5 TOK_LIKETABLE (TOK_TABLEROWFORMAT (TOK_SERDEPROPS (TOK_TABLEROWFORMATFIELD ',') (TOK_TABLEROWFORMATLINES '\012'))) TOK_TBLTEXTFILE (TOK_QUERY (TOK_FROM (TOK_TABREF src)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL key)) (TOK_SELEXPR (TOK_TABLE_OR_COL value))) (TOK_SORTBY 
(TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 10)))) @@ -569,9 +569,9 @@ type: string Needs Tagging: false Path -> Alias: - pfile:/data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/src [src] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/src [src] Path -> Partition: - pfile:/data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/src + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/src Partition base file name: src input format: org.apache.hadoop.mapred.TextInputFormat @@ -582,12 +582,12 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/src + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/src name src serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1284504429 + transient_lastDdlTime 1290111684 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -598,12 +598,12 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/src + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/src name src serialization.ddl struct src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1284504429 + transient_lastDdlTime 1290111684 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: src name: src @@ -613,7 +613,7 @@ File Output Operator compressed: false GlobalTableId: 0 - directory: file:/tmp/nzhang/hive_2010-09-14_16-09-23_273_3246979497224742620/-mr-10002 + directory: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_827_2917364249718384116/-mr-10002 NumFilesPerFileSink: 1 table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat @@ -629,7 +629,7 @@ Stage: Stage-2 Map Reduce Alias -> Map Operator Tree: - file:/tmp/nzhang/hive_2010-09-14_16-09-23_273_3246979497224742620/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_827_2917364249718384116/-mr-10002 Reduce Output Operator key expressions: expr: _col0 @@ -645,9 +645,9 @@ type: string Needs Tagging: false Path -> Alias: - file:/tmp/nzhang/hive_2010-09-14_16-09-23_273_3246979497224742620/-mr-10002 [file:/tmp/nzhang/hive_2010-09-14_16-09-23_273_3246979497224742620/-mr-10002] + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_827_2917364249718384116/-mr-10002 [file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_827_2917364249718384116/-mr-10002] Path -> Partition: - file:/tmp/nzhang/hive_2010-09-14_16-09-23_273_3246979497224742620/-mr-10002 + file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-22-42_827_2917364249718384116/-mr-10002 Partition base file name: -mr-10002 input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat @@ -669,9 +669,9 @@ File Output Operator compressed: false GlobalTableId: 1 - directory: pfile:/data/users/nzhang/work/784/apache-hive/build/ql/scratchdir/hive_2010-09-14_16-09-23_273_3246979497224742620/-ext-10001 + directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-22-42_827_2917364249718384116/-ext-10001 NumFilesPerFileSink: 1 - Stats Publishing Key Prefix: pfile:/data/users/nzhang/work/784/apache-hive/build/ql/scratchdir/hive_2010-09-14_16-09-23_273_3246979497224742620/-ext-10001/ + Stats Publishing Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-22-42_827_2917364249718384116/-ext-10001/ table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -690,8 +690,8 @@ Move Operator files: hdfs directory: true - source: pfile:/data/users/nzhang/work/784/apache-hive/build/ql/scratchdir/hive_2010-09-14_16-09-23_273_3246979497224742620/-ext-10001 - destination: pfile:///data/users/nzhang/work/784/apache-hive/build/ql/test/data/warehouse/nzhang_ctas5 + source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-22-42_827_2917364249718384116/-ext-10001 + destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/nzhang_ctas5 Stage: Stage-3 Create Table Operator: @@ -709,10 +709,10 @@ PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src POSTHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: default@nzhang_ctas5 PREHOOK: query: create table nzhang_ctas6 (key string, `to` string) @@ -731,10 +731,10 @@ POSTHOOK: Lineage: nzhang_ctas6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: nzhang_ctas6.to SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6 -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@nzhang_ctas6 POSTHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6 -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@nzhang_ctas6 POSTHOOK: Output: default@nzhang_ctas7 POSTHOOK: Lineage: nzhang_ctas6.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] Index: ql/src/test/results/clientpositive/input19.q.out =================================================================== --- ql/src/test/results/clientpositive/input19.q.out (revision 1050266) +++ ql/src/test/results/clientpositive/input19.q.out (working copy) @@ -1,6 +1,6 @@ -PREHOOK: query: create table apachelog(ipaddress STRING,identd STRING,user STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 
'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE +PREHOOK: query: create table apachelog(ipaddress STRING,identd STRING,user_name STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE PREHOOK: type: CREATETABLE -POSTHOOK: query: create table apachelog(ipaddress STRING,identd STRING,user STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE +POSTHOOK: query: create table apachelog(ipaddress STRING,identd STRING,user_name STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: default@apachelog PREHOOK: query: LOAD DATA LOCAL INPATH '../data/files/apache.access.log' INTO TABLE apachelog @@ -11,9 +11,9 @@ PREHOOK: query: SELECT a.* FROM apachelog a PREHOOK: type: QUERY PREHOOK: Input: default@apachelog -PREHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-26-57_493_8424717254986801325/10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_13-03-09_967_1761875918055572728/-mr-10000 POSTHOOK: query: SELECT a.* FROM apachelog a POSTHOOK: type: QUERY POSTHOOK: Input: default@apachelog -POSTHOOK: Output: file:/tmp/jssarma/hive_2010-07-21_11-26-57_493_8424717254986801325/10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_13-03-09_967_1761875918055572728/-mr-10000 127.0.0.1 NULL frank 10/Oct/2000:13:55:36 -0700 GET /apache_pb.gif HTTP/1.0 200 2326 Index: ql/src/test/results/clientpositive/merge3.q.out =================================================================== --- ql/src/test/results/clientpositive/merge3.q.out (revision 1050266) +++ ql/src/test/results/clientpositive/merge3.q.out (working copy) @@ -1,13 +1,13 @@ PREHOOK: query: create table merge_src as select key, value from srcpart where ds is not null -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: query: create table merge_src as select key, value from srcpart where ds is not null -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 @@ -40,11 +40,11 @@ PREHOOK: query: explain extended create table merge_src2 as select key, value from merge_src -PREHOOK: type: CREATETABLE +PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain extended 
create table merge_src2 as select key, value from merge_src -POSTHOOK: type: CREATETABLE +POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -78,9 +78,9 @@ File Output Operator compressed: false GlobalTableId: 1 - directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002 + directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002 NumFilesPerFileSink: 1 - Stats Publishing Key Prefix: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10001/ + Stats Publishing Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10001/ table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -93,9 +93,9 @@ MultiFileSpray: false Needs Tagging: false Path -> Alias: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src [merge_src] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src [merge_src] Path -> Partition: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src Partition base file name: merge_src input format: org.apache.hadoop.mapred.TextInputFormat @@ -106,12 +106,12 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src name merge_src serialization.ddl struct merge_src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454110 + transient_lastDdlTime 1290113676 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -122,12 +122,12 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src name merge_src serialization.ddl struct merge_src { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454110 + transient_lastDdlTime 1290113676 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: merge_src name: merge_src @@ -139,15 +139,15 @@ Move Operator files: hdfs directory: true - source: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002 - 
destination: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10001
+ source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002
+ destination: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10001
Stage: Stage-0
Move Operator
files:
hdfs directory: true
- source: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10001
- destination: pfile:///data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src2
+ source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10001
+ destination: pfile:///Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src2
Stage: Stage-5
Create Table Operator:
@@ -163,11 +163,11 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002
File Output Operator
compressed: false
GlobalTableId: 0
- directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10001
+ directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10001
NumFilesPerFileSink: 1
table:
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -181,9 +181,9 @@
MultiFileSpray: false
Needs Tagging: false
Path -> Alias:
- pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002 [pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002]
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002 [pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002]
Path -> Partition:
- pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-01_163_8855372425957877493/-ext-10002
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-54-52_127_2413179540241952240/-ext-10002
Partition
base file name: -ext-10002
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -203,11 +203,11 @@
PREHOOK: query: create table merge_src2 as select key, value from merge_src
-PREHOOK: type: CREATETABLE
+PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@merge_src
POSTHOOK: query: create table merge_src2 as select key, value from merge_src
-POSTHOOK: type: CREATETABLE
+POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@merge_src
POSTHOOK: Output: default@merge_src2
POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -217,10 +217,11 @@
PREHOOK: query: select * from merge_src2
PREHOOK: type: QUERY
PREHOOK: Input: default@merge_src2
-PREHOOK: Output: file:/tmp/njain/hive_2010-11-10_21-42-09_717_694531801987422048/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-09_570_5014264450775842045/-mr-10000
POSTHOOK: query: select * from merge_src2
POSTHOOK: type: QUERY
POSTHOOK: Input: default@merge_src2
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-09_570_5014264450775842045/-mr-10000
POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -2284,9 +2285,9 @@
File Output Operator
compressed: false
GlobalTableId: 1
- directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002
+ directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002
NumFilesPerFileSink: 1
- Stats Publishing Key Prefix: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10000/
+ Stats Publishing Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10000/
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2296,13 +2297,13 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2
name merge_src_part2
partition_columns ds
serialization.ddl struct merge_src_part2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- transient_lastDdlTime 1289454130
+ transient_lastDdlTime 1290113709
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part2
TotalFiles: 1
@@ -2310,10 +2311,10 @@
MultiFileSpray: false
Needs Tagging: false
Path -> Alias:
- pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 [merge_src_part]
- pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 [merge_src_part]
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 [merge_src_part]
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 [merge_src_part]
Path -> Partition:
- pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08
Partition
base file name: ds=2008-04-08
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -2326,7 +2327,7 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part
name merge_src_part
numFiles 4
numPartitions 2
@@ -2336,7 +2337,7 @@
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 23248
- transient_lastDdlTime 1289454120
+ transient_lastDdlTime 1290113692
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -2347,7 +2348,7 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part
name merge_src_part
numFiles 4
numPartitions 2
@@ -2357,11 +2358,11 @@
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 23248
- transient_lastDdlTime 1289454120
+ transient_lastDdlTime 1290113692
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part
name: merge_src_part
- pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09
Partition
base file name: ds=2008-04-09
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -2374,7 +2375,7 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part
name merge_src_part
numFiles 4
numPartitions 2
@@ -2384,7 +2385,7 @@
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 23248
- transient_lastDdlTime 1289454120
+ transient_lastDdlTime 1290113692
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -2395,7 +2396,7 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part
name merge_src_part
numFiles 4
numPartitions 2
@@ -2405,7 +2406,7 @@
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 23248
- transient_lastDdlTime 1289454120
+ transient_lastDdlTime 1290113692
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part
name: merge_src_part
@@ -2417,8 +2418,8 @@
Move Operator
files:
hdfs directory: true
- source: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002
- destination: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10000
+ source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002
+ destination: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10000
Stage: Stage-0
Move Operator
@@ -2426,7 +2427,7 @@
partition:
ds
replace: true
- source: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10000
+ source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10000
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2436,29 +2437,29 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2
name merge_src_part2
partition_columns ds
serialization.ddl struct merge_src_part2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- transient_lastDdlTime 1289454130
+ transient_lastDdlTime 1290113709
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part2
- tmp directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10001
+ tmp directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10001
Stage: Stage-2
Stats-Aggr Operator
- Stats Aggregation Key Prefix: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10000/
+ Stats Aggregation Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10000/
Stage: Stage-3
Map Reduce
Alias -> Map Operator Tree:
- pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002
File Output Operator
compressed: false
GlobalTableId: 0
- directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10000
+ directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10000
NumFilesPerFileSink: 1
table:
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -2469,13 +2470,13 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2
name merge_src_part2
partition_columns ds
serialization.ddl struct merge_src_part2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- transient_lastDdlTime 1289454130
+ transient_lastDdlTime 1290113709
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part2
TotalFiles: 1
@@ -2483,9 +2484,9 @@
MultiFileSpray: false
Needs Tagging: false
Path -> Alias:
- pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002 [pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002]
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002 [pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002]
Path -> Partition:
- pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-10_192_7350266745211428565/-ext-10002
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-09_984_4986210984396331402/-ext-10002
Partition
base file name: -ext-10002
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -2496,13 +2497,13 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2
name merge_src_part2
partition_columns ds
serialization.ddl struct merge_src_part2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- transient_lastDdlTime 1289454130
+ transient_lastDdlTime 1290113709
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -2513,13 +2514,13 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2
name merge_src_part2
partition_columns ds
serialization.ddl struct merge_src_part2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- transient_lastDdlTime 1289454130
+ transient_lastDdlTime 1290113709
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part2
name: merge_src_part2
@@ -2566,11 +2567,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@merge_src_part2@ds=2008-04-08
PREHOOK: Input: default@merge_src_part2@ds=2008-04-09
-PREHOOK: Output: file:/tmp/njain/hive_2010-11-10_21-42-26_758_4308514117864932351/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-29_875_778506553533012381/-mr-10000
POSTHOOK: query: select * from merge_src_part2 where ds is not null
POSTHOOK: type: QUERY
POSTHOOK: Input: default@merge_src_part2@ds=2008-04-08
POSTHOOK: Input: default@merge_src_part2@ds=2008-04-09
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-29_875_778506553533012381/-mr-10000
POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -4674,10 +4676,10 @@
type: string
Needs Tagging: false
Path -> Alias:
- pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 [s:merge_src_part]
- pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 [s:merge_src_part]
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08 [s:merge_src_part]
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09 [s:merge_src_part]
Path -> Partition:
- pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-08
Partition
base file name: ds=2008-04-08
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4690,7 +4692,7 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part
name merge_src_part
numFiles 4
numPartitions 2
@@ -4700,7 +4702,7 @@
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 23248
- transient_lastDdlTime 1289454120
+ transient_lastDdlTime 1290113692
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4711,7 +4713,7 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part
name merge_src_part
numFiles 4
numPartitions 2
@@ -4721,11 +4723,11 @@
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 23248
- transient_lastDdlTime 1289454120
+ transient_lastDdlTime 1290113692
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part
name: merge_src_part
- pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part/ds=2008-04-09
Partition
base file name: ds=2008-04-09
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4738,7 +4740,7 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part
name merge_src_part
numFiles 4
numPartitions 2
@@ -4748,7 +4750,7 @@
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 23248
- transient_lastDdlTime 1289454120
+ transient_lastDdlTime 1290113692
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4759,7 +4761,7 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part
name merge_src_part
numFiles 4
numPartitions 2
@@ -4769,7 +4771,7 @@
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
totalSize 23248
- transient_lastDdlTime 1289454120
+ transient_lastDdlTime 1290113692
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part
name: merge_src_part
@@ -4787,9 +4789,9 @@
File Output Operator
compressed: false
GlobalTableId: 1
- directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002
+ directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002
NumFilesPerFileSink: 1
- Stats Publishing Key Prefix: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10000/
+ Stats Publishing Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10000/
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -4799,13 +4801,13 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2
name merge_src_part2
partition_columns ds
serialization.ddl struct merge_src_part2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- transient_lastDdlTime 1289454147
+ transient_lastDdlTime 1290113731
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part2
TotalFiles: 1
@@ -4819,8 +4821,8 @@
Move Operator
files:
hdfs directory: true
- source: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002
- destination: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10000
+ source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002
+ destination: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10000
Stage: Stage-0
Move Operator
@@ -4828,7 +4830,7 @@
partition:
ds
replace: true
- source: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10000
+ source: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10000
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -4838,29 +4840,29 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2
name merge_src_part2
partition_columns ds
serialization.ddl struct merge_src_part2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- transient_lastDdlTime 1289454147
+ transient_lastDdlTime 1290113731
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part2
- tmp directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10001
+ tmp directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10001
Stage: Stage-2
Stats-Aggr Operator
- Stats Aggregation Key Prefix: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10000/
+ Stats Aggregation Key Prefix: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10000/
Stage: Stage-3
Map Reduce
Alias -> Map Operator Tree:
- pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002
File Output Operator
compressed: false
GlobalTableId: 0
- directory: pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10000
+ directory: pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10000
NumFilesPerFileSink: 1
table:
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4871,13 +4873,13 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2
name merge_src_part2
partition_columns ds
serialization.ddl struct merge_src_part2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- transient_lastDdlTime 1289454147
+ transient_lastDdlTime 1290113731
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part2
TotalFiles: 1
@@ -4885,9 +4887,9 @@
MultiFileSpray: false
Needs Tagging: false
Path -> Alias:
- pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002 [pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002]
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002 [pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002]
Path -> Partition:
- pfile:/data/users/njain/hive1/hive1/build/ql/scratchdir/hive_2010-11-10_21-42-27_945_6969234973394510647/-ext-10002
+ pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/scratchdir/hive_2010-11-18_12-55-31_490_2124793241058013369/-ext-10002
Partition
base file name: -ext-10002
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4898,13 +4900,13 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2
name merge_src_part2
partition_columns ds
serialization.ddl struct merge_src_part2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- transient_lastDdlTime 1289454147
+ transient_lastDdlTime 1290113731
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4915,13 +4917,13 @@
columns.types string:string
file.inputformat org.apache.hadoop.mapred.TextInputFormat
file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
- location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/merge_src_part2
+ location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/merge_src_part2
name merge_src_part2
partition_columns ds
serialization.ddl struct merge_src_part2 { string key, string value}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
- transient_lastDdlTime 1289454147
+ transient_lastDdlTime 1290113731
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: merge_src_part2
name: merge_src_part2
@@ -4975,11 +4977,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@merge_src_part2@ds=2008-04-08
PREHOOK: Input: default@merge_src_part2@ds=2008-04-09
-PREHOOK: Output: file:/tmp/njain/hive_2010-11-10_21-42-43_560_3339774644672299186/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-56_157_6737432571027363960/-mr-10000
POSTHOOK: query: select * from merge_src_part2 where ds is not null
POSTHOOK: type: QUERY
POSTHOOK: Input: default@merge_src_part2@ds=2008-04-08
POSTHOOK: Input: default@merge_src_part2@ds=2008-04-09
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_12-55-56_157_6737432571027363960/-mr-10000
POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: merge_src_part PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
Index: ql/src/test/results/clientpositive/query_result_fileformat.q.out
===================================================================
--- ql/src/test/results/clientpositive/query_result_fileformat.q.out (revision 1050266)
+++ ql/src/test/results/clientpositive/query_result_fileformat.q.out (working copy)
@@ -2,23 +2,23 @@
1
http://asdf' value from src limit 1
-PREHOOK: type: CREATETABLE
+PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
POSTHOOK: query: create table nzhang_test1 stored as sequencefile as select 'key1' as key, 'value
1
http://asdf' value from src limit 1
-POSTHOOK: type: CREATETABLE
+POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: default@nzhang_test1
PREHOOK: query: select * from nzhang_test1
PREHOOK: type: QUERY
PREHOOK: Input: default@nzhang_test1
-PREHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-37-49_137_345185714437305649/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-18_756_4686540378600379768/-mr-10000
POSTHOOK: query: select * from nzhang_test1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@nzhang_test1
-POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-37-49_137_345185714437305649/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-18_756_4686540378600379768/-mr-10000
key1 value
1
@@ -26,11 +26,11 @@
PREHOOK: query: select count(*) from nzhang_test1
PREHOOK: type: QUERY
PREHOOK: Input: default@nzhang_test1
-PREHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-37-49_893_2470605464847588988/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-19_306_9007845540495524130/-mr-10000
POSTHOOK: query: select count(*) from nzhang_test1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@nzhang_test1
-POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-37-49_893_2470605464847588988/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-19_306_9007845540495524130/-mr-10000
1
PREHOOK: query: explain select * from nzhang_test1 where key='key1'
@@ -82,11 +82,11 @@
PREHOOK: query: select * from nzhang_test1 where key='key1'
PREHOOK: type: QUERY
PREHOOK: Input: default@nzhang_test1
-PREHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-37-56_447_1539901914223140072/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-27_819_8034778775852480180/-mr-10000
POSTHOOK: query: select * from nzhang_test1 where key='key1'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@nzhang_test1
-POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-37-56_447_1539901914223140072/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-27_819_8034778775852480180/-mr-10000
key1 value
1
NULL NULL
@@ -94,11 +94,11 @@
PREHOOK: query: select * from nzhang_test1
PREHOOK: type: QUERY
PREHOOK: Input: default@nzhang_test1
-PREHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-00_606_2534525216891512327/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-32_891_5081550764397015247/-mr-10000
POSTHOOK: query: select * from nzhang_test1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@nzhang_test1
-POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-00_606_2534525216891512327/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-32_891_5081550764397015247/-mr-10000
key1 value
1
@@ -106,11 +106,11 @@
PREHOOK: query: select count(*) from nzhang_test1
PREHOOK: type: QUERY
PREHOOK: Input: default@nzhang_test1
-PREHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-02_968_2091791272244763520/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-33_131_934017148846831316/-mr-10000
POSTHOOK: query: select count(*) from nzhang_test1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@nzhang_test1
-POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-02_968_2091791272244763520/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-33_131_934017148846831316/-mr-10000
1
PREHOOK: query: explain select * from nzhang_test1 where key='key1'
@@ -162,11 +162,11 @@
PREHOOK: query: select * from nzhang_test1 where key='key1'
PREHOOK: type: QUERY
PREHOOK: Input: default@nzhang_test1
-PREHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-09_247_8932362895617955403/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-40_273_8393892543433536739/-mr-10000
POSTHOOK: query: select * from nzhang_test1 where key='key1'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@nzhang_test1
-POSTHOOK: Output: file:/tmp/nzhang/hive_2010-08-30_23-38-09_247_8932362895617955403/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_10-59-40_273_8393892543433536739/-mr-10000
key1 value
1
Index: ql/src/test/results/clientpositive/rcfile_default_format.q.out
===================================================================
--- ql/src/test/results/clientpositive/rcfile_default_format.q.out (revision 1050266)
+++ ql/src/test/results/clientpositive/rcfile_default_format.q.out (working copy)
@@ -9,12 +9,12 @@
POSTHOOK: type: DESCTABLE
key string from deserializer
-Detailed Table Information Table(tableName:rcfile_default_format, dbName:default, owner:thiruvel, createTime:1286799201, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/rcfile_default_format, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1286799201}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:rcfile_default_format, dbName:default, owner:heyongqiang, createTime:1290106787, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/rcfile_default_format, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290106787}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
PREHOOK: query: CREATE TABLE rcfile_default_format_ctas AS SELECT key,value FROM src
-PREHOOK: type: CREATETABLE
+PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
POSTHOOK: query: CREATE TABLE rcfile_default_format_ctas AS SELECT key,value FROM src
-POSTHOOK: type: CREATETABLE
+POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: default@rcfile_default_format_ctas
PREHOOK: query: DESCRIBE EXTENDED rcfile_default_format_ctas
@@ -24,7 +24,7 @@
key string from deserializer
value string from deserializer
-Detailed Table Information Table(tableName:rcfile_default_format_ctas, dbName:default, owner:thiruvel, createTime:1286799204, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/rcfile_default_format_ctas, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1286799204}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:rcfile_default_format_ctas, dbName:default, owner:heyongqiang, createTime:1290106793, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/rcfile_default_format_ctas, inputFormat:org.apache.hadoop.hive.ql.io.RCFileInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.RCFileOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290106793}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
PREHOOK: query: CREATE TABLE rcfile_default_format_txtfile (key STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
POSTHOOK: query: CREATE TABLE rcfile_default_format_txtfile (key STRING) STORED AS TEXTFILE
@@ -46,12 +46,12 @@
POSTHOOK: Lineage: rcfile_default_format_txtfile.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
key string
-Detailed Table Information Table(tableName:rcfile_default_format_txtfile, dbName:default, owner:thiruvel, createTime:1286799204, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/rcfile_default_format_txtfile, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1286799207, numRows=500, totalSize=1906}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:rcfile_default_format_txtfile, dbName:default, owner:heyongqiang, createTime:1290106794, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/rcfile_default_format_txtfile, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1290106803, numRows=500, totalSize=1906}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
PREHOOK: query: CREATE TABLE textfile_default_format_ctas AS SELECT key,value FROM rcfile_default_format_ctas
-PREHOOK: type: CREATETABLE
+PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@rcfile_default_format_ctas
POSTHOOK: query: CREATE TABLE textfile_default_format_ctas AS SELECT key,value FROM rcfile_default_format_ctas
-POSTHOOK: type: CREATETABLE
+POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@rcfile_default_format_ctas
POSTHOOK: Output: default@textfile_default_format_ctas
POSTHOOK: Lineage: rcfile_default_format_txtfile.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
@@ -63,4 +63,4 @@
key string
value string
-Detailed Table Information Table(tableName:textfile_default_format_ctas, dbName:default, owner:thiruvel, createTime:1286799209, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/textfile_default_format_ctas, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1286799209}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:textfile_default_format_ctas, dbName:default, owner:heyongqiang, createTime:1290106809, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/textfile_default_format_ctas, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{transient_lastDdlTime=1290106809}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
Index: ql/src/test/results/clientpositive/semijoin.q.out
===================================================================
--- ql/src/test/results/clientpositive/semijoin.q.out (revision 1050266)
+++ ql/src/test/results/clientpositive/semijoin.q.out (working copy)
@@ -1,18 +1,18 @@
PREHOOK: query: create table t1 as select cast(key as int) key, value from src where key <= 10
-PREHOOK: type: CREATETABLE
+PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@src
POSTHOOK: query: create table t1 as select cast(key as int) key, value from src where key <= 10
-POSTHOOK: type: CREATETABLE
+POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@src
POSTHOOK: Output: default@t1
PREHOOK: query: select * from t1 sort by key
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-35_964_8659428901868696782/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-19_046_3895376692139653549/-mr-10000
POSTHOOK: query: select * from t1 sort by key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-35_964_8659428901868696782/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-19_046_3895376692139653549/-mr-10000
0 val_0
0 val_0
0 val_0
@@ -25,20 +25,20 @@
9 val_9
10 val_10
PREHOOK: query: create table t2 as select cast(2*key as int) key, value from t1
-PREHOOK: type: CREATETABLE
+PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@t1
POSTHOOK: query: create table t2 as select cast(2*key as int) key, value from t1
-POSTHOOK: type: CREATETABLE
+POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@t1
POSTHOOK: Output: default@t2
PREHOOK: query: select * from t2 sort by key
PREHOOK: type: QUERY
PREHOOK: Input: default@t2
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-44_033_1385561037107185571/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-35_454_4705187275126291053/-mr-10000
POSTHOOK: query: select * from t2 sort by key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t2
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-44_033_1385561037107185571/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-35_454_4705187275126291053/-mr-10000
0 val_0
0 val_0
0 val_0
@@ -51,22 +51,22 @@
18 val_9
20 val_10
PREHOOK: query: create table t3 as select * from (select * from t1 union all select * from t2) b
-PREHOOK: type: CREATETABLE
+PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
POSTHOOK: query: create table t3 as select * from (select * from t1 union all select * from t2) b
-POSTHOOK: type: CREATETABLE
+POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
POSTHOOK: Output: default@t3
PREHOOK: query: select * from t3 sort by key, value
PREHOOK: type: QUERY
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-56_368_4643337669577300642/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-56_521_5858394420847642107/-mr-10000
POSTHOOK: query: select * from t3 sort by key, value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-17-56_368_4643337669577300642/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-00-56_521_5858394420847642107/-mr-10000
0 val_0
0 val_0
0 val_0
@@ -97,11 +97,11 @@
PREHOOK: query: select * from t4
PREHOOK: type: QUERY
PREHOOK: Input: default@t4
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-00_347_4261760726152540894/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-05_957_5547541419038535698/-mr-10000
POSTHOOK: query: select * from t4
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t4
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-00_347_4261760726152540894/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-05_957_5547541419038535698/-mr-10000
PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
PREHOOK: type: QUERY
POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
@@ -185,7 +185,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-18-00_543_3830170995670719849/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-06_152_355729472898392348/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -217,12 +217,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-00_658_5539359917304769093/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-06_413_6128740965053236566/-mr-10000
POSTHOOK: query: select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-00_658_5539359917304769093/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-06_413_6128740965053236566/-mr-10000
0 val_0
0 val_0
0 val_0
@@ -312,7 +312,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-18-08_284_4216408335407237139/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-24_265_4466548024067433690/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -344,12 +344,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-08_398_3149624530689509743/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-24_509_4270214608701232060/-mr-10000
POSTHOOK: query: select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-08_398_3149624530689509743/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-24_509_4270214608701232060/-mr-10000
0 val_0
0 val_0
0 val_0
@@ -441,7 +441,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-18-16_135_6319067187632899075/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-40_609_1842300352332686282/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -473,12 +473,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
PREHOOK: Input: default@t4
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-16_255_6778253691436748699/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-40_927_8797191922320768648/-mr-10000
POSTHOOK: query: select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t4
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-16_255_6778253691436748699/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-40_927_8797191922320768648/-mr-10000
PREHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
PREHOOK: type: QUERY
POSTHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
@@ -568,7 +568,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-18-24_857_4689023765690259253/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-57_384_3089194844599355448/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -596,12 +596,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-24_973_3041071756227440132/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-57_517_5261225666744367069/-mr-10000
POSTHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-24_973_3041071756227440132/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-01-57_517_5261225666744367069/-mr-10000
val_0
val_0
val_0
@@ -708,7 +708,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-18-34_061_8275163351633601557/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-15_938_7137978578832330282/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -740,12 +740,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-34_179_4205035139034381615/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-16_057_519725218464853726/-mr-10000
POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-34_179_4205035139034381615/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-16_057_519725218464853726/-mr-10000
0 val_0
0 val_0
0 val_0
@@ -841,7 +841,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-18-43_183_3107199938637789566/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-32_044_1882268558166284283/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -869,12 +869,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-43_306_2053779749324913446/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-32_201_5180642771512887111/-mr-10000
POSTHOOK: query: select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-43_306_2053779749324913446/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-32_201_5180642771512887111/-mr-10000
val_10
val_8
val_9
@@ -980,7 +980,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-18-52_335_849441347171062507/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-47_618_6081419597528468690/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -1008,12 +1008,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-52_463_737069354382102340/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-48_046_789670215385615891/-mr-10000
POSTHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-18-52_463_737069354382102340/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-02-48_046_789670215385615891/-mr-10000
PREHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
PREHOOK: type: QUERY
POSTHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
@@ -1110,7 +1110,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-19-00_301_5958832649298868830/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-05_112_1019830060466266544/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -1142,12 +1142,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-00_424_4463918460829143379/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-05_322_221151979084285496/-mr-10000
POSTHOOK: query: select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-00_424_4463918460829143379/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-05_322_221151979084285496/-mr-10000
4 val_2
8 val_4
10 val_5
@@ -1229,7 +1229,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-19-09_365_1316122738460523098/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-20_944_438303615610048165/-mr-10002
Select Operator
expressions:
expr: _col0
@@ -1267,12 +1267,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-09_480_7870074811327558689/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-21_127_8134172271972764419/-mr-10000
POSTHOOK: query: select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-09_480_7870074811327558689/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-21_127_8134172271972764419/-mr-10000
0
0
0
@@ -1375,7 +1375,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-19-19_175_5090290266309302820/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-43_629_5752544729482637710/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -1407,12 +1407,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-19_289_7283573667957611898/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-43_779_3560439419287948511/-mr-10000
POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-19_289_7283573667957611898/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-03-43_779_3560439419287948511/-mr-10000
0 val_0
0 val_0
0 val_0
@@ -1523,7 +1523,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-19-28_320_7044231815158137622/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-00_648_373535142060359448/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -1560,13 +1560,13 @@
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-28_450_6078674943204864805/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-00_983_7977499042899650897/-mr-10000
POSTHOOK: query: select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-28_450_6078674943204864805/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-00_983_7977499042899650897/-mr-10000
0 val_0 0 val_0
0 val_0 0 val_0
0 val_0 0 val_0
@@ -1676,7 +1676,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-19-38_052_8721634945636524946/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-17_051_7474953441543543987/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -1708,12 +1708,12 @@
PREHOOK: type: QUERY
PREHOOK: Input: default@t1
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-38_174_5762672611483777652/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-17_231_2179425812915281444/-mr-10000
POSTHOOK: query: select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-38_174_5762672611483777652/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-17_231_2179425812915281444/-mr-10000
0 val_0
0 val_0
0 val_0
@@ -1838,7 +1838,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-19-49_190_4624457787442565360/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-33_573_6260564714806927759/-mr-10002
Select Operator
expressions:
expr: _col0
@@ -1877,13 +1877,13 @@
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-49_324_9174162734156458916/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-33_826_3229491168132326963/-mr-10000
POSTHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-49_324_9174162734156458916/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-33_826_3229491168132326963/-mr-10000
0
0
0
@@ -1991,7 +1991,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-19-59_535_8715349469270136515/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-55_240_4472382832724150377/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -2020,13 +2020,13 @@
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-59_667_8557238197192129755/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-55_429_8454721176911576485/-mr-10000
POSTHOOK: query: select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-19-59_667_8557238197192129755/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-04-55_429_8454721176911576485/-mr-10000
0
0
0
@@ -2146,7 +2146,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-20-09_255_2715293241143785335/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-12_866_4359353511062609004/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -2175,13 +2175,13 @@
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-09_383_4275977555046345305/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-13_034_2999962254990000638/-mr-10000
POSTHOOK: query: select a.key from t1 a right outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-09_383_4275977555046345305/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-13_034_2999962254990000638/-mr-10000
NULL
NULL
NULL
@@ -2304,7 +2304,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-20-19_268_5618148586891473846/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-27_311_4743482924605624788/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -2333,13 +2333,13 @@
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-19_400_3587882397101268241/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-27_421_7482213301780796952/-mr-10000
POSTHOOK: query: select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-19_400_3587882397101268241/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-27_421_7482213301780796952/-mr-10000
NULL
NULL
NULL
@@ -2462,7 +2462,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-20-28_728_1492881717279570087/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-43_118_4112077673076844815/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -2491,13 +2491,13 @@
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-28_856_8757358207361872372/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-43_554_6913778118057933477/-mr-10000
POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-28_856_8757358207361872372/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-43_554_6913778118057933477/-mr-10000
0
0
0
@@ -2620,7 +2620,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-20-38_076_4685193258029073996/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-59_032_4567135017384415264/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -2649,13 +2649,13 @@
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-38_205_3988929412565665845/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-59_146_932092442279199935/-mr-10000
POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-38_205_3988929412565665845/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-05-59_146_932092442279199935/-mr-10000
NULL
NULL
NULL
@@ -2780,7 +2780,7 @@
Stage: Stage-2
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-20-47_590_2999113315834306029/-mr-10002
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-15_877_648799003197473512/-mr-10002
Reduce Output Operator
key expressions:
expr: _col0
@@ -2809,13 +2809,13 @@
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-47_722_4677693091558572786/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-15_976_6198373402363644273/-mr-10000
POSTHOOK: query: select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-47_722_4677693091558572786/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-15_976_6198373402363644273/-mr-10000
NULL
NULL
NULL
@@ -2984,7 +2984,7 @@
Stage: Stage-3
Map Reduce
Alias -> Map Operator Tree:
- file:/tmp/liyintang/hive_2010-11-04_13-20-57_841_8595738346509621581/-mr-10003
+ file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-33_848_5227894741694043284/-mr-10003
Reduce Output Operator
key expressions:
expr: _col0
@@ -3013,13 +3013,13 @@
PREHOOK: Input: default@t1
PREHOOK: Input: default@t2
PREHOOK: Input: default@t3
-PREHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-57_976_1019112090965524872/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-34_080_237734989555410900/-mr-10000
POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1
POSTHOOK: Input: default@t2
POSTHOOK: Input: default@t3
-POSTHOOK: Output: file:/tmp/liyintang/hive_2010-11-04_13-20-57_976_1019112090965524872/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_11-06-34_080_237734989555410900/-mr-10000
0
0
0
Index: ql/src/test/results/clientpositive/show_indexes_edge_cases.q.out
===================================================================
--- ql/src/test/results/clientpositive/show_indexes_edge_cases.q.out (revision 1050266)
+++ ql/src/test/results/clientpositive/show_indexes_edge_cases.q.out (working copy)
@@ -201,6 +201,40 @@
POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
POSTHOOK: Lineage: default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ]
+PREHOOK: query: DROP INDEX idx_comment on show_idx_empty
+PREHOOK: type: DROPINDEX
+POSTHOOK: query: DROP INDEX idx_comment on show_idx_empty
+POSTHOOK:
type: DROPINDEX +POSTHOOK: Lineage: default__show_idx_full_idx_1__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__.value2 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value2, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +PREHOOK: query: DROP INDEX idx_compound on show_idx_empty +PREHOOK: type: DROPINDEX +POSTHOOK: query: DROP INDEX idx_compound on show_idx_empty +POSTHOOK: type: DROPINDEX +POSTHOOK: Lineage: default__show_idx_full_idx_1__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._offsets EXPRESSION 
[(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__.value2 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value2, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] PREHOOK: query: DROP TABLE show_idx_empty PREHOOK: type: DROPTABLE PREHOOK: Input: default@show_idx_empty @@ -222,6 +256,40 @@ POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +PREHOOK: query: DROP INDEX idx_1 on show_idx_full +PREHOOK: type: DROPINDEX +POSTHOOK: query: DROP INDEX idx_1 on show_idx_full +POSTHOOK: type: DROPINDEX +POSTHOOK: Lineage: default__show_idx_full_idx_1__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__.value2 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value2, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: 
default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +PREHOOK: query: DROP INDEX idx_2 on show_idx_full +PREHOOK: type: DROPINDEX +POSTHOOK: query: DROP INDEX idx_2 on show_idx_full +POSTHOOK: type: DROPINDEX +POSTHOOK: Lineage: default__show_idx_full_idx_1__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_1__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_2__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_comment__.value2 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value2, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._bucketname SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__._offsets EXPRESSION [(show_idx_full)show_idx_full.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.key SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: default__show_idx_full_idx_compound__.value1 SIMPLE [(show_idx_full)show_idx_full.FieldSchema(name:value1, type:string, comment:null), ] PREHOOK: query: DROP TABLE show_idx_full PREHOOK: type: DROPTABLE PREHOOK: Input: default@show_idx_full Index: ql/src/test/results/clientpositive/stats10.q.out =================================================================== --- ql/src/test/results/clientpositive/stats10.q.out (revision 1050266) +++ ql/src/test/results/clientpositive/stats10.q.out (working copy) @@ -123,11 +123,11 @@ PREHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key PREHOOK: type: QUERY PREHOOK: Input: default@bucket3_1@ds=1 -PREHOOK: Output: file:/tmp/thiruvel/hive_2010-10-11_12-41-36_921_3883563776062594481/-mr-10000 +PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_21-59-35_194_6516705847755484921/-mr-10000 POSTHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1' order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket3_1@ds=1 -POSTHOOK: Output: 
file:/tmp/thiruvel/hive_2010-10-11_12-41-36_921_3883563776062594481/-mr-10000 +POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_21-59-35_194_6516705847755484921/-mr-10000 POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -382,9 +382,9 @@ 498 val_498 1 498 val_498 1 PREHOOK: query: explain analyze table bucket3_1 partition (ds) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain analyze table bucket3_1 partition (ds) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: bucket3_1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] @@ -411,14 +411,14 @@ PREHOOK: query: analyze table bucket3_1 partition (ds) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@bucket3_1@ds=1 PREHOOK: Input: default@bucket3_1@ds=2 PREHOOK: Output: default@bucket3_1 PREHOOK: Output: default@bucket3_1@ds=1 PREHOOK: Output: default@bucket3_1@ds=2 POSTHOOK: query: analyze table bucket3_1 partition (ds) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket3_1@ds=1 POSTHOOK: Input: default@bucket3_1@ds=2 POSTHOOK: Output: default@bucket3_1 @@ -444,7 +444,7 @@ value string ds string -Detailed Partition Information Partition(values:[1], dbName:default, tableName:bucket3_1, createTime:1286826089, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/bucket3_1/ds=1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), parameters:{numFiles=2, transient_lastDdlTime=1286826103, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[1], dbName:default, tableName:bucket3_1, createTime:1290146355, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucket3_1/ds=1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), parameters:{numFiles=2, transient_lastDdlTime=1290146388, numRows=500, totalSize=5812}) PREHOOK: query: describe extended bucket3_1 partition (ds='2') PREHOOK: type: 
DESCTABLE POSTHOOK: query: describe extended bucket3_1 partition (ds='2') @@ -459,7 +459,7 @@ value string ds string -Detailed Partition Information Partition(values:[2], dbName:default, tableName:bucket3_1, createTime:1286826096, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/bucket3_1/ds=2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), parameters:{numFiles=2, transient_lastDdlTime=1286826103, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2], dbName:default, tableName:bucket3_1, createTime:1290146374, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucket3_1/ds=2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), parameters:{numFiles=2, transient_lastDdlTime=1290146388, numRows=500, totalSize=5812}) PREHOOK: query: describe extended bucket3_1 PREHOOK: type: DESCTABLE POSTHOOK: query: describe extended bucket3_1 @@ -474,4 +474,4 @@ value string ds string -Detailed Table Information Table(tableName:bucket3_1, dbName:default, owner:thiruvel, createTime:1286826085, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/bucket3_1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=2, numFiles=4, transient_lastDdlTime=1286826103, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:bucket3_1, dbName:default, owner:heyongqiang, createTime:1290146344, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null), FieldSchema(name:ds, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/bucket3_1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, 
parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null)], parameters:{numPartitions=2, numFiles=4, transient_lastDdlTime=1290146388, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) Index: ql/src/test/results/clientpositive/stats12.q.out =================================================================== --- ql/src/test/results/clientpositive/stats12.q.out (revision 1050266) +++ ql/src/test/results/clientpositive/stats12.q.out (working copy) @@ -30,10 +30,10 @@ POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain extended analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain extended analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -60,10 +60,10 @@ GatherStats: true Needs Tagging: false Path -> Alias: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 [analyze_srcpart] - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12 [analyze_srcpart] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 [analyze_srcpart] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12 [analyze_srcpart] Path -> Partition: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 Partition base file name: hr=11 input format: org.apache.hadoop.mapred.TextInputFormat @@ -77,13 +77,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454661 + transient_lastDdlTime 1290146533 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -94,17 +94,17 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location 
pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454661 + transient_lastDdlTime 1290146533 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: analyze_srcpart name: analyze_srcpart - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12 Partition base file name: hr=12 input format: org.apache.hadoop.mapred.TextInputFormat @@ -118,13 +118,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454661 + transient_lastDdlTime 1290146533 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -135,13 +135,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454661 + transient_lastDdlTime 1290146533 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: analyze_srcpart name: analyze_srcpart @@ -152,14 +152,14 @@ PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 PREHOOK: Output: default@analyze_srcpart PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@analyze_srcpart @@ -190,7 +190,7 @@ ds string hr string -Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289454661, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], 
location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1289454686, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146533, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1290146557, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=11) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=11) @@ -208,7 +208,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289454677, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289454686, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146549, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, 
serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146557, numRows=500, totalSize=5812}) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=12) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=12) @@ -226,7 +226,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289454677, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289454686, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146550, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146557, numRows=500, totalSize=5812}) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=11) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=11) @@ -244,7 +244,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1289454678, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289454678}) +Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146550, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], 
location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146550}) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=12) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=12) @@ -262,4 +262,4 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1289454678, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289454678}) +Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146550, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146550}) Index: ql/src/test/results/clientpositive/stats13.q.out =================================================================== --- ql/src/test/results/clientpositive/stats13.q.out (revision 1050266) +++ ql/src/test/results/clientpositive/stats13.q.out (working copy) @@ -30,10 +30,10 @@ POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: explain extended analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY POSTHOOK: query: explain extended analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] @@ -60,9 
+60,9 @@ GatherStats: true Needs Tagging: false Path -> Alias: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 [analyze_srcpart] + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 [analyze_srcpart] Path -> Partition: - pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 + pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11 Partition base file name: hr=11 input format: org.apache.hadoop.mapred.TextInputFormat @@ -76,13 +76,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454688 + transient_lastDdlTime 1290146558 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe input format: org.apache.hadoop.mapred.TextInputFormat @@ -93,13 +93,13 @@ columns.types string:string file.inputformat org.apache.hadoop.mapred.TextInputFormat file.outputformat org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - location pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart + location pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart name analyze_srcpart partition_columns ds/hr serialization.ddl struct analyze_srcpart { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - transient_lastDdlTime 1289454688 + transient_lastDdlTime 1290146558 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: analyze_srcpart name: analyze_srcpart @@ -110,12 +110,12 @@ PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -PREHOOK: type: null +PREHOOK: type: QUERY PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 PREHOOK: Output: default@analyze_srcpart PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics -POSTHOOK: type: null +POSTHOOK: type: QUERY POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11 POSTHOOK: Output: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11 @@ -144,7 +144,7 @@ ds string hr string -Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289454688, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, 
serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=1289454710, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) +Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146558, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=1290146579, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=11) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=11) @@ -162,7 +162,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289454702, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289454710, numRows=500, totalSize=5812}) +Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146572, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146579, numRows=500, totalSize=5812}) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=12) PREHOOK: type: 
DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-08', hr=12) @@ -180,7 +180,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289454703, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289454703}) +Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146572, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146572}) PREHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=11) PREHOOK: type: DESCTABLE POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=11) @@ -198,7 +198,7 @@ ds string hr string -Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1289454703, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289454703}) +Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146573, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146573}) PREHOOK: 
query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=12)
PREHOOK: type: DESCTABLE
POSTHOOK: query: desc extended analyze_srcpart partition (ds='2008-04-09', hr=12)
@@ -216,7 +216,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1289454704, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289454704})
+Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146573, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146573})
 PREHOOK: query: create table analyze_srcpart2 like analyze_srcpart
 PREHOOK: type: CREATETABLE
 POSTHOOK: query: create table analyze_srcpart2 like analyze_srcpart
@@ -247,4 +247,4 @@
 ds string
 hr string
-Detailed Table Information Table(tableName:analyze_srcpart2, dbName:default, owner:null, createTime:1289454711, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{transient_lastDdlTime=1289454711}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:analyze_srcpart2, dbName:default, owner:null, createTime:1290146580, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart2, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{transient_lastDdlTime=1290146580}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
Index: ql/src/test/results/clientpositive/stats2.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats2.q.out (revision 1050266)
+++ ql/src/test/results/clientpositive/stats2.q.out (working copy)
@@ -103,11 +103,11 @@
 ds string
 hr string
-Detailed Table Information Table(tableName:analyze_t1, dbName:default, owner:null, createTime:1289454712, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{transient_lastDdlTime=1289454712}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:analyze_t1, dbName:default, owner:null, createTime:1290126752, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{transient_lastDdlTime=1290126752}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
 PREHOOK: query: explain analyze table analyze_t1 partition (ds, hr) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table analyze_t1 partition (ds, hr) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Lineage: analyze_t1 PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_t1 PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_t1 PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -136,7 +136,7 @@
 PREHOOK: query: analyze table analyze_t1 partition (ds, hr) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@analyze_t1@ds=2008-04-08/hr=11
 PREHOOK: Input: default@analyze_t1@ds=2008-04-08/hr=12
 PREHOOK: Input: default@analyze_t1@ds=2008-04-09/hr=11
@@ -147,7 +147,7 @@
 PREHOOK: Output: default@analyze_t1@ds=2008-04-09/hr=11
 PREHOOK: Output: default@analyze_t1@ds=2008-04-09/hr=12
 POSTHOOK: query: analyze table analyze_t1 partition (ds, hr) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@analyze_t1@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@analyze_t1@ds=2008-04-08/hr=12
 POSTHOOK: Input: default@analyze_t1@ds=2008-04-09/hr=11
@@ -182,4 +182,4 @@
 ds string
 hr string
-Detailed Table Information Table(tableName:analyze_t1, dbName:default, owner:null, createTime:1289454712, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=4, numFiles=4, transient_lastDdlTime=1289454732, numRows=2000, totalSize=23248}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:analyze_t1, dbName:default, owner:null, createTime:1290126752, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_t1, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=4, numFiles=4, transient_lastDdlTime=1290126769, numRows=2000, totalSize=23248}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
Index: ql/src/test/results/clientpositive/stats5.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats5.q.out (revision 1050266)
+++ ql/src/test/results/clientpositive/stats5.q.out (working copy)
@@ -1,14 +1,14 @@
 PREHOOK: query: create table analyze_src as select * from src
-PREHOOK: type: CREATETABLE
+PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
 POSTHOOK: query: create table analyze_src as select * from src
-POSTHOOK: type: CREATETABLE
+POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@analyze_src
 PREHOOK: query: explain analyze table analyze_src compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table analyze_src compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 ABSTRACT SYNTAX TREE:
   (TOK_ANALYZE (TOK_TABTYPE analyze_src))
@@ -29,11 +29,11 @@
 PREHOOK: query: analyze table analyze_src compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@analyze_src
 PREHOOK: Output: default@analyze_src
 POSTHOOK: query: analyze table analyze_src compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@analyze_src
 POSTHOOK: Output: default@analyze_src
 PREHOOK: query: describe extended analyze_src
@@ -43,4 +43,4 @@
 key string
 value string
-Detailed Table Information Table(tableName:analyze_src, dbName:default, owner:thiruvel, createTime:1286826326, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/analyze_src, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1286826330, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:analyze_src, dbName:default, owner:heyongqiang, createTime:1290146681, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_src, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1290146692, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
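Note: the retyped hooks in stats5.q.out above trace back to two statements; a minimal HiveQL sketch of the pair as the test issues them (table names from the test, output elided):

    create table analyze_src as select * from src;   -- now typed CREATETABLE_AS_SELECT (previously CREATETABLE)
    analyze table analyze_src compute statistics;     -- now typed QUERY (previously null)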
Index: ql/src/test/results/clientpositive/stats6.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats6.q.out (revision 1050266)
+++ ql/src/test/results/clientpositive/stats6.q.out (working copy)
@@ -29,12 +29,12 @@
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
 PREHOOK: Output: default@analyze_srcpart
 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@analyze_srcpart
 POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
@@ -47,12 +47,12 @@
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
 PREHOOK: Output: default@analyze_srcpart
 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Output: default@analyze_srcpart
 POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
@@ -81,7 +81,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495447, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495455, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146715, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146726, numRows=500, totalSize=5812})
 PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12)
 PREHOOK: type: DESCTABLE
 POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12)
@@ -99,7 +99,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495448, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495460, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146716, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146736, numRows=500, totalSize=5812})
 PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=11)
 PREHOOK: type: DESCTABLE
 POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=11)
@@ -117,7 +117,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495448, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289495448})
+Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146716, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146716})
 PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=12)
 PREHOOK: type: DESCTABLE
 POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=12)
@@ -135,7 +135,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495448, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1289495448})
+Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146717, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{transient_lastDdlTime=1290146717})
 PREHOOK: query: describe extended analyze_srcpart
 PREHOOK: type: DESCTABLE
 POSTHOOK: query: describe extended analyze_srcpart
@@ -153,4 +153,4 @@
 ds string
 hr string
-Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289495436, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1289495460, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146693, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1290146737, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
Index: ql/src/test/results/clientpositive/stats7.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats7.q.out (revision 1050266)
+++ ql/src/test/results/clientpositive/stats7.q.out (working copy)
@@ -29,9 +29,9 @@
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -60,14 +60,14 @@
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
 PREHOOK: Output: default@analyze_srcpart
 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Output: default@analyze_srcpart
@@ -98,7 +98,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495472, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495480, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146760, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146772, numRows=500, totalSize=5812})
 PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12)
 PREHOOK: type: DESCTABLE
 POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12)
@@ -116,7 +116,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495473, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495480, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146761, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146772, numRows=500, totalSize=5812})
 PREHOOK: query: describe extended analyze_srcpart
 PREHOOK: type: DESCTABLE
 POSTHOOK: query: describe extended analyze_srcpart
@@ -134,4 +134,4 @@
 ds string
 hr string
-Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289495462, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1289495480, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146739, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=2, numFiles=2, transient_lastDdlTime=1290146772, numRows=1000, totalSize=11624}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
Index: ql/src/test/results/clientpositive/stats8.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats8.q.out (revision 1050266)
+++ ql/src/test/results/clientpositive/stats8.q.out (working copy)
@@ -29,9 +29,9 @@
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -60,12 +60,12 @@
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
 PREHOOK: Output: default@analyze_srcpart
 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=11) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@analyze_srcpart
 POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=11
@@ -94,7 +94,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495500, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146797, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146810, numRows=500, totalSize=5812})
 PREHOOK: query: describe extended analyze_srcpart
 PREHOOK: type: DESCTABLE
 POSTHOOK: query: describe extended analyze_srcpart
@@ -112,11 +112,11 @@
 ds string
 hr string
-Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289495481, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=1289495500, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146774, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=1, numFiles=1, transient_lastDdlTime=1290146810, numRows=500, totalSize=5812}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
 PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -145,12 +145,12 @@
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
 PREHOOK: Output: default@analyze_srcpart
 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-08',hr=12) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Output: default@analyze_srcpart
 POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12
@@ -179,11 +179,11 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495506, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146799, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146820, numRows=500, totalSize=5812})
 PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -212,12 +212,12 @@
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11
 PREHOOK: Output: default@analyze_srcpart
 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11
 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=11) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Output: default@analyze_srcpart
 POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11
@@ -246,11 +246,11 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495512, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146799, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146830, numRows=500, totalSize=5812})
 PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -279,12 +279,12 @@
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=12
 PREHOOK: Output: default@analyze_srcpart
 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12
 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds='2008-04-09',hr=12) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@analyze_srcpart
 POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12
@@ -313,11 +313,11 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495495, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495518, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146800, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146842, numRows=500, totalSize=5812})
 PREHOOK: query: explain analyze table analyze_srcpart PARTITION(ds, hr) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table analyze_srcpart PARTITION(ds, hr) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: analyze_srcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
@@ -346,7 +346,7 @@
 PREHOOK: query: analyze table analyze_srcpart PARTITION(ds, hr) compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11
@@ -357,7 +357,7 @@
 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=11
 PREHOOK: Output: default@analyze_srcpart@ds=2008-04-09/hr=12
 POSTHOOK: query: analyze table analyze_srcpart PARTITION(ds, hr) compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-09/hr=11
@@ -392,7 +392,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495525, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-08, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146797, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146854, numRows=500, totalSize=5812})
 PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12)
 PREHOOK: type: DESCTABLE
 POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-08',hr=12)
@@ -410,7 +410,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495525, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-08, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146799, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-08/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146854, numRows=500, totalSize=5812})
 PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=11)
 PREHOOK: type: DESCTABLE
 POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=11)
@@ -428,7 +428,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1289495494, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495526, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-09, 11], dbName:default, tableName:analyze_srcpart, createTime:1290146799, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=11, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146854, numRows=500, totalSize=5812})
 PREHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=12)
 PREHOOK: type: DESCTABLE
 POSTHOOK: query: describe extended analyze_srcpart PARTITION(ds='2008-04-09',hr=12)
@@ -446,7 +446,7 @@
 ds string
 hr string
-Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1289495495, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1289495526, numRows=500, totalSize=5812})
+Detailed Partition Information Partition(values:[2008-04-09, 12], dbName:default, tableName:analyze_srcpart, createTime:1290146800, lastAccessTime:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart/ds=2008-04-09/hr=12, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), parameters:{numFiles=1, transient_lastDdlTime=1290146854, numRows=500, totalSize=5812})
 PREHOOK: query: describe extended analyze_srcpart
 PREHOOK: type: DESCTABLE
 POSTHOOK: query: describe extended analyze_srcpart
@@ -464,4 +464,4 @@
 ds string
 hr string
-Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1289495481, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/data/users/njain/hive1/hive1/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=4, numFiles=4, transient_lastDdlTime=1289495526, numRows=2000, totalSize=23248}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:analyze_srcpart, dbName:default, owner:null, createTime:1290146774, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:string, comment:default), FieldSchema(name:value, type:string, comment:default), FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcpart, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:ds, type:string, comment:null), FieldSchema(name:hr, type:string, comment:null)], parameters:{numPartitions=4, numFiles=4, transient_lastDdlTime=1290146854, numRows=2000, totalSize=23248}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
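Note: stats6.q.out through stats8.q.out above cover the three partition-spec shapes that ANALYZE TABLE accepts, all of which are now typed QUERY; in HiveQL, as the tests issue them:

    analyze table analyze_srcpart partition (ds='2008-04-08', hr=11) compute statistics;  -- one fully specified partition
    analyze table analyze_srcpart partition (ds='2008-04-08', hr) compute statistics;     -- every hr under one ds
    analyze table analyze_srcpart partition (ds, hr) compute statistics;                  -- every partition of the table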
Index: ql/src/test/results/clientpositive/stats9.q.out
===================================================================
--- ql/src/test/results/clientpositive/stats9.q.out (revision 1050266)
+++ ql/src/test/results/clientpositive/stats9.q.out (working copy)
@@ -14,9 +14,9 @@
 POSTHOOK: Lineage: analyze_srcbucket.key SIMPLE [(srcbucket)srcbucket.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: analyze_srcbucket.value SIMPLE [(srcbucket)srcbucket.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: explain analyze table analyze_srcbucket compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table analyze_srcbucket compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Lineage: analyze_srcbucket.key SIMPLE [(srcbucket)srcbucket.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: analyze_srcbucket.value SIMPLE [(srcbucket)srcbucket.FieldSchema(name:value, type:string, comment:null), ]
 ABSTRACT SYNTAX TREE:
@@ -39,11 +39,11 @@
 PREHOOK: query: analyze table analyze_srcbucket compute statistics
-PREHOOK: type: null
+PREHOOK: type: QUERY
 PREHOOK: Input: default@analyze_srcbucket
 PREHOOK: Output: default@analyze_srcbucket
 POSTHOOK: query: analyze table analyze_srcbucket compute statistics
-POSTHOOK: type: null
+POSTHOOK: type: QUERY
 POSTHOOK: Input: default@analyze_srcbucket
 POSTHOOK: Output: default@analyze_srcbucket
 POSTHOOK: Lineage: analyze_srcbucket.key SIMPLE [(srcbucket)srcbucket.FieldSchema(name:key, type:int, comment:null), ]
@@ -57,4 +57,4 @@
 key int
 value string
-Detailed Table Information Table(tableName:analyze_srcbucket, dbName:default, owner:thiruvel, createTime:1286826500, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/home/thiruvel/projects/hive/hive.unsecure/build/ql/test/data/warehouse/analyze_srcbucket, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, EXTERNAL=FALSE, numFiles=1, transient_lastDdlTime=1286826508, numRows=1000, totalSize=11603}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
+Detailed Table Information Table(tableName:analyze_srcbucket, dbName:default, owner:heyongqiang, createTime:1290146857, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:key, type:int, comment:null), FieldSchema(name:value, type:string, comment:null)], location:pfile:/Users/heyongqiang/Documents/workspace/Hive-2/build/ql/test/data/warehouse/analyze_srcbucket, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:2, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[key], sortCols:[], parameters:{}), partitionKeys:[], parameters:{numPartitions=0, numFiles=1, transient_lastDdlTime=1290146885, numRows=1000, totalSize=11603}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)
Index: ql/src/test/results/clientpositive/str_to_map.q.out
===================================================================
--- ql/src/test/results/clientpositive/str_to_map.q.out (revision 1050266)
+++ ql/src/test/results/clientpositive/str_to_map.q.out (working copy)
@@ -48,11 +48,11 @@
 PREHOOK: query: select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-48_939_5414753236298860779/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-11-47_081_185807984859506518/-mr-10000
 POSTHOOK: query: select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-48_939_5414753236298860779/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-11-47_081_185807984859506518/-mr-10000
 1
 1
 1
@@ -95,11 +95,11 @@
 PREHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-51_828_6905327639418151142/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-11-53_276_628458177270710173/-mr-10000
 POSTHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-51_828_6905327639418151142/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-11-53_276_628458177270710173/-mr-10000
 {"b":"2","c":"3","a":"1"}
 {"b":"2","c":"3","a":"1"}
 {"b":"2","c":"3","a":"1"}
@@ -142,11 +142,11 @@
 PREHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-54_404_1637219732245487353/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-00_242_6299210516765000168/-mr-10000
 POSTHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-54_404_1637219732245487353/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-00_242_6299210516765000168/-mr-10000
 {"b":"2","c":"3","a":"1"}
 {"b":"2","c":"3","a":"1"}
 {"b":"2","c":"3","a":"1"}
@@ -205,13 +205,13 @@
 limit 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
-PREHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-56_995_8619552318902310354/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-08_090_5560348263638743320/-mr-10000
 POSTHOOK: query: select str_to_map(t.ss,',',':')['a']
 from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
 limit 3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
-POSTHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-48-56_995_8619552318902310354/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-08_090_5560348263638743320/-mr-10000
 1
 1
 1
@@ -220,20 +220,20 @@
 POSTHOOK: query: drop table tbl_s2m
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: create table tbl_s2m as select 'ABC=CC_333=444' as t from src limit 3
-PREHOOK: type: CREATETABLE
+PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
 POSTHOOK: query: create table tbl_s2m as select 'ABC=CC_333=444' as t from src limit 3
-POSTHOOK: type: CREATETABLE
+POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@tbl_s2m
 PREHOOK: query: select str_to_map(t,'_','=')['333'] from tbl_s2m
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_s2m
-PREHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-49-02_576_7096389194927995175/-mr-10000
+PREHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-26_274_4931967655402208603/-mr-10000
 POSTHOOK: query: select str_to_map(t,'_','=')['333'] from tbl_s2m
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tbl_s2m
-POSTHOOK: Output: file:/tmp/sdong/hive_2010-11-12_00-49-02_576_7096389194927995175/-mr-10000
+POSTHOOK: Output: file:/var/folders/6g/6grtCwPMEf4sqHUPpy6xQG9ByHg/-Tmp-/heyongqiang/hive_2010-11-18_16-12-26_274_4931967655402208603/-mr-10000
 444
 444
 444
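Note: for reference, the str_to_map calls exercised above; the second and third arguments (pair delimiter, key-value delimiter) are optional, and the unchanged golden rows suggest they default to ',' and ':'. Entry order in the printed map is not significant:

    select str_to_map('a=1,b=2,c=3', ',', '=')['a'] from src limit 3;  -- three rows of 1
    select str_to_map('a:1,b:2,c:3') from src limit 3;                 -- three rows of {"b":"2","c":"3","a":"1"}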