diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java index f71bc3c..0607913 100644 --- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java @@ -24,7 +24,6 @@ import java.net.URISyntaxException; import java.security.AccessControlException; import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; import java.util.BitSet; import java.util.List; @@ -34,12 +33,10 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.shims.HadoopShims; import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatus; @@ -628,4 +625,62 @@ public static boolean equalsFileSystem(FileSystem fs1, FileSystem fs2) { //Once equality has been added in HDFS-4321, we should make use of it return fs1.getUri().equals(fs2.getUri()); } + + /** + * Checks if delete can be performed on given path by given user. + * If file does not exist it just returns without throwing an Exception + * @param path + * @param conf + * @param user + * @throws AccessControlException + * @throws InterruptedException + * @throws Exception + */ + public static void checkDeletePermission(Path path, Configuration conf, String user) + throws AccessControlException, InterruptedException, Exception { + // This requires ability to delete the given path. + // The following 2 conditions should be satisfied for this- + // 1. Write permissions on parent dir + // 2. If sticky bit is set on parent dir then one of following should be + // true + // a. User is owner of the current dir/file + // b. User is owner of the parent dir + // Super users are also allowed to drop the file, but there is no good way of checking + // if a user is a super user. Also super users running hive queries is not a common + // use case. 
super users can also do a chown to be able to drop the file + + final FileSystem fs = path.getFileSystem(conf); + if (!fs.exists(path)) { + // no file/dir to be deleted + return; + } + Path parPath = path.getParent(); + // check user has write permissions on the parent dir + FileStatus stat = fs.getFileStatus(path); + FileUtils.checkFileAccessWithImpersonation(fs, stat, FsAction.WRITE, user); + + // check if sticky bit is set on the parent dir + FileStatus parStatus = fs.getFileStatus(parPath); + if (!parStatus.getPermission().getStickyBit()) { + // no sticky bit, so write permission on parent dir is sufficient + // no further checks needed + return; + } + + // check if user is owner of parent dir + if (parStatus.getOwner().equals(user)) { + return; + } + + // check if user is owner of current dir/file + FileStatus childStatus = fs.getFileStatus(path); + if (childStatus.getOwner().equals(user)) { + return; + } + String msg = String.format("Permission Denied: User %s can't delete %s because sticky bit is" + + " set on the parent dir and user does not own this file or its parent", user, path); + throw new IOException(msg); + + } + } diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index cefa516..63052f3 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -54,7 +54,6 @@ * Hive Configuration. */ public class HiveConf extends Configuration { - protected String hiveJar; protected Properties origProp; protected String auxJars; @@ -1415,6 +1414,14 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { "to construct a list exception handlers to handle exceptions thrown\n" + "by record readers"), + // operation log configuration + HIVE_SERVER2_LOGGING_OPERATION_ENABLED("hive.server2.logging.operation.enabled", true, + "When true, HS2 will save operation logs"), + HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION("hive.server2.logging.operation.log.location", + "${system:java.io.tmpdir}" + File.separator + "${system:user.name}" + File.separator + + "operation_logs", + "Top level directory where operation logs are stored if logging functionality is enabled"), + // logging configuration HIVE_LOG4J_FILE("hive.log4j.file", "", "Hive log4j configuration file.\n" + @@ -1486,8 +1493,11 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { "Minimum number of worker threads when in HTTP mode."), HIVE_SERVER2_THRIFT_HTTP_MAX_WORKER_THREADS("hive.server2.thrift.http.max.worker.threads", 500, "Maximum number of worker threads when in HTTP mode."), - HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME("hive.server2.thrift.http.max.idle.time", 1800000, + HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME("hive.server2.thrift.http.max.idle.time", 1800000, "Maximum idle time in milliseconds for a connection on the server when in HTTP mode."), + HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME("hive.server2.thrift.http.worker.keepalive.time", 60, + "Keepalive time (in seconds) for an idle http worker thread. 
When number of workers > min workers, " + + "excess threads are killed after this time interval."), // binary transport settings HIVE_SERVER2_THRIFT_PORT("hive.server2.thrift.port", 10000, @@ -1510,7 +1520,9 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { "Minimum number of Thrift worker threads"), HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS("hive.server2.thrift.max.worker.threads", 500, "Maximum number of Thrift worker threads"), - + HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME("hive.server2.thrift.worker.keepalive.time", 60, + "Keepalive time (in seconds) for an idle worker thread. When number of workers > min workers, " + + "excess threads are killed after this time interval."), // Configuration for async thread pool in SessionManager HIVE_SERVER2_ASYNC_EXEC_THREADS("hive.server2.async.exec.threads", 100, "Number of threads in the async thread pool for HiveServer2"), diff --git a/contrib/src/test/results/clientnegative/serde_regex.q.out b/contrib/src/test/results/clientnegative/serde_regex.q.out index c5a22b0..988bf10 100644 --- a/contrib/src/test/results/clientnegative/serde_regex.q.out +++ b/contrib/src/test/results/clientnegative/serde_regex.q.out @@ -56,7 +56,7 @@ STAGE PLANS: serde properties: input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))? output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s - name: serde_regex + name: default.serde_regex PREHOOK: query: CREATE TABLE serde_regex( host STRING, diff --git a/contrib/src/test/results/clientpositive/fileformat_base64.q.out b/contrib/src/test/results/clientpositive/fileformat_base64.q.out index 852e351..1be2995 100644 --- a/contrib/src/test/results/clientpositive/fileformat_base64.q.out +++ b/contrib/src/test/results/clientpositive/fileformat_base64.q.out @@ -22,7 +22,7 @@ STAGE PLANS: columns: key int, value string input format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat output format: org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextOutputFormat - name: base64_test + name: default.base64_test PREHOOK: query: CREATE TABLE base64_test(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat' diff --git a/contrib/src/test/results/clientpositive/serde_regex.q.out b/contrib/src/test/results/clientpositive/serde_regex.q.out index 13019eb..dc97cb3 100644 --- a/contrib/src/test/results/clientpositive/serde_regex.q.out +++ b/contrib/src/test/results/clientpositive/serde_regex.q.out @@ -48,7 +48,7 @@ STAGE PLANS: serde properties: input.regex ([^ ]*) ([^ ]*) ([^ ]*) (-|\[[^\]]*\]) ([^ "]*|"[^"]*") (-|[0-9]*) (-|[0-9]*)(?: ([^ "]*|"[^"]*") ([^ "]*|"[^"]*"))? 
output.format.string %1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s - name: serde_regex + name: default.serde_regex PREHOOK: query: CREATE TABLE serde_regex( host STRING, diff --git a/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out b/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out index 61e333c..d2e0b81 100644 --- a/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out +++ b/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out @@ -20,7 +20,7 @@ WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string") TBLPROPERTIES ("hbase.table.name" = "hbase_table_0") PREHOOK: type: CREATETABLE PREHOOK: Output: database:hbasedb -PREHOOK: Output: hbaseDB@hbaseDB.hbase_table_0 +PREHOOK: Output: hbaseDB@hbase_table_0 POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S) -- Hadoop 0.23 changes the behavior FsShell on Exit Codes -- In Hadoop 0.20 @@ -37,7 +37,6 @@ WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string") TBLPROPERTIES ("hbase.table.name" = "hbase_table_0") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:hbasedb -POSTHOOK: Output: hbaseDB@hbaseDB.hbase_table_0 POSTHOOK: Output: hbaseDB@hbase_table_0 Found 3 items #### A masked pattern was here #### diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java index ff0f210..172ff01 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java @@ -86,7 +86,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, "Operation not supported. Create table as " + "Select is not a valid operation."); - case HiveParser.TOK_TABLEBUCKETS: + case HiveParser.TOK_ALTERTABLE_BUCKETS: break; case HiveParser.TOK_LIKETABLE: diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index 4d338b5..6c54c05 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -71,7 +71,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) hook = new CreateDatabaseHook(); return hook.preAnalyze(context, ast); - case HiveParser.TOK_ALTERTABLE_PARTITION: + case HiveParser.TOK_ALTERTABLE: if (((ASTNode) ast.getChild(1)).getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) { return ast; } else if (((ASTNode) ast.getChild(1)).getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES) { @@ -163,7 +163,6 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, case HiveParser.TOK_CREATETABLE: case HiveParser.TOK_CREATEDATABASE: - case HiveParser.TOK_ALTERTABLE_PARTITION: // HCat will allow these operations to be performed. 
// Database DDL @@ -178,12 +177,20 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, case HiveParser.TOK_CREATEINDEX: case HiveParser.TOK_DROPINDEX: case HiveParser.TOK_SHOWINDEXES: + break; // View DDL //case HiveParser.TOK_ALTERVIEW_ADDPARTS: - case HiveParser.TOK_ALTERVIEW_DROPPARTS: - case HiveParser.TOK_ALTERVIEW_PROPERTIES: - case HiveParser.TOK_ALTERVIEW_RENAME: + case HiveParser.TOK_ALTERVIEW: + switch (ast.getChild(1).getType()) { + case HiveParser.TOK_ALTERVIEW_ADDPARTS: + case HiveParser.TOK_ALTERVIEW_DROPPARTS: + case HiveParser.TOK_ALTERVIEW_RENAME: + case HiveParser.TOK_ALTERVIEW_PROPERTIES: + case HiveParser.TOK_ALTERVIEW_DROPPROPERTIES: + } + break; + case HiveParser.TOK_CREATEVIEW: case HiveParser.TOK_DROPVIEW: @@ -205,20 +212,39 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, case HiveParser.TOK_DESCFUNCTION: case HiveParser.TOK_SHOWFUNCTIONS: case HiveParser.TOK_EXPLAIN: + break; // Table DDL - case HiveParser.TOK_ALTERTABLE_ADDPARTS: - case HiveParser.TOK_ALTERTABLE_ADDCOLS: - case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION: - case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES: - case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT: - case HiveParser.TOK_ALTERTABLE_DROPPARTS: - case HiveParser.TOK_ALTERTABLE_PROPERTIES: - case HiveParser.TOK_ALTERTABLE_RENAME: - case HiveParser.TOK_ALTERTABLE_RENAMECOL: - case HiveParser.TOK_ALTERTABLE_REPLACECOLS: - case HiveParser.TOK_ALTERTABLE_SERIALIZER: - case HiveParser.TOK_ALTERTABLE_TOUCH: + case HiveParser.TOK_ALTERTABLE: + switch (ast.getChild(1).getType()) { + case HiveParser.TOK_ALTERTABLE_ADDPARTS: + case HiveParser.TOK_ALTERTABLE_ADDCOLS: + case HiveParser.TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION: + case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES: + case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT: + case HiveParser.TOK_ALTERTABLE_DROPPARTS: + case HiveParser.TOK_ALTERTABLE_PROPERTIES: + case HiveParser.TOK_ALTERTABLE_DROPPROPERTIES: + case HiveParser.TOK_ALTERTABLE_RENAME: + case HiveParser.TOK_ALTERTABLE_RENAMECOL: + case HiveParser.TOK_ALTERTABLE_REPLACECOLS: + case HiveParser.TOK_ALTERTABLE_SERIALIZER: + case HiveParser.TOK_ALTERTABLE_TOUCH: + case HiveParser.TOK_ALTERTABLE_ARCHIVE: + case HiveParser.TOK_ALTERTABLE_UNARCHIVE: + case HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION: + case HiveParser.TOK_ALTERTABLE_SKEWED: + case HiveParser.TOK_ALTERTABLE_FILEFORMAT: + case HiveParser.TOK_ALTERTABLE_PROTECTMODE: + case HiveParser.TOK_ALTERTABLE_LOCATION: + case HiveParser.TOK_ALTERTABLE_MERGEFILES: + case HiveParser.TOK_ALTERTABLE_RENAMEPART: + case HiveParser.TOK_ALTERTABLE_SKEWED_LOCATION: + case HiveParser.TOK_ALTERTABLE_BUCKETS: + case HiveParser.TOK_ALTERTABLE_COMPACT: + } + break; + case HiveParser.TOK_DESCTABLE: case HiveParser.TOK_DROPTABLE: case HiveParser.TOK_SHOW_TABLESTATUS: diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java index 1e25ed3..606cb3a 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java @@ -156,7 +156,7 @@ public void testDatabaseOperations() throws MetaException, CommandNeedRetryExcep public void testCreateTableIfNotExists() throws MetaException, TException, NoSuchObjectException, CommandNeedRetryException { hcatDriver.run("drop table " + TBL_NAME); - hcatDriver.run("create table junit_sem_analysis (a int) stored as 
RCFILE"); + hcatDriver.run("create table " + TBL_NAME + " (a int) stored as RCFILE"); Table tbl = client.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, TBL_NAME); List cols = tbl.getSd().getCols(); assertEquals(1, cols.size()); diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java index ea7c54c..52586a0 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java @@ -51,6 +51,7 @@ private int createTime; private int lastAccessTime; private StorageDescriptor sd; + private List columns; // Cache column-list from this.sd. private Map parameters; // For use from within HCatClient.getPartitions(). @@ -68,6 +69,7 @@ } this.sd = partition.getSd(); + this.columns = getColumns(this.sd); } // For constructing HCatPartitions afresh, as an argument to HCatClient.addPartitions(). @@ -77,6 +79,7 @@ public HCatPartition(HCatTable hcatTable, Map partitionKeyValues this.dbName = hcatTable.getDbName(); this.sd = new StorageDescriptor(hcatTable.getSd()); this.sd.setLocation(location); + this.columns = getColumns(this.sd); this.createTime = (int)(System.currentTimeMillis()/1000); this.lastAccessTime = -1; this.values = new ArrayList(hcatTable.getPartCols().size()); @@ -98,7 +101,7 @@ public HCatPartition(HCatPartition rhs, Map partitionKeyValues, this.dbName = rhs.dbName; this.sd = new StorageDescriptor(rhs.sd); this.sd.setLocation(location); - + this.columns = getColumns(this.sd); this.createTime = (int) (System.currentTimeMillis() / 1000); this.lastAccessTime = -1; this.values = new ArrayList(hcatTable.getPartCols().size()); @@ -112,6 +115,14 @@ public HCatPartition(HCatPartition rhs, Map partitionKeyValues, } } + private static List getColumns(StorageDescriptor sd) throws HCatException { + ArrayList columns = new ArrayList(sd.getColsSize()); + for (FieldSchema fieldSchema : sd.getCols()) { + columns.add(HCatSchemaUtils.getHCatFieldSchema(fieldSchema)); + } + return columns; + } + // For use from HCatClient.addPartitions(), to construct from user-input. 
Partition toHivePartition() throws HCatException { Partition hivePtn = new Partition(); @@ -172,11 +183,7 @@ public String getDatabaseName() { * * @return the columns */ - public List getColumns() throws HCatException { - ArrayList columns = new ArrayList(sd.getColsSize()); - for (FieldSchema fieldSchema : sd.getCols()) { - columns.add(HCatSchemaUtils.getHCatFieldSchema(fieldSchema)); - } + public List getColumns() { return columns; } diff --git a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java index 4502439..f3a0b2e 100644 --- a/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java +++ b/itests/hive-unit/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java @@ -28,6 +28,7 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.HiveMetaStore; @@ -49,6 +50,7 @@ public static final String HS2_BINARY_MODE = "binary"; public static final String HS2_HTTP_MODE = "http"; private static final String driverName = "org.apache.hive.jdbc.HiveDriver"; + private static final FsPermission FULL_PERM = new FsPermission((short)00777); private HiveServer2 hiveServer2 = null; private final File baseDir; private final Path baseDfsDir; @@ -59,6 +61,7 @@ private boolean useMiniKdc = false; private final String serverPrincipal; private final String serverKeytab; + private final boolean isMetastoreRemote; public static class Builder { private HiveConf hiveConf = new HiveConf(); @@ -67,6 +70,7 @@ private String serverPrincipal; private String serverKeytab; private boolean isHTTPTransMode = false; + private boolean isMetastoreRemote; public Builder() { } @@ -83,6 +87,11 @@ public Builder withMiniKdc(String serverPrincipal, String serverKeytab) { return this; } + public Builder withRemoteMetastore() { + this.isMetastoreRemote = true; + return this; + } + public Builder withConf(HiveConf hiveConf) { this.hiveConf = hiveConf; return this; @@ -107,7 +116,8 @@ public MiniHS2 build() throws Exception { } else { hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, HS2_BINARY_MODE); } - return new MiniHS2(hiveConf, useMiniMR, useMiniKdc, serverPrincipal, serverKeytab); + return new MiniHS2(hiveConf, useMiniMR, useMiniKdc, serverPrincipal, serverKeytab, + isMetastoreRemote); } } @@ -139,12 +149,14 @@ public boolean isUseMiniKdc() { return useMiniKdc; } - private MiniHS2(HiveConf hiveConf, boolean useMiniMR, boolean useMiniKdc, String serverPrincipal, String serverKeytab) throws Exception { + private MiniHS2(HiveConf hiveConf, boolean useMiniMR, boolean useMiniKdc, + String serverPrincipal, String serverKeytab, boolean isMetastoreRemote) throws Exception { super(hiveConf, "localhost", MetaStoreUtils.findFreePort(), MetaStoreUtils.findFreePort()); this.useMiniMR = useMiniMR; this.useMiniKdc = useMiniKdc; this.serverPrincipal = serverPrincipal; this.serverKeytab = serverKeytab; + this.isMetastoreRemote = isMetastoreRemote; baseDir = Files.createTempDir(); FileSystem fs; if (useMiniMR) { @@ -169,6 +181,9 @@ private MiniHS2(HiveConf hiveConf, boolean useMiniMR, boolean useMiniKdc, String fs.mkdirs(baseDfsDir); Path wareHouseDir = new Path(baseDfsDir, "warehouse"); + // Create warehouse with 777, so that user impersonation has no issues. 
+ FileSystem.mkdirs(fs, wareHouseDir, FULL_PERM); + fs.mkdirs(wareHouseDir); setWareHouseDir(wareHouseDir.toString()); System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, metaStoreURL); @@ -180,10 +195,15 @@ private MiniHS2(HiveConf hiveConf, boolean useMiniMR, boolean useMiniKdc, String hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT, getHttpPort()); Path scratchDir = new Path(baseDfsDir, "scratch"); - fs.mkdirs(scratchDir); + + // Create scratchdir with 777, so that user impersonation has no issues. + FileSystem.mkdirs(fs, scratchDir, FULL_PERM); System.setProperty(HiveConf.ConfVars.SCRATCHDIR.varname, scratchDir.toString()); - System.setProperty(HiveConf.ConfVars.LOCALSCRATCHDIR.varname, - baseDir.getPath() + File.separator + "scratch"); + hiveConf.setVar(ConfVars.SCRATCHDIR, scratchDir.toString()); + + String localScratchDir = baseDir.getPath() + File.separator + "scratch"; + System.setProperty(HiveConf.ConfVars.LOCALSCRATCHDIR.varname, localScratchDir); + hiveConf.setVar(ConfVars.LOCALSCRATCHDIR, localScratchDir); } public MiniHS2(HiveConf hiveConf) throws Exception { @@ -191,10 +211,17 @@ public MiniHS2(HiveConf hiveConf) throws Exception { } public MiniHS2(HiveConf hiveConf, boolean useMiniMR) throws Exception { - this(hiveConf, useMiniMR, false, null, null); + this(hiveConf, useMiniMR, false, null, null, false); } public void start(Map confOverlay) throws Exception { + if (isMetastoreRemote) { + int metaStorePort = MetaStoreUtils.findFreePort(); + getHiveConf().setVar(ConfVars.METASTOREURIS, "thrift://localhost:" + metaStorePort); + MetaStoreUtils.startMetaStore(metaStorePort, + ShimLoader.getHadoopThriftAuthBridge(), getHiveConf()); + } + hiveServer2 = new HiveServer2(); // Set confOverlay parameters for (Map.Entry entry : confOverlay.entrySet()) { @@ -208,6 +235,9 @@ public void start(Map confOverlay) throws Exception { public void stop() { verifyStarted(); + // Currently there is no way to stop the MetaStore service. It will be stopped when the + // test JVM exits. This is how other tests are also using MetaStore server. + hiveServer2.stop(); setStarted(false); try { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java new file mode 100644 index 0000000..23ab8b6d --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java @@ -0,0 +1,240 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.security; + +import java.net.URI; +import java.util.ArrayList; +import java.util.List; + +import junit.framework.TestCase; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hive.cli.CliSessionState; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; +import org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * Test cases focusing on drop table permission checks + */ +public class TestStorageBasedMetastoreAuthorizationDrops extends TestCase{ + protected HiveConf clientHiveConf; + protected HiveMetaStoreClient msc; + protected Driver driver; + protected UserGroupInformation ugi; + private static int objNum = 0; + + protected String getAuthorizationProvider(){ + return StorageBasedAuthorizationProvider.class.getName(); + } + + protected HiveConf createHiveConf() throws Exception { + return new HiveConf(this.getClass()); + } + + @Override + protected void setUp() throws Exception { + + super.setUp(); + + int port = MetaStoreUtils.findFreePort(); + + // Turn on metastore-side authorization + System.setProperty(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname, + AuthorizationPreEventListener.class.getName()); + System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER.varname, + getAuthorizationProvider()); + System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname, + InjectableDummyAuthenticator.class.getName()); + + MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + + clientHiveConf = createHiveConf(); + + // Turn off client-side authorization + clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false); + + clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); + clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + + clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + + ugi = ShimLoader.getHadoopShims().getUGIForConf(clientHiveConf); + + SessionState.start(new CliSessionState(clientHiveConf)); + msc = new HiveMetaStoreClient(clientHiveConf, null); + driver = new Driver(clientHiveConf); + + setupFakeUser(); + } + + + public void testDropDatabase() throws Exception { + dropDatabaseByOtherUser("-rwxrwxrwx", 0); + dropDatabaseByOtherUser("-rwxrwxrwt", 1); + } + + /** + * Creates db and tries to drop as 'other' user + * @param perm - permission for warehouse dir + * @param expectedRet - expected return code for drop by other user + * @throws Exception + */ + private void dropDatabaseByOtherUser(String perm, int expectedRet) throws Exception { + String dbName = 
getTestDbName(); + setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), perm); + + CommandProcessorResponse resp = driver.run("create database " + dbName); + assertEquals(0, resp.getResponseCode()); + Database db = msc.getDatabase(dbName); + validateCreateDb(db, dbName); + + InjectableDummyAuthenticator.injectMode(true); + + + resp = driver.run("drop database " + dbName); + assertEquals(expectedRet, resp.getResponseCode()); + + } + + public void testDropTable() throws Exception { + dropTableByOtherUser("-rwxrwxrwx", 0); + dropTableByOtherUser("-rwxrwxrwt", 1); + } + + /** + * @param perm dir permission for database dir + * @param expectedRet expected return code on drop table + * @throws Exception + */ + private void dropTableByOtherUser(String perm, int expectedRet) throws Exception { + String dbName = getTestDbName(); + String tblName = getTestTableName(); + setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx"); + + CommandProcessorResponse resp = driver.run("create database " + dbName); + assertEquals(0, resp.getResponseCode()); + Database db = msc.getDatabase(dbName); + validateCreateDb(db, dbName); + + setPermissions(db.getLocationUri(), perm); + + String dbDotTable = dbName + "." + tblName; + resp = driver.run("create table " + dbDotTable + "(i int)"); + assertEquals(0, resp.getResponseCode()); + + + InjectableDummyAuthenticator.injectMode(true); + resp = driver.run("drop table " + dbDotTable); + assertEquals(expectedRet, resp.getResponseCode()); + } + + + public void testDropPartition() throws Exception { + dropPartitionByOtherUser("-rwxrwxrwx", 0); + dropPartitionByOtherUser("-rwxrwxrwt", 1); + } + + /** + * @param perm permissions for table dir + * @param expectedRet expected return code + * @throws Exception + */ + private void dropPartitionByOtherUser(String perm, int expectedRet) throws Exception { + String dbName = getTestDbName(); + String tblName = getTestTableName(); + setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx"); + + CommandProcessorResponse resp = driver.run("create database " + dbName); + assertEquals(0, resp.getResponseCode()); + Database db = msc.getDatabase(dbName); + validateCreateDb(db, dbName); + setPermissions(db.getLocationUri(), "-rwxrwxrwx"); + + String dbDotTable = dbName + "." 
+ tblName; + resp = driver.run("create table " + dbDotTable + "(i int) partitioned by (b string)"); + assertEquals(0, resp.getResponseCode()); + Table tab = msc.getTable(dbName, tblName); + setPermissions(tab.getSd().getLocation(), perm); + + resp = driver.run("alter table " + dbDotTable + " add partition (b='2011')"); + assertEquals(0, resp.getResponseCode()); + + InjectableDummyAuthenticator.injectMode(true); + resp = driver.run("alter table " + dbDotTable + " drop partition (b='2011')"); + assertEquals(expectedRet, resp.getResponseCode()); + } + + private void setupFakeUser() { + String fakeUser = "mal"; + List fakeGroupNames = new ArrayList(); + fakeGroupNames.add("groupygroup"); + + InjectableDummyAuthenticator.injectUserName(fakeUser); + InjectableDummyAuthenticator.injectGroupNames(fakeGroupNames); + InjectableDummyAuthenticator.injectMode(true); + } + + private String setupUser() { + return ugi.getUserName(); + } + + private String getTestTableName() { + return this.getClass().getSimpleName() + "tab" + ++objNum; + } + + private String getTestDbName() { + return this.getClass().getSimpleName() + "db" + ++objNum; + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + InjectableDummyAuthenticator.injectMode(false); + } + + protected void setPermissions(String locn, String permissions) throws Exception { + FileSystem fs = FileSystem.get(new URI(locn), clientHiveConf); + fs.setPermission(new Path(locn), FsPermission.valueOf(permissions)); + } + + private void validateCreateDb(Database expectedDb, String dbName) { + assertEquals(expectedDb.getName().toLowerCase(), dbName.toLowerCase()); + } + + private void validateCreateTable(Table expectedTable, String tblName, String dbName) { + assertNotNull(expectedTable); + assertEquals(expectedTable.getTableName().toLowerCase(),tblName.toLowerCase()); + assertEquals(expectedTable.getDbName().toLowerCase(),dbName.toLowerCase()); + } +} diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java index b447204..dc08271 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.security; import java.net.URI; -import java.security.AccessControlException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -48,7 +47,7 @@ protected String getAuthorizationProvider(){ @Override protected void allowCreateInDb(String dbName, String userName, String location) throws Exception { - setPermissions(location,"-rwxr--r--"); + setPermissions(location,"-rwxr--r-t"); } @Override @@ -79,7 +78,7 @@ protected void allowDropOnTable(String tblName, String userName, String location @Override protected void allowDropOnDb(String dbName, String userName, String location) throws Exception { - setPermissions(location,"-rwxr--r--"); + setPermissions(location,"-rwxr--r-t"); } protected void setPermissions(String locn, String permissions) throws Exception { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java index 9bda95d..53d88b0 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.verify; @@ -141,6 +142,47 @@ public void testInputAllColumnsUsed() throws HiveAuthzPluginException, HiveAcces } @Test + public void testCreateTableWithDb() throws HiveAuthzPluginException, HiveAccessControlException, + CommandNeedRetryException { + final String newTable = "ctTableWithDb"; + checkCreateViewOrTableWithDb(newTable, "create table " + dbName + "." + newTable + "(i int)"); + } + + @Test + public void testCreateViewWithDb() throws HiveAuthzPluginException, HiveAccessControlException, + CommandNeedRetryException { + final String newTable = "ctViewWithDb"; + checkCreateViewOrTableWithDb(newTable, "create table " + dbName + "." + newTable + "(i int)"); + } + + private void checkCreateViewOrTableWithDb(String newTable, String cmd) + throws HiveAuthzPluginException, HiveAccessControlException { + reset(mockedAuthorizer); + int status = driver.compile(cmd); + assertEquals(0, status); + + List outputs = getHivePrivilegeObjectInputs().getRight(); + assertEquals("num outputs", 2, outputs.size()); + for (HivePrivilegeObject output : outputs) { + switch (output.getType()) { + case DATABASE: + assertTrue("database name", output.getDbname().equalsIgnoreCase(dbName)); + break; + case TABLE_OR_VIEW: + assertTrue("database name", output.getDbname().equalsIgnoreCase(dbName)); + assertEqualsIgnoreCase("table name", output.getObjectName(), newTable); + break; + default: + fail("Unexpected type : " + output.getType()); + } + } + } + + private void assertEqualsIgnoreCase(String msg, String expected, String actual) { + assertEquals(msg, expected.toLowerCase(), actual.toLowerCase()); + } + + @Test public void testInputNoColumnsUsed() throws HiveAuthzPluginException, HiveAccessControlException, CommandNeedRetryException { diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/TestHS2ImpersonationWithRemoteMS.java b/itests/hive-unit/src/test/java/org/apache/hive/service/TestHS2ImpersonationWithRemoteMS.java new file mode 100644 index 0000000..fd89921 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/TestHS2ImpersonationWithRemoteMS.java @@ -0,0 +1,124 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hive.service; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim; +import org.apache.hive.jdbc.miniHS2.MiniHS2; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +import java.util.HashMap; + +/** + * Test HiveServer2 sends correct user name to remote MetaStore server for user impersonation. + */ +public class TestHS2ImpersonationWithRemoteMS { + + private static MiniHS2 miniHS2 = null; + + @BeforeClass + public static void startServices() throws Exception { + HiveConf hiveConf = new HiveConf(); + hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS, 1); + hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS, 1); + hiveConf.setBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI, true); + hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + + miniHS2 = new MiniHS2.Builder() + .withMiniMR() + .withRemoteMetastore() + .withConf(hiveConf).build(); + + miniHS2.start(new HashMap()); + } + + @AfterClass + public static void stopServices() throws Exception { + if (miniHS2 != null && miniHS2.isStarted()) { + miniHS2.stop(); + } + } + + @Test + public void testImpersonation() throws Exception { + assertTrue("Test setup failed. MiniHS2 is not initialized", + miniHS2 != null && miniHS2.isStarted()); + + Class.forName(MiniHS2.getJdbcDriverName()); + + // Create two tables one as user "foo" and other as user "bar" + Connection hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(), "foo", null); + Statement stmt = hs2Conn.createStatement(); + + String tableName = "foo_table"; + stmt.execute("drop table if exists " + tableName); + stmt.execute("create table " + tableName + " (value string)"); + + stmt.close(); + hs2Conn.close(); + + hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(), "bar", null); + stmt = hs2Conn.createStatement(); + + tableName = "bar_table"; + stmt.execute("drop table if exists " + tableName); + stmt.execute("create table " + tableName + " (value string)"); + + stmt.close(); + hs2Conn.close(); + + MiniDFSShim dfs = miniHS2.getDfs(); + FileSystem fs = dfs.getFileSystem(); + + FileStatus[] files = fs.listStatus(miniHS2.getWareHouseDir()); + boolean fooTableValidated = false; + boolean barTableValidated = false; + for(FileStatus file : files) { + final String name = file.getPath().getName(); + final String owner = file.getOwner(); + if (name.equals("foo_table")) { + fooTableValidated = owner.equals("foo"); + assertTrue(String.format("User 'foo' table has wrong ownership '%s'", owner), + fooTableValidated); + } else if (name.equals("bar_table")) { + barTableValidated = owner.equals("bar"); + assertTrue(String.format("User 'bar' table has wrong ownership '%s'", owner), + barTableValidated); + } else { + fail(String.format("Unexpected table directory '%s' in warehouse", name)); + } + + System.out.println(String.format("File: %s, Owner: %s", name, owner)); + } + + assertTrue("User 'foo' table not found in warehouse", fooTableValidated); + assertTrue("User 'bar' table not found in warehouse", barTableValidated); + } +} 
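For context, a minimal, hypothetical caller sketch (not part of this patch) of how the new FileUtils.checkDeletePermission() helper added in common/src/java/org/apache/hadoop/hive/common/FileUtils.java might be used to guard a directory delete before a drop operation. The class name, path, and user wiring below are illustrative assumptions; only the checkDeletePermission(Path, Configuration, String) signature comes from the patch.

// Hypothetical caller sketch: guard a drop-partition style delete with the
// new FileUtils.checkDeletePermission() helper. Path, user name and conf
// wiring here are illustrative assumptions, not code from this patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;

public class DropPartitionGuardSketch {
  public static void dropPartitionDir(Configuration conf, Path partDir, String user)
      throws Exception {
    // Throws if 'user' lacks write permission on the parent dir, or if the
    // parent has the sticky bit set and 'user' owns neither the parent nor
    // partDir itself (the rules documented in checkDeletePermission above).
    FileUtils.checkDeletePermission(partDir, conf, user);
    FileSystem fs = partDir.getFileSystem(conf);
    fs.delete(partDir, true); // safe to delete once the check passes
  }
}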
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties index 88ef4f8..02d71f8 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -86,6 +86,7 @@ minitez.query.files.shared=alter_merge_2_orc.q,\ mapreduce2.q,\ merge1.q,\ merge2.q,\ + metadataonly1.q,\ metadata_only_queries.q,\ optimize_nullscan.q,\ orc_analyze.q,\ diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerForTest.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerForTest.java index 89429b6..88004c8 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerForTest.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerForTest.java @@ -30,7 +30,7 @@ * To be used for testing purposes only! */ @Private -public class SQLStdHiveAccessControllerForTest extends SQLStdHiveAccessController { +public class SQLStdHiveAccessControllerForTest extends SQLStdHiveAccessControllerWrapper { SQLStdHiveAccessControllerForTest(HiveMetastoreClientFactory metastoreClientFactory, HiveConf conf, HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) throws HiveAuthzPluginException { diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java index 1d039ad..8edb253 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidatorForTest.java @@ -38,7 +38,7 @@ public SQLStdHiveAuthorizationValidatorForTest(HiveMetastoreClientFactory metastoreClientFactory, HiveConf conf, HiveAuthenticationProvider authenticator, - SQLStdHiveAccessController privController) { + SQLStdHiveAccessControllerWrapper privController) { super(metastoreClientFactory, conf, authenticator, privController); } diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java index 0f41a8f..bf00ae4 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactoryForTest.java @@ -32,7 +32,7 @@ @Override public HiveAuthorizer createHiveAuthorizer(HiveMetastoreClientFactory metastoreClientFactory, HiveConf conf, HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) throws HiveAuthzPluginException { - SQLStdHiveAccessController privilegeManager = + SQLStdHiveAccessControllerWrapper privilegeManager = new SQLStdHiveAccessControllerForTest(metastoreClientFactory, conf, authenticator, ctx); return new HiveAuthorizerImpl( privilegeManager, diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 06d7595..9e3481a 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -250,7 +250,7 @@ public TTransport getTransport(TTransport trans) { private static String currentUrl; private Warehouse wh; // hdfs warehouse - private final ThreadLocal threadLocalMS = + private static final ThreadLocal threadLocalMS = new ThreadLocal() { @Override protected synchronized RawStore initialValue() { @@ -265,6 +265,14 @@ protected synchronized TxnHandler initialValue() { } }; + public static RawStore getRawStore() { + return threadLocalMS.get(); + } + + public static void removeRawStore() { + threadLocalMS.remove(); + } + // Thread local configuration is needed as many threads could make changes // to the conf using the connection hook private final ThreadLocal threadLocalConf = @@ -384,6 +392,7 @@ public HiveConf getHiveConf() { } } + @Override public void init() throws MetaException { rawStoreClassName = hiveConf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL); initListeners = MetaStoreUtils.getMetaStoreListeners( diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 51c3f2c..5c8769a 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -45,9 +45,11 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; @@ -163,19 +165,25 @@ public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, return updateUnpartitionedTableStatsFast(db, tbl, wh, madeDir, false); } + public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, Warehouse wh, + boolean madeDir, boolean forceRecompute) throws MetaException { + return updateUnpartitionedTableStatsFast(tbl, + wh.getFileStatusesForUnpartitionedTable(db, tbl), madeDir, forceRecompute); + } + /** * Updates the numFiles and totalSize parameters for the passed unpartitioned Table by querying * the warehouse if the passed Table does not already have values for these parameters. 
- * @param db * @param tbl - * @param wh + * @param fileStatus * @param newDir if true, the directory was just created and can be assumed to be empty * @param forceRecompute Recompute stats even if the passed Table already has * these parameters set * @return true if the stats were updated, false otherwise */ - public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, Warehouse wh, - boolean newDir, boolean forceRecompute) throws MetaException { + public static boolean updateUnpartitionedTableStatsFast(Table tbl, + FileStatus[] fileStatus, boolean newDir, boolean forceRecompute) throws MetaException { + Map params = tbl.getParameters(); boolean updated = false; if (forceRecompute || @@ -188,7 +196,6 @@ public static boolean updateUnpartitionedTableStatsFast(Database db, Table tbl, // The table location already exists and may contain data. // Let's try to populate those stats that don't require full scan. LOG.info("Updating table stats fast for " + tbl.getTableName()); - FileStatus[] fileStatus = wh.getFileStatusesForUnpartitionedTable(db, tbl); populateQuickStats(fileStatus, params); LOG.info("Updated size of table " + tbl.getTableName() +" to "+ params.get(StatsSetupConst.TOTAL_SIZE)); if(!params.containsKey(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK)) { @@ -1043,11 +1050,17 @@ public static void makeDir(Path path, HiveConf hiveConf) throws MetaException { public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception { + startMetaStore(port, bridge, new HiveConf(HMSHandler.class)); + } + + public static void startMetaStore(final int port, + final HadoopThriftAuthBridge bridge, final HiveConf hiveConf) + throws Exception{ Thread thread = new Thread(new Runnable() { @Override public void run() { try { - HiveMetaStore.startMetaStore(port, bridge); + HiveMetaStore.startMetaStore(port, bridge, hiveConf); } catch (Throwable e) { LOG.error("Metastore Thrift Server threw an exception...",e); } @@ -1057,6 +1070,7 @@ public void run() { thread.start(); loopUntilHMSReady(port); } + /** * A simple connect test to make sure that the metastore is up * @throws Exception diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 0693039..4e76236 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -20,6 +20,7 @@ import static org.apache.commons.lang.StringUtils.join; +import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; @@ -129,6 +130,7 @@ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; import org.apache.hadoop.hive.metastore.parser.FilterLexer; import org.apache.hadoop.hive.metastore.parser.FilterParser; +import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.util.StringUtils; import org.apache.thrift.TException; import org.datanucleus.store.rdbms.exceptions.MissingTableException; @@ -252,6 +254,8 @@ private void initialize(Properties dsProps) { expressionProxy = createExpressionProxy(hiveConf); directSql = new MetaStoreDirectSql(pm); } + LOG.debug("RawStore: " + this + ", with PersistenceManager: " + pm + + " created in the thread with id: " + Thread.currentThread().getId()); } /** @@ -295,6 +299,16 @@ private static Properties getDataSourceProps(Configuration conf) { } } } + // Password may no longer be in the conf, use 
getPassword() + try { + String passwd = + ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.METASTOREPWD.varname); + if (passwd != null && !passwd.isEmpty()) { + prop.setProperty(HiveConf.ConfVars.METASTOREPWD.varname, passwd); + } + } catch (IOException err) { + throw new RuntimeException("Error getting metastore password: " + err.getMessage(), err); + } if (LOG.isDebugEnabled()) { for (Entry e : prop.entrySet()) { @@ -343,6 +357,8 @@ public PersistenceManager getPersistenceManager() { @Override public void shutdown() { if (pm != null) { + LOG.debug("RawStore: " + this + ", with PersistenceManager: " + pm + + " will be shutdown"); pm.close(); } } diff --git a/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java b/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java index db5f5e2..2ba2838 100644 --- a/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java +++ b/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java @@ -3735,6 +3735,515 @@ public Builder clearMaximum() { // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.DateStatistics) } + public interface TimestampStatisticsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional sint64 minimum = 1; + /** + * optional sint64 minimum = 1; + * + *
+     * min,max values saved as milliseconds since epoch
+     * 
+ */ + boolean hasMinimum(); + /** + * optional sint64 minimum = 1; + * + *
+     * min,max values saved as milliseconds since epoch
+     * 
+ */ + long getMinimum(); + + // optional sint64 maximum = 2; + /** + * optional sint64 maximum = 2; + */ + boolean hasMaximum(); + /** + * optional sint64 maximum = 2; + */ + long getMaximum(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.TimestampStatistics} + */ + public static final class TimestampStatistics extends + com.google.protobuf.GeneratedMessage + implements TimestampStatisticsOrBuilder { + // Use TimestampStatistics.newBuilder() to construct. + private TimestampStatistics(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TimestampStatistics(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TimestampStatistics defaultInstance; + public static TimestampStatistics getDefaultInstance() { + return defaultInstance; + } + + public TimestampStatistics getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TimestampStatistics( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + minimum_ = input.readSInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + maximum_ = input.readSInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.class, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TimestampStatistics parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TimestampStatistics(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional sint64 minimum 
= 1; + public static final int MINIMUM_FIELD_NUMBER = 1; + private long minimum_; + /** + * optional sint64 minimum = 1; + * + *
+     * min,max values saved as milliseconds since epoch
+     * 
+ */ + public boolean hasMinimum() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional sint64 minimum = 1; + * + *
+     * min,max values saved as milliseconds since epoch
+     * 
+ */ + public long getMinimum() { + return minimum_; + } + + // optional sint64 maximum = 2; + public static final int MAXIMUM_FIELD_NUMBER = 2; + private long maximum_; + /** + * optional sint64 maximum = 2; + */ + public boolean hasMaximum() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional sint64 maximum = 2; + */ + public long getMaximum() { + return maximum_; + } + + private void initFields() { + minimum_ = 0L; + maximum_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeSInt64(1, minimum_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeSInt64(2, maximum_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt64Size(1, minimum_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeSInt64Size(2, maximum_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics 
parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.TimestampStatistics} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.class, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder.class); + } + + // Construct using org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + minimum_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + maximum_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.ql.io.orc.OrcProto.internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor; + } + + public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getDefaultInstanceForType() { + return org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance(); + } + + public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics build() { + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics buildPartial() { + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics result = new org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.minimum_ = minimum_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.maximum_ = maximum_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics) { + return mergeFrom((org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics other) { + if (other == org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance()) return this; + if (other.hasMinimum()) { + setMinimum(other.getMinimum()); + } + if (other.hasMaximum()) { + setMaximum(other.getMaximum()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional sint64 minimum = 1; + private long minimum_ ; + /** + * optional sint64 minimum = 1; + * + *
+       * min,max values saved as milliseconds since epoch
+       * 
+ */ + public boolean hasMinimum() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional sint64 minimum = 1; + * + *
+       * min,max values saved as milliseconds since epoch
+       * 
+ */ + public long getMinimum() { + return minimum_; + } + /** + * optional sint64 minimum = 1; + * + *
+       * min,max values saved as milliseconds since epoch
+       * 
+ */ + public Builder setMinimum(long value) { + bitField0_ |= 0x00000001; + minimum_ = value; + onChanged(); + return this; + } + /** + * optional sint64 minimum = 1; + * + *
+       * min,max values saved as milliseconds since epoch
+       * 
+ */ + public Builder clearMinimum() { + bitField0_ = (bitField0_ & ~0x00000001); + minimum_ = 0L; + onChanged(); + return this; + } + + // optional sint64 maximum = 2; + private long maximum_ ; + /** + * optional sint64 maximum = 2; + */ + public boolean hasMaximum() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional sint64 maximum = 2; + */ + public long getMaximum() { + return maximum_; + } + /** + * optional sint64 maximum = 2; + */ + public Builder setMaximum(long value) { + bitField0_ |= 0x00000002; + maximum_ = value; + onChanged(); + return this; + } + /** + * optional sint64 maximum = 2; + */ + public Builder clearMaximum() { + bitField0_ = (bitField0_ & ~0x00000002); + maximum_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.TimestampStatistics) + } + + static { + defaultInstance = new TimestampStatistics(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.ql.io.orc.TimestampStatistics) + } + public interface BinaryStatisticsOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -4273,6 +4782,20 @@ public Builder clearSum() { * optional .org.apache.hadoop.hive.ql.io.orc.BinaryStatistics binaryStatistics = 8; */ org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatisticsOrBuilder getBinaryStatisticsOrBuilder(); + + // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + boolean hasTimestampStatistics(); + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics(); + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder(); } /** * Protobuf type {@code org.apache.hadoop.hive.ql.io.orc.ColumnStatistics} @@ -4421,6 +4944,19 @@ private ColumnStatistics( bitField0_ |= 0x00000080; break; } + case 74: { + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder subBuilder = null; + if (((bitField0_ & 0x00000100) == 0x00000100)) { + subBuilder = timestampStatistics_.toBuilder(); + } + timestampStatistics_ = input.readMessage(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(timestampStatistics_); + timestampStatistics_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000100; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -4631,6 +5167,28 @@ public boolean hasBinaryStatistics() { return binaryStatistics_; } + // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + public static final int TIMESTAMPSTATISTICS_FIELD_NUMBER = 9; + private org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics timestampStatistics_; + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + public boolean hasTimestampStatistics() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics() { + return 
timestampStatistics_; + } + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder() { + return timestampStatistics_; + } + private void initFields() { numberOfValues_ = 0L; intStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.IntegerStatistics.getDefaultInstance(); @@ -4640,6 +5198,7 @@ private void initFields() { decimalStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DecimalStatistics.getDefaultInstance(); dateStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.DateStatistics.getDefaultInstance(); binaryStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.BinaryStatistics.getDefaultInstance(); + timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -4677,6 +5236,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (((bitField0_ & 0x00000080) == 0x00000080)) { output.writeMessage(8, binaryStatistics_); } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeMessage(9, timestampStatistics_); + } getUnknownFields().writeTo(output); } @@ -4718,6 +5280,10 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(8, binaryStatistics_); } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(9, timestampStatistics_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -4833,6 +5399,7 @@ private void maybeForceBuilderInitialization() { getDecimalStatisticsFieldBuilder(); getDateStatisticsFieldBuilder(); getBinaryStatisticsFieldBuilder(); + getTimestampStatisticsFieldBuilder(); } } private static Builder create() { @@ -4885,6 +5452,12 @@ public Builder clear() { binaryStatisticsBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000080); + if (timestampStatisticsBuilder_ == null) { + timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance(); + } else { + timestampStatisticsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); return this; } @@ -4973,6 +5546,14 @@ public Builder clone() { } else { result.binaryStatistics_ = binaryStatisticsBuilder_.build(); } + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000100; + } + if (timestampStatisticsBuilder_ == null) { + result.timestampStatistics_ = timestampStatistics_; + } else { + result.timestampStatistics_ = timestampStatisticsBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -5013,6 +5594,9 @@ public Builder mergeFrom(org.apache.hadoop.hive.ql.io.orc.OrcProto.ColumnStatist if (other.hasBinaryStatistics()) { mergeBinaryStatistics(other.getBinaryStatistics()); } + if (other.hasTimestampStatistics()) { + mergeTimestampStatistics(other.getTimestampStatistics()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -5892,6 +6476,123 @@ public Builder clearBinaryStatistics() { return binaryStatisticsBuilder_; } + // optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + private org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance(); + private 
com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder> timestampStatisticsBuilder_; + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + public boolean hasTimestampStatistics() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics getTimestampStatistics() { + if (timestampStatisticsBuilder_ == null) { + return timestampStatistics_; + } else { + return timestampStatisticsBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + public Builder setTimestampStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics value) { + if (timestampStatisticsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + timestampStatistics_ = value; + onChanged(); + } else { + timestampStatisticsBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + public Builder setTimestampStatistics( + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder builderForValue) { + if (timestampStatisticsBuilder_ == null) { + timestampStatistics_ = builderForValue.build(); + onChanged(); + } else { + timestampStatisticsBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + public Builder mergeTimestampStatistics(org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics value) { + if (timestampStatisticsBuilder_ == null) { + if (((bitField0_ & 0x00000100) == 0x00000100) && + timestampStatistics_ != org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance()) { + timestampStatistics_ = + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.newBuilder(timestampStatistics_).mergeFrom(value).buildPartial(); + } else { + timestampStatistics_ = value; + } + onChanged(); + } else { + timestampStatisticsBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000100; + return this; + } + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + public Builder clearTimestampStatistics() { + if (timestampStatisticsBuilder_ == null) { + timestampStatistics_ = org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.getDefaultInstance(); + onChanged(); + } else { + timestampStatisticsBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000100); + return this; + } + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder getTimestampStatisticsBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return getTimestampStatisticsFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + public org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder getTimestampStatisticsOrBuilder() { + if (timestampStatisticsBuilder_ != null) { + 
return timestampStatisticsBuilder_.getMessageOrBuilder(); + } else { + return timestampStatistics_; + } + } + /** + * optional .org.apache.hadoop.hive.ql.io.orc.TimestampStatistics timestampStatistics = 9; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder> + getTimestampStatisticsFieldBuilder() { + if (timestampStatisticsBuilder_ == null) { + timestampStatisticsBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatistics.Builder, org.apache.hadoop.hive.ql.io.orc.OrcProto.TimestampStatisticsOrBuilder>( + timestampStatistics_, + getParentForChildren(), + isClean()); + timestampStatistics_ = null; + } + return timestampStatisticsBuilder_; + } + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.ql.io.orc.ColumnStatistics) } @@ -16654,6 +17355,11 @@ public Builder setMagicBytes( com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -16742,74 +17448,78 @@ public Builder setMagicBytes( "nt\030\001 \003(\004B\002\020\001\"B\n\021DecimalStatistics\022\017\n\007min" + "imum\030\001 \001(\t\022\017\n\007maximum\030\002 \001(\t\022\013\n\003sum\030\003 \001(\t" + "\"2\n\016DateStatistics\022\017\n\007minimum\030\001 \001(\021\022\017\n\007m", - "aximum\030\002 \001(\021\"\037\n\020BinaryStatistics\022\013\n\003sum\030" + - "\001 \001(\022\"\310\004\n\020ColumnStatistics\022\026\n\016numberOfVa" + - "lues\030\001 \001(\004\022J\n\rintStatistics\030\002 \001(\01323.org." 
+ - "apache.hadoop.hive.ql.io.orc.IntegerStat" + - "istics\022L\n\020doubleStatistics\030\003 \001(\01322.org.a" + - "pache.hadoop.hive.ql.io.orc.DoubleStatis" + - "tics\022L\n\020stringStatistics\030\004 \001(\01322.org.apa" + - "che.hadoop.hive.ql.io.orc.StringStatisti" + - "cs\022L\n\020bucketStatistics\030\005 \001(\01322.org.apach" + - "e.hadoop.hive.ql.io.orc.BucketStatistics", - "\022N\n\021decimalStatistics\030\006 \001(\01323.org.apache" + - ".hadoop.hive.ql.io.orc.DecimalStatistics" + - "\022H\n\016dateStatistics\030\007 \001(\01320.org.apache.ha" + - "doop.hive.ql.io.orc.DateStatistics\022L\n\020bi" + - "naryStatistics\030\010 \001(\01322.org.apache.hadoop" + - ".hive.ql.io.orc.BinaryStatistics\"n\n\rRowI" + - "ndexEntry\022\025\n\tpositions\030\001 \003(\004B\002\020\001\022F\n\nstat" + - "istics\030\002 \001(\01322.org.apache.hadoop.hive.ql" + - ".io.orc.ColumnStatistics\"J\n\010RowIndex\022>\n\005" + - "entry\030\001 \003(\0132/.org.apache.hadoop.hive.ql.", - "io.orc.RowIndexEntry\"\331\001\n\006Stream\022;\n\004kind\030" + - "\001 \002(\0162-.org.apache.hadoop.hive.ql.io.orc" + - ".Stream.Kind\022\016\n\006column\030\002 \001(\r\022\016\n\006length\030\003" + - " \001(\004\"r\n\004Kind\022\013\n\007PRESENT\020\000\022\010\n\004DATA\020\001\022\n\n\006L" + - "ENGTH\020\002\022\023\n\017DICTIONARY_DATA\020\003\022\024\n\020DICTIONA" + - "RY_COUNT\020\004\022\r\n\tSECONDARY\020\005\022\r\n\tROW_INDEX\020\006" + - "\"\263\001\n\016ColumnEncoding\022C\n\004kind\030\001 \002(\01625.org." + - "apache.hadoop.hive.ql.io.orc.ColumnEncod" + - "ing.Kind\022\026\n\016dictionarySize\030\002 \001(\r\"D\n\004Kind" + - "\022\n\n\006DIRECT\020\000\022\016\n\nDICTIONARY\020\001\022\r\n\tDIRECT_V", - "2\020\002\022\021\n\rDICTIONARY_V2\020\003\"\214\001\n\014StripeFooter\022" + - "9\n\007streams\030\001 \003(\0132(.org.apache.hadoop.hiv" + - "e.ql.io.orc.Stream\022A\n\007columns\030\002 \003(\01320.or" + - "g.apache.hadoop.hive.ql.io.orc.ColumnEnc" + - "oding\"\370\002\n\004Type\0229\n\004kind\030\001 \002(\0162+.org.apach" + - "e.hadoop.hive.ql.io.orc.Type.Kind\022\024\n\010sub" + - "types\030\002 \003(\rB\002\020\001\022\022\n\nfieldNames\030\003 \003(\t\022\025\n\rm" + - "aximumLength\030\004 \001(\r\022\021\n\tprecision\030\005 \001(\r\022\r\n" + - "\005scale\030\006 \001(\r\"\321\001\n\004Kind\022\013\n\007BOOLEAN\020\000\022\010\n\004BY" + - "TE\020\001\022\t\n\005SHORT\020\002\022\007\n\003INT\020\003\022\010\n\004LONG\020\004\022\t\n\005FL", - "OAT\020\005\022\n\n\006DOUBLE\020\006\022\n\n\006STRING\020\007\022\n\n\006BINARY\020" + - "\010\022\r\n\tTIMESTAMP\020\t\022\010\n\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n\006S" + - "TRUCT\020\014\022\t\n\005UNION\020\r\022\013\n\007DECIMAL\020\016\022\010\n\004DATE\020" + - "\017\022\013\n\007VARCHAR\020\020\022\010\n\004CHAR\020\021\"x\n\021StripeInform" + - "ation\022\016\n\006offset\030\001 \001(\004\022\023\n\013indexLength\030\002 \001" + - "(\004\022\022\n\ndataLength\030\003 \001(\004\022\024\n\014footerLength\030\004" + - " \001(\004\022\024\n\014numberOfRows\030\005 \001(\004\"/\n\020UserMetada" + - "taItem\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\014\"X\n\020S" + - "tripeStatistics\022D\n\010colStats\030\001 \003(\01322.org." 
+ - "apache.hadoop.hive.ql.io.orc.ColumnStati", - "stics\"S\n\010Metadata\022G\n\013stripeStats\030\001 \003(\01322" + - ".org.apache.hadoop.hive.ql.io.orc.Stripe" + - "Statistics\"\356\002\n\006Footer\022\024\n\014headerLength\030\001 " + - "\001(\004\022\025\n\rcontentLength\030\002 \001(\004\022D\n\007stripes\030\003 " + - "\003(\01323.org.apache.hadoop.hive.ql.io.orc.S" + - "tripeInformation\0225\n\005types\030\004 \003(\0132&.org.ap" + - "ache.hadoop.hive.ql.io.orc.Type\022D\n\010metad" + - "ata\030\005 \003(\01322.org.apache.hadoop.hive.ql.io" + - ".orc.UserMetadataItem\022\024\n\014numberOfRows\030\006 " + - "\001(\004\022F\n\nstatistics\030\007 \003(\01322.org.apache.had", - "oop.hive.ql.io.orc.ColumnStatistics\022\026\n\016r" + - "owIndexStride\030\010 \001(\r\"\305\001\n\nPostScript\022\024\n\014fo" + - "oterLength\030\001 \001(\004\022F\n\013compression\030\002 \001(\01621." + - "org.apache.hadoop.hive.ql.io.orc.Compres" + - "sionKind\022\034\n\024compressionBlockSize\030\003 \001(\004\022\023" + - "\n\007version\030\004 \003(\rB\002\020\001\022\026\n\016metadataLength\030\005 " + - "\001(\004\022\016\n\005magic\030\300> \001(\t*:\n\017CompressionKind\022\010" + - "\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003" + "aximum\030\002 \001(\021\"7\n\023TimestampStatistics\022\017\n\007m" + + "inimum\030\001 \001(\022\022\017\n\007maximum\030\002 \001(\022\"\037\n\020BinaryS" + + "tatistics\022\013\n\003sum\030\001 \001(\022\"\234\005\n\020ColumnStatist" + + "ics\022\026\n\016numberOfValues\030\001 \001(\004\022J\n\rintStatis" + + "tics\030\002 \001(\01323.org.apache.hadoop.hive.ql.i" + + "o.orc.IntegerStatistics\022L\n\020doubleStatist" + + "ics\030\003 \001(\01322.org.apache.hadoop.hive.ql.io" + + ".orc.DoubleStatistics\022L\n\020stringStatistic" + + "s\030\004 \001(\01322.org.apache.hadoop.hive.ql.io.o" + + "rc.StringStatistics\022L\n\020bucketStatistics\030", + "\005 \001(\01322.org.apache.hadoop.hive.ql.io.orc" + + ".BucketStatistics\022N\n\021decimalStatistics\030\006" + + " \001(\01323.org.apache.hadoop.hive.ql.io.orc." + + "DecimalStatistics\022H\n\016dateStatistics\030\007 \001(" + + "\01320.org.apache.hadoop.hive.ql.io.orc.Dat" + + "eStatistics\022L\n\020binaryStatistics\030\010 \001(\01322." + + "org.apache.hadoop.hive.ql.io.orc.BinaryS" + + "tatistics\022R\n\023timestampStatistics\030\t \001(\01325" + + ".org.apache.hadoop.hive.ql.io.orc.Timest" + + "ampStatistics\"n\n\rRowIndexEntry\022\025\n\tpositi", + "ons\030\001 \003(\004B\002\020\001\022F\n\nstatistics\030\002 \001(\01322.org." + + "apache.hadoop.hive.ql.io.orc.ColumnStati" + + "stics\"J\n\010RowIndex\022>\n\005entry\030\001 \003(\0132/.org.a" + + "pache.hadoop.hive.ql.io.orc.RowIndexEntr" + + "y\"\331\001\n\006Stream\022;\n\004kind\030\001 \002(\0162-.org.apache." + + "hadoop.hive.ql.io.orc.Stream.Kind\022\016\n\006col" + + "umn\030\002 \001(\r\022\016\n\006length\030\003 \001(\004\"r\n\004Kind\022\013\n\007PRE" + + "SENT\020\000\022\010\n\004DATA\020\001\022\n\n\006LENGTH\020\002\022\023\n\017DICTIONA" + + "RY_DATA\020\003\022\024\n\020DICTIONARY_COUNT\020\004\022\r\n\tSECON" + + "DARY\020\005\022\r\n\tROW_INDEX\020\006\"\263\001\n\016ColumnEncoding", + "\022C\n\004kind\030\001 \002(\01625.org.apache.hadoop.hive." 
+ + "ql.io.orc.ColumnEncoding.Kind\022\026\n\016diction" + + "arySize\030\002 \001(\r\"D\n\004Kind\022\n\n\006DIRECT\020\000\022\016\n\nDIC" + + "TIONARY\020\001\022\r\n\tDIRECT_V2\020\002\022\021\n\rDICTIONARY_V" + + "2\020\003\"\214\001\n\014StripeFooter\0229\n\007streams\030\001 \003(\0132(." + + "org.apache.hadoop.hive.ql.io.orc.Stream\022" + + "A\n\007columns\030\002 \003(\01320.org.apache.hadoop.hiv" + + "e.ql.io.orc.ColumnEncoding\"\370\002\n\004Type\0229\n\004k" + + "ind\030\001 \002(\0162+.org.apache.hadoop.hive.ql.io" + + ".orc.Type.Kind\022\024\n\010subtypes\030\002 \003(\rB\002\020\001\022\022\n\n", + "fieldNames\030\003 \003(\t\022\025\n\rmaximumLength\030\004 \001(\r\022" + + "\021\n\tprecision\030\005 \001(\r\022\r\n\005scale\030\006 \001(\r\"\321\001\n\004Ki" + + "nd\022\013\n\007BOOLEAN\020\000\022\010\n\004BYTE\020\001\022\t\n\005SHORT\020\002\022\007\n\003" + + "INT\020\003\022\010\n\004LONG\020\004\022\t\n\005FLOAT\020\005\022\n\n\006DOUBLE\020\006\022\n" + + "\n\006STRING\020\007\022\n\n\006BINARY\020\010\022\r\n\tTIMESTAMP\020\t\022\010\n" + + "\004LIST\020\n\022\007\n\003MAP\020\013\022\n\n\006STRUCT\020\014\022\t\n\005UNION\020\r\022" + + "\013\n\007DECIMAL\020\016\022\010\n\004DATE\020\017\022\013\n\007VARCHAR\020\020\022\010\n\004C" + + "HAR\020\021\"x\n\021StripeInformation\022\016\n\006offset\030\001 \001" + + "(\004\022\023\n\013indexLength\030\002 \001(\004\022\022\n\ndataLength\030\003 " + + "\001(\004\022\024\n\014footerLength\030\004 \001(\004\022\024\n\014numberOfRow", + "s\030\005 \001(\004\"/\n\020UserMetadataItem\022\014\n\004name\030\001 \002(" + + "\t\022\r\n\005value\030\002 \002(\014\"X\n\020StripeStatistics\022D\n\010" + + "colStats\030\001 \003(\01322.org.apache.hadoop.hive." + + "ql.io.orc.ColumnStatistics\"S\n\010Metadata\022G" + + "\n\013stripeStats\030\001 \003(\01322.org.apache.hadoop." + + "hive.ql.io.orc.StripeStatistics\"\356\002\n\006Foot" + + "er\022\024\n\014headerLength\030\001 \001(\004\022\025\n\rcontentLengt" + + "h\030\002 \001(\004\022D\n\007stripes\030\003 \003(\01323.org.apache.ha" + + "doop.hive.ql.io.orc.StripeInformation\0225\n" + + "\005types\030\004 \003(\0132&.org.apache.hadoop.hive.ql", + ".io.orc.Type\022D\n\010metadata\030\005 \003(\01322.org.apa" + + "che.hadoop.hive.ql.io.orc.UserMetadataIt" + + "em\022\024\n\014numberOfRows\030\006 \001(\004\022F\n\nstatistics\030\007" + + " \003(\01322.org.apache.hadoop.hive.ql.io.orc." 
+ + "ColumnStatistics\022\026\n\016rowIndexStride\030\010 \001(\r" + + "\"\305\001\n\nPostScript\022\024\n\014footerLength\030\001 \001(\004\022F\n" + + "\013compression\030\002 \001(\01621.org.apache.hadoop.h" + + "ive.ql.io.orc.CompressionKind\022\034\n\024compres" + + "sionBlockSize\030\003 \001(\004\022\023\n\007version\030\004 \003(\rB\002\020\001" + + "\022\026\n\016metadataLength\030\005 \001(\004\022\016\n\005magic\030\300> \001(\t", + "*:\n\017CompressionKind\022\010\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022" + + "\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -16852,86 +17562,92 @@ public Builder setMagicBytes( com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_DateStatistics_descriptor, new java.lang.String[] { "Minimum", "Maximum", }); - internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor = + internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor = getDescriptor().getMessageTypes().get(6); + internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_ql_io_orc_TimestampStatistics_descriptor, + new java.lang.String[] { "Minimum", "Maximum", }); + internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor = + getDescriptor().getMessageTypes().get(7); internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_BinaryStatistics_descriptor, new java.lang.String[] { "Sum", }); internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(8); internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnStatistics_descriptor, - new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", "BinaryStatistics", }); + new java.lang.String[] { "NumberOfValues", "IntStatistics", "DoubleStatistics", "StringStatistics", "BucketStatistics", "DecimalStatistics", "DateStatistics", "BinaryStatistics", "TimestampStatistics", }); internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(9); internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndexEntry_descriptor, new java.lang.String[] { "Positions", "Statistics", }); internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(10); internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_RowIndex_descriptor, new java.lang.String[] { "Entry", }); 
internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(11); internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_Stream_descriptor, new java.lang.String[] { "Kind", "Column", "Length", }); internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(12); internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_ColumnEncoding_descriptor, new java.lang.String[] { "Kind", "DictionarySize", }); internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(13); internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_StripeFooter_descriptor, new java.lang.String[] { "Streams", "Columns", }); internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(14); internal_static_org_apache_hadoop_hive_ql_io_orc_Type_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_Type_descriptor, new java.lang.String[] { "Kind", "Subtypes", "FieldNames", "MaximumLength", "Precision", "Scale", }); internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(15); internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_StripeInformation_descriptor, new java.lang.String[] { "Offset", "IndexLength", "DataLength", "FooterLength", "NumberOfRows", }); internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(16); internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_UserMetadataItem_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(17); internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_StripeStatistics_descriptor, new java.lang.String[] { "ColStats", }); internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(18); internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_Metadata_descriptor, new java.lang.String[] { "StripeStats", }); 
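The generated code above introduces the TimestampStatistics message (optional sint64 minimum/maximum, stored as milliseconds since epoch) and wires it in as field 9 of ColumnStatistics. As a rough illustration of how the generated API fits together (not part of this patch; the class name and sample millisecond values below are made up, and it assumes the OrcProto classes compiled from this change are on the classpath):

import com.google.protobuf.ByteString;
import org.apache.hadoop.hive.ql.io.orc.OrcProto;

public class TimestampStatsSketch {
  public static void main(String[] args) throws Exception {
    // Build a TimestampStatistics message; min/max are epoch milliseconds.
    OrcProto.TimestampStatistics ts = OrcProto.TimestampStatistics.newBuilder()
        .setMinimum(1388534400000L)   // illustrative value: 2014-01-01 00:00:00 UTC
        .setMaximum(1420070399000L)   // illustrative value: 2014-12-31 23:59:59 UTC
        .build();

    // Attach it to a ColumnStatistics message via the new field 9.
    OrcProto.ColumnStatistics colStats = OrcProto.ColumnStatistics.newBuilder()
        .setNumberOfValues(2)
        .setTimestampStatistics(ts)
        .build();

    // Round-trip through the wire format, as a reader of the ORC footer would.
    ByteString bytes = colStats.toByteString();
    OrcProto.ColumnStatistics parsed = OrcProto.ColumnStatistics.parseFrom(bytes);
    if (parsed.hasTimestampStatistics()) {
      System.out.println("min(ms)=" + parsed.getTimestampStatistics().getMinimum()
          + " max(ms)=" + parsed.getTimestampStatistics().getMaximum());
    }
  }
}

This only exercises the generated builder/parser surface shown in the diff; the actual population of these fields in ORC files happens in TimestampStatisticsImpl in ColumnStatisticsImpl.java, further down in this patch.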
internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(19); internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_Footer_descriptor, new java.lang.String[] { "HeaderLength", "ContentLength", "Stripes", "Types", "Metadata", "NumberOfRows", "Statistics", "RowIndexStride", }); internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(20); internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 1dde78e..d4e61d8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -92,8 +92,8 @@ protected transient ListBucketingCtx lbCtx; protected transient boolean isSkewedStoredAsSubDirectories; protected transient boolean statsCollectRawDataSize; - private transient boolean[] statsFromRecordWriter; - private transient boolean isCollectRWStats; + protected transient boolean[] statsFromRecordWriter; + protected transient boolean isCollectRWStats; private transient FSPaths prevFsp; private transient FSPaths fpaths; private transient ObjectInspector keyOI; @@ -626,7 +626,7 @@ public void processOp(Object row, int tag) throws HiveException { } } - private boolean areAllTrue(boolean[] statsFromRW) { + protected boolean areAllTrue(boolean[] statsFromRW) { for(boolean b : statsFromRW) { if (!b) { return false; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 50847de..237c976 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -112,6 +112,7 @@ import org.apache.hadoop.hive.ql.exec.mr.ExecReducer; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; import org.apache.hadoop.hive.ql.exec.spark.SparkTask; +import org.apache.hadoop.hive.ql.exec.tez.DagUtils; import org.apache.hadoop.hive.ql.exec.tez.TezTask; import org.apache.hadoop.hive.ql.io.ContentSummaryInputFormat; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; @@ -1546,6 +1547,24 @@ public static void renameOrMoveFiles(FileSystem fs, Path src, Path dst) throws I Pattern.compile("^.*?([0-9]+)(_[0-9]{1,6})?(\\..*)?$"); /** + * Some jobs like "INSERT INTO" jobs create copies of files like 0000001_0_copy_2. 
+ * For such files, + * Group 1: 00000001 [taskId] + * Group 3: 0 [task attempId] + * Group 4: _copy_2 [copy suffix] + * Group 6: copy [copy keyword] + * Group 8: 2 [copy file index] + */ + private static final Pattern COPY_FILE_NAME_TO_TASK_ID_REGEX = + Pattern.compile("^.*?"+ // any prefix + "([0-9]+)"+ // taskId + "(_)"+ // separator + "([0-9]{1,6})?"+ // attemptId (limited to 6 digits) + "((_)(\\Bcopy\\B)(_)"+ // copy keyword + "([0-9]{1,6})$)?"+ // copy file index + "(\\..*)?$"); // any suffix/file extension + + /** * This retruns prefix part + taskID for bucket join for partitioned table */ private static final Pattern FILE_NAME_PREFIXED_TASK_ID_REGEX = @@ -1870,21 +1889,42 @@ public static void removeTempOrDuplicateFiles(FileSystem fs, Path path) throws I // speculative runs), but the largest should be the correct one since the result // of a successful run should never be smaller than a failed/speculative run. FileStatus toDelete = null; - if (otherFile.getLen() >= one.getLen()) { - toDelete = one; - } else { - toDelete = otherFile; - taskIdToFile.put(taskId, one); - } - long len1 = toDelete.getLen(); - long len2 = taskIdToFile.get(taskId).getLen(); - if (!fs.delete(toDelete.getPath(), true)) { - throw new IOException("Unable to delete duplicate file: " + toDelete.getPath() - + ". Existing file: " + taskIdToFile.get(taskId).getPath()); + + // "LOAD .. INTO" and "INSERT INTO" commands will generate files with + // "_copy_x" suffix. These files are usually read by map tasks and the + // task output gets written to some tmp path. The output file names will + // be of format taskId_attemptId. The usual path for all these tasks is + // srcPath -> taskTmpPath -> tmpPath -> finalPath. + // But, MergeFileTask can move files directly from src path to final path + // without copying it to tmp path. In such cases, different files with + // "_copy_x" suffix will be identified as duplicates (change in value + // of x is wrongly identified as attempt id) and will be deleted. + // To avoid that we will ignore files with "_copy_x" suffix from duplicate + // elimination. + if (!isCopyFile(one.getPath().getName())) { + if (otherFile.getLen() >= one.getLen()) { + toDelete = one; + } else { + toDelete = otherFile; + taskIdToFile.put(taskId, one); + } + long len1 = toDelete.getLen(); + long len2 = taskIdToFile.get(taskId).getLen(); + if (!fs.delete(toDelete.getPath(), true)) { + throw new IOException( + "Unable to delete duplicate file: " + toDelete.getPath() + + ". Existing file: " + + taskIdToFile.get(taskId).getPath()); + } else { + LOG.warn("Duplicate taskid file removed: " + toDelete.getPath() + + " with length " + + len1 + ". Existing file: " + + taskIdToFile.get(taskId).getPath() + " with length " + + len2); + } } else { - LOG.warn("Duplicate taskid file removed: " + toDelete.getPath() + " with length " - + len1 + ". Existing file: " + taskIdToFile.get(taskId).getPath() + " with length " - + len2); + LOG.info(one.getPath() + " file identified as duplicate. 
This file is" + + " not deleted as it has copySuffix."); } } } @@ -1892,6 +1932,29 @@ public static void removeTempOrDuplicateFiles(FileSystem fs, Path path) throws I return taskIdToFile; } + public static boolean isCopyFile(String filename) { + String taskId = filename; + String copyFileSuffix = null; + int dirEnd = filename.lastIndexOf(Path.SEPARATOR); + if (dirEnd != -1) { + taskId = filename.substring(dirEnd + 1); + } + Matcher m = COPY_FILE_NAME_TO_TASK_ID_REGEX.matcher(taskId); + if (!m.matches()) { + LOG.warn("Unable to verify if file name " + filename + " has _copy_ suffix."); + } else { + taskId = m.group(1); + copyFileSuffix = m.group(4); + } + + LOG.debug("Filename: " + filename + " TaskId: " + taskId + " CopySuffix: " + copyFileSuffix); + if (taskId != null && copyFileSuffix != null) { + return true; + } + + return false; + } + public static String getNameMessage(Exception e) { return e.getClass().getName() + "(" + e.getMessage() + ")"; } @@ -3040,7 +3103,7 @@ public static double getHighestSamplePercentage (MapWork work) { * so we don't want to depend on scratch dir and context. */ public static List getInputPathsTez(JobConf job, MapWork work) throws Exception { - String scratchDir = HiveConf.getVar(job, HiveConf.ConfVars.SCRATCHDIR); + String scratchDir = job.get(DagUtils.TEZ_TMP_DIR_KEY); // we usually don't want to create dummy files for tez, however the metadata only // optimization relies on it. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java index e116426..ebe9f92 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java @@ -124,6 +124,7 @@ */ public class DagUtils { + public static final String TEZ_TMP_DIR_KEY = "_hive_tez_tmp_dir"; private static final Log LOG = LogFactory.getLog(DagUtils.class.getName()); private static final String TEZ_DIR = "_tez_scratch_dir"; private static DagUtils instance; @@ -158,7 +159,7 @@ private void addCredentials(ReduceWork reduceWork, DAG dag) { * Creates the configuration object necessary to run a specific vertex from * map work. This includes input formats, input processor, etc. */ - private JobConf initializeVertexConf(JobConf baseConf, MapWork mapWork) { + private JobConf initializeVertexConf(JobConf baseConf, Context context, MapWork mapWork) { JobConf conf = new JobConf(baseConf); if (mapWork.getNumMapTasks() != null) { @@ -200,6 +201,7 @@ private JobConf initializeVertexConf(JobConf baseConf, MapWork mapWork) { inpFormat = CombineHiveInputFormat.class.getName(); } + conf.set(TEZ_TMP_DIR_KEY, context.getMRTmpPath().toUri().toString()); conf.set("mapred.mapper.class", ExecMapper.class.getName()); conf.set("mapred.input.format.class", inpFormat); @@ -524,7 +526,7 @@ private Vertex createVertex(JobConf conf, MapWork mapWork, /* * Helper function to create JobConf for specific ReduceWork. */ - private JobConf initializeVertexConf(JobConf baseConf, ReduceWork reduceWork) { + private JobConf initializeVertexConf(JobConf baseConf, Context context, ReduceWork reduceWork) { JobConf conf = new JobConf(baseConf); conf.set("mapred.reducer.class", ExecReducer.class.getName()); @@ -896,14 +898,14 @@ public JobConf createConfiguration(HiveConf hiveConf) throws IOException { * @param work BaseWork will be used to populate the configuration object. 
* @return JobConf new configuration object */ - public JobConf initializeVertexConf(JobConf conf, BaseWork work) { + public JobConf initializeVertexConf(JobConf conf, Context context, BaseWork work) { // simply dispatch the call to the right method for the actual (sub-) type of // BaseWork. if (work instanceof MapWork) { - return initializeVertexConf(conf, (MapWork)work); + return initializeVertexConf(conf, context, (MapWork)work); } else if (work instanceof ReduceWork) { - return initializeVertexConf(conf, (ReduceWork)work); + return initializeVertexConf(conf, context, (ReduceWork)work); } else { assert false; return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java index 951e918..62de830 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java @@ -263,7 +263,7 @@ DAG build(JobConf conf, TezWork work, Path scratchDir, } } else { // Regular vertices - JobConf wxConf = utils.initializeVertexConf(conf, w); + JobConf wxConf = utils.initializeVertexConf(conf, ctx, w); Vertex wx = utils.createVertex(wxConf, w, scratchDir, appJarLr, additionalLr, fs, ctx, !isFinal, work); dag.addVertex(wx); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java index c6a7c00..e546dd1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java @@ -145,7 +145,11 @@ public void processOp(Object data, int tag) throws HiveException { } rowOutWriters = fpaths.getOutWriters(); - if (conf.isGatherStats()) { + // check if all record writers implement statistics. if atleast one RW + // doesn't implement stats interface we will fallback to conventional way + // of gathering stats + isCollectRWStats = areAllTrue(statsFromRecordWriter); + if (conf.isGatherStats() && !isCollectRWStats) { if (statsCollectRawDataSize) { SerDeStats stats = serializer.getSerDeStats(); if (stats != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java index 7ed50b4..e3bc3b1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/ReadEntity.java @@ -19,7 +19,9 @@ package org.apache.hadoop.hive.ql.hooks; import java.io.Serializable; +import java.util.ArrayList; import java.util.HashSet; +import java.util.List; import java.util.Set; import org.apache.hadoop.fs.Path; @@ -49,7 +51,8 @@ // For views, the entities can be nested - by default, entities are at the top level private final Set parents = new HashSet(); - + // The accessed columns of query + private final List accessedColumns = new ArrayList(); /** * For serialization only. 
@@ -159,4 +162,8 @@ public boolean needsLock() { public void noLockNeeded() { needsLock = false; } + + public List getAccessedColumns() { + return accessedColumns; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java index ae89182..0e8807e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java @@ -66,7 +66,7 @@ public void run(SessionState sess, Set inputs, case TABLE: { Table t = db.getTable(re.getTable().getTableName()); t.setLastAccessTime(lastAccessTime); - db.alterTable(t.getTableName(), t); + db.alterTable(t.getDbName() + "." + t.getTableName(), t); break; } case PARTITION: { @@ -76,7 +76,7 @@ public void run(SessionState sess, Set inputs, p.setLastAccessTime(lastAccessTime); db.alterPartition(t.getTableName(), p); t.setLastAccessTime(lastAccessTime); - db.alterTable(t.getTableName(), t); + db.alterTable(t.getDbName() + "." + t.getTableName(), t); break; } default: diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java index 1e01001..eeb343b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexMetadataChangeTask.java @@ -77,7 +77,7 @@ protected int execute(DriverContext driverContext) { FileSystem fs = url.getFileSystem(conf); FileStatus fstat = fs.getFileStatus(url); tbl.getParameters().put(HiveIndex.INDEX_TABLE_CREATETIME, Long.toString(fstat.getModificationTime())); - db.alterTable(tbl.getTableName(), tbl); + db.alterTable(tbl.getDbName() + "." + tbl.getTableName(), tbl); } } catch (Exception e) { e.printStackTrace(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java index 27e251c..b076933 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java @@ -233,7 +233,8 @@ public void analyzeIndexDefinition(Table baseTable, Index index, StringBuilder command= new StringBuilder(); LinkedHashMap partSpec = indexTblPartDesc.getPartSpec(); - command.append("INSERT OVERWRITE TABLE " + HiveUtils.unparseIdentifier(indexTableName )); + command.append("INSERT OVERWRITE TABLE " + + HiveUtils.unparseIdentifier(dbName) + "." + HiveUtils.unparseIdentifier(indexTableName )); if (partitioned && indexTblPartDesc != null) { command.append(" PARTITION ( "); List ret = getPartKVPairStringArray(partSpec); @@ -257,7 +258,8 @@ public void analyzeIndexDefinition(Table baseTable, Index index, command.append("EWAH_BITMAP("); command.append(VirtualColumn.ROWOFFSET.getName()); command.append(")"); - command.append(" FROM " + HiveUtils.unparseIdentifier(baseTableName) ); + command.append(" FROM " + + HiveUtils.unparseIdentifier(dbName) + "." 
+ HiveUtils.unparseIdentifier(baseTableName)); LinkedHashMap basePartSpec = baseTablePartDesc.getPartSpec(); if(basePartSpec != null) { command.append(" WHERE "); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java index e7434a3..0ca5d22 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java @@ -103,7 +103,8 @@ public void analyzeIndexDefinition(Table baseTable, Index index, StringBuilder command= new StringBuilder(); LinkedHashMap partSpec = indexTblPartDesc.getPartSpec(); - command.append("INSERT OVERWRITE TABLE " + HiveUtils.unparseIdentifier(indexTableName )); + command.append("INSERT OVERWRITE TABLE " + + HiveUtils.unparseIdentifier(dbName) + "." + HiveUtils.unparseIdentifier(indexTableName )); if (partitioned && indexTblPartDesc != null) { command.append(" PARTITION ( "); List ret = getPartKVPairStringArray(partSpec); @@ -126,7 +127,8 @@ public void analyzeIndexDefinition(Table baseTable, Index index, command.append(" collect_set ("); command.append(VirtualColumn.BLOCKOFFSET.getName()); command.append(") "); - command.append(" FROM " + HiveUtils.unparseIdentifier(baseTableName) ); + command.append(" FROM " + + HiveUtils.unparseIdentifier(dbName) + "." + HiveUtils.unparseIdentifier(baseTableName)); LinkedHashMap basePartSpec = baseTablePartDesc.getPartSpec(); if(basePartSpec != null) { command.append(" WHERE "); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeMapper.java index beb4f7d..6c691b1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeMapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeMapper.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.ql.io.merge; -import java.io.IOException; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileStatus; @@ -31,6 +29,10 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapReduceBase; +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + public class MergeMapper extends MapReduceBase { protected JobConf jc; protected Path finalPath; @@ -48,6 +50,7 @@ protected Path tmpPath; protected Path taskTmpPath; protected Path dpPath; + protected Set incompatFileSet; public final static Log LOG = LogFactory.getLog("MergeMapper"); @@ -62,6 +65,7 @@ public void configure(JobConf job) { HiveConf.ConfVars.HIVEMERGECURRENTJOBCONCATENATELISTBUCKETINGDEPTH); Path specPath = MergeOutputFormat.getMergeOutputPath(job); + incompatFileSet = new HashSet(); Path tmpPath = Utilities.toTempPath(specPath); Path taskTmpPath = Utilities.toTaskTempPath(specPath); updatePaths(tmpPath, taskTmpPath); @@ -176,6 +180,23 @@ public void close() throws IOException { if (!fs.rename(outPath, finalPath)) { throw new IOException("Unable to rename output to " + finalPath); } + + // move any incompatible files to final path + if (!incompatFileSet.isEmpty()) { + for (Path incompatFile : incompatFileSet) { + String fileName = incompatFile.getName(); + Path destFile = new Path(finalPath.getParent(), fileName); + try { + Utilities.renameOrMoveFiles(fs, incompatFile, destFile); + LOG.info("Moved incompatible file " + incompatFile + " to " + + destFile); + } catch (HiveException e) { + LOG.error("Unable to move " + 
incompatFile + " to " + destFile); + throw new IOException(e); + } + } + } + } else { if (!autoDelete) { fs.delete(outPath, true); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java index 682b5a0..65b5ca8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hive.ql.io.orc; +import java.sql.Timestamp; + import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.serde2.io.DateWritable; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -716,6 +718,99 @@ public String toString() { } } + private static final class TimestampStatisticsImpl extends ColumnStatisticsImpl + implements TimestampColumnStatistics { + private Long minimum = null; + private Long maximum = null; + + TimestampStatisticsImpl() { + } + + TimestampStatisticsImpl(OrcProto.ColumnStatistics stats) { + super(stats); + OrcProto.TimestampStatistics timestampStats = stats.getTimestampStatistics(); + // min,max values serialized/deserialized as int (milliseconds since epoch) + if (timestampStats.hasMaximum()) { + maximum = timestampStats.getMaximum(); + } + if (timestampStats.hasMinimum()) { + minimum = timestampStats.getMinimum(); + } + } + + @Override + void reset() { + super.reset(); + minimum = null; + maximum = null; + } + + @Override + void updateTimestamp(Timestamp value) { + if (minimum == null) { + minimum = value.getTime(); + maximum = value.getTime(); + } else if (minimum > value.getTime()) { + minimum = value.getTime(); + } else if (maximum < value.getTime()) { + maximum = value.getTime(); + } + } + + @Override + void merge(ColumnStatisticsImpl other) { + super.merge(other); + TimestampStatisticsImpl timestampStats = (TimestampStatisticsImpl) other; + if (minimum == null) { + minimum = timestampStats.minimum; + maximum = timestampStats.maximum; + } else if (timestampStats.minimum != null) { + if (minimum > timestampStats.minimum) { + minimum = timestampStats.minimum; + } else if (maximum < timestampStats.maximum) { + maximum = timestampStats.maximum; + } + } + } + + @Override + OrcProto.ColumnStatistics.Builder serialize() { + OrcProto.ColumnStatistics.Builder result = super.serialize(); + OrcProto.TimestampStatistics.Builder timestampStats = OrcProto.TimestampStatistics + .newBuilder(); + if (getNumberOfValues() != 0) { + timestampStats.setMinimum(minimum); + timestampStats.setMaximum(maximum); + } + result.setTimestampStatistics(timestampStats); + return result; + } + + @Override + public Timestamp getMinimum() { + Timestamp minTimestamp = new Timestamp(minimum); + return minTimestamp; + } + + @Override + public Timestamp getMaximum() { + Timestamp maxTimestamp = new Timestamp(maximum); + return maxTimestamp; + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(super.toString()); + if (getNumberOfValues() != 0) { + buf.append(" min: "); + buf.append(minimum); + buf.append(" max: "); + buf.append(maximum); + } + return buf.toString(); + } + } + private long count = 0; ColumnStatisticsImpl(OrcProto.ColumnStatistics stats) { @@ -759,6 +854,10 @@ void updateDate(DateWritable value) { throw new UnsupportedOperationException("Can't update date"); } + void updateTimestamp(Timestamp value) { + throw new UnsupportedOperationException("Can't update timestamp"); + } + void 
merge(ColumnStatisticsImpl stats) { count += stats.count; } @@ -806,6 +905,8 @@ static ColumnStatisticsImpl create(ObjectInspector inspector) { return new DecimalStatisticsImpl(); case DATE: return new DateStatisticsImpl(); + case TIMESTAMP: + return new TimestampStatisticsImpl(); case BINARY: return new BinaryStatisticsImpl(); default: @@ -829,6 +930,8 @@ static ColumnStatisticsImpl deserialize(OrcProto.ColumnStatistics stats) { return new DecimalStatisticsImpl(stats); } else if (stats.hasDateStatistics()) { return new DateStatisticsImpl(stats); + } else if (stats.hasTimestampStatistics()) { + return new TimestampStatisticsImpl(stats); } else if(stats.hasBinaryStatistics()) { return new BinaryStatisticsImpl(stats); } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java index 555f17c..6b230e9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java @@ -120,14 +120,14 @@ public static void main(String[] args) throws Exception { RowIndex[] indices = rows.readRowIndex(stripeIx); for (int col : rowIndexCols) { StringBuilder buf = new StringBuilder(); - buf.append(" Column ").append(col).append(": row index"); + buf.append(" Row group index column ").append(col).append(":"); RowIndex index = null; if ((col >= indices.length) || ((index = indices[col]) == null)) { buf.append(" not found\n"); continue; } for (int entryIx = 0; entryIx < index.getEntryCount(); ++entryIx) { - buf.append(" RG ").append(entryIx).append(": "); + buf.append("\n Entry ").append(entryIx).append(":"); RowIndexEntry entry = index.getEntry(entryIx); if (entry == null) { buf.append("unknown\n"); @@ -139,15 +139,17 @@ public static void main(String[] args) throws Exception { } else { ColumnStatistics cs = ColumnStatisticsImpl.deserialize(colStats); Object min = RecordReaderImpl.getMin(cs), max = RecordReaderImpl.getMax(cs); - buf.append("[").append(min).append(", ").append(max).append(") at "); + buf.append(" count: ").append(cs.getNumberOfValues()); + buf.append(" min: ").append(min); + buf.append(" max: ").append(max); } + buf.append(" positions: "); for (int posIx = 0; posIx < entry.getPositionsCount(); ++posIx) { if (posIx != 0) { buf.append(","); } buf.append(entry.getPositions(posIx)); } - buf.append("\n"); } System.out.println(buf); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java index b36152a..13ec642 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileMergeMapper.java @@ -18,20 +18,24 @@ package org.apache.hadoop.hive.ql.io.orc; -import java.io.IOException; -import java.util.List; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.merge.MergeMapper; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.shims.CombineHiveKey; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.Mapper; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + /** * Map task fast merging of ORC 
files. */ @@ -96,31 +100,9 @@ public void map(Object key, OrcFileValueWrapper value, OutputCollector getTableColumnStatistics(String dbName, String tableName, + List colNames) throws NoSuchObjectException, MetaException, TException, + InvalidInputException, InvalidObjectException { + if (getTempTable(dbName, tableName) != null) { + return getTempTableColumnStats(dbName, tableName, colNames); + } + return super.getTableColumnStatistics(dbName, tableName, colNames); + } + + /** {@inheritDoc} */ + @Override + public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException { + if (getTempTable(dbName, tableName) != null) { + return deleteTempTableColumnStats(dbName, tableName, colName); + } + return super.deleteTableColumnStatistics(dbName, tableName, colName); + } + private void createTempTable(org.apache.hadoop.hive.metastore.api.Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { @@ -277,15 +330,19 @@ private void alterTempTable(String dbname, String tbl_name, org.apache.hadoop.hive.metastore.api.Table oldt, org.apache.hadoop.hive.metastore.api.Table newt, EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { - Table newTable = new Table(deepCopyAndLowerCaseTable(newt)); dbname = dbname.toLowerCase(); tbl_name = tbl_name.toLowerCase(); + boolean shouldDeleteColStats = false; // Disallow changing temp table location if (!newt.getSd().getLocation().equals(oldt.getSd().getLocation())) { throw new MetaException("Temp table location cannot be changed"); } + org.apache.hadoop.hive.metastore.api.Table newtCopy = deepCopyAndLowerCaseTable(newt); + MetaStoreUtils.updateUnpartitionedTableStatsFast(newtCopy, + wh.getFileStatusesForSD(newtCopy.getSd()), false, true); + Table newTable = new Table(newtCopy); String newDbName = newTable.getDbName(); String newTableName = newTable.getTableName(); if (!newDbName.equals(oldt.getDbName()) || !newTableName.equals(oldt.getTableName())) { @@ -303,6 +360,7 @@ private void alterTempTable(String dbname, String tbl_name, if (tables == null || tables.remove(tbl_name) == null) { throw new MetaException("Could not find temp table entry for " + dbname + "." + tbl_name); } + shouldDeleteColStats = true; tables = getTempTablesForDatabase(newDbName); if (tables == null) { @@ -311,8 +369,50 @@ private void alterTempTable(String dbname, String tbl_name, } tables.put(newTableName, newTable); } else { + if (haveTableColumnsChanged(oldt, newt)) { + shouldDeleteColStats = true; + } getTempTablesForDatabase(dbname).put(tbl_name, newTable); } + + if (shouldDeleteColStats) { + try { + deleteTempTableColumnStatsForTable(dbname, tbl_name); + } catch (NoSuchObjectException err){ + // No stats to delete, forgivable error. + LOG.info(err); + } + } + } + + private static boolean haveTableColumnsChanged(org.apache.hadoop.hive.metastore.api.Table oldt, + org.apache.hadoop.hive.metastore.api.Table newt) { + List oldCols = oldt.getSd().getCols(); + List newCols = newt.getSd().getCols(); + if (oldCols.size() != newCols.size()) { + return true; + } + Iterator oldColsIter = oldCols.iterator(); + Iterator newColsIter = newCols.iterator(); + while (oldColsIter.hasNext()) { + // Don't use FieldSchema.equals() since it also compares comments, + // which is unnecessary for this method. 
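As a reading aid for the column comparison that follows: a standalone sketch (hypothetical class name and sample values, not part of the patch) of a name/type check that deliberately ignores column comments, so that a comment-only edit does not count as a column change and therefore does not invalidate cached column statistics.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;

public class ColumnChangeCheckSketch {

  // True when the two column lists differ by name or type; comments are ignored on purpose.
  static boolean columnsChanged(List<FieldSchema> oldCols, List<FieldSchema> newCols) {
    if (oldCols.size() != newCols.size()) {
      return true;
    }
    for (int i = 0; i < oldCols.size(); i++) {
      FieldSchema oldCol = oldCols.get(i);
      FieldSchema newCol = newCols.get(i);
      if (!oldCol.getName().equals(newCol.getName())
          || !oldCol.getType().equals(newCol.getType())) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    List<FieldSchema> oldCols = Arrays.asList(new FieldSchema("id", "int", "old comment"));
    List<FieldSchema> commentOnly = Arrays.asList(new FieldSchema("id", "int", "new comment"));
    List<FieldSchema> retyped = Arrays.asList(new FieldSchema("id", "bigint", "old comment"));
    System.out.println(columnsChanged(oldCols, commentOnly)); // false: only the comment changed
    System.out.println(columnsChanged(oldCols, retyped));     // true: the type changed
  }
}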
+ if (!fieldSchemaEqualsIgnoreComment(oldColsIter.next(), newColsIter.next())) { + return true; + } + } + return false; + } + + private static boolean fieldSchemaEqualsIgnoreComment(FieldSchema left, FieldSchema right) { + // Just check name/type for equality, don't compare comment + if (!left.getName().equals(right.getName())) { + return true; + } + if (!left.getType().equals(right.getType())) { + return true; + } + return false; } private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boolean deleteData, @@ -373,4 +473,102 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo } return ss.getTempTables().get(dbName); } + + private Map getTempTableColumnStatsForTable(String dbName, + String tableName) { + SessionState ss = SessionState.get(); + if (ss == null) { + LOG.debug("No current SessionState, skipping temp tables"); + return null; + } + String lookupName = StatsUtils.getFullyQualifiedTableName(dbName.toLowerCase(), + tableName.toLowerCase()); + return ss.getTempTableColStats().get(lookupName); + } + + private static List copyColumnStatisticsObjList(Map csoMap) { + List retval = new ArrayList(csoMap.size()); + for (ColumnStatisticsObj cso : csoMap.values()) { + retval.add(new ColumnStatisticsObj(cso)); + } + return retval; + } + + private List getTempTableColumnStats(String dbName, String tableName, + List colNames) { + Map tableColStats = + getTempTableColumnStatsForTable(dbName, tableName); + List retval = new ArrayList(); + + if (tableColStats != null) { + for (String colName : colNames) { + colName = colName.toLowerCase(); + if (tableColStats.containsKey(colName)) { + retval.add(new ColumnStatisticsObj(tableColStats.get(colName))); + } + } + } + return retval; + } + + private boolean updateTempTableColumnStats(String dbName, String tableName, + ColumnStatistics colStats) throws MetaException { + SessionState ss = SessionState.get(); + if (ss == null) { + throw new MetaException("No current SessionState, cannot update temporary table stats for " + + dbName + "." + tableName); + } + Map ssTableColStats = + getTempTableColumnStatsForTable(dbName, tableName); + if (ssTableColStats == null) { + // Add new entry for this table + ssTableColStats = new HashMap(); + ss.getTempTableColStats().put( + StatsUtils.getFullyQualifiedTableName(dbName, tableName), + ssTableColStats); + } + mergeColumnStats(ssTableColStats, colStats); + return true; + } + + private static void mergeColumnStats(Map oldStats, + ColumnStatistics newStats) { + List newColList = newStats.getStatsObj(); + if (newColList != null) { + for (ColumnStatisticsObj colStat : newColList) { + // This is admittedly a bit simple, StatsObjectConverter seems to allow + // old stats attributes to be kept if the new values do not overwrite them. 
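The session-level bookkeeping above is easier to follow as a small standalone sketch: a two-level map keyed by the lower-cased "db.table" name and then by the lower-cased column name, where newer per-column entries simply replace older ones, mirroring the put that mergeColumnStats performs just below. The class, method names, and values here are hypothetical stand-ins (plain Strings instead of ColumnStatisticsObj), not the patch's API.

import java.util.HashMap;
import java.util.Map;

public class TempTableColStatsCacheSketch {

  // Outer key: lower-cased "db.table"; inner key: lower-cased column name.
  private final Map<String, Map<String, String>> cache = new HashMap<String, Map<String, String>>();

  void update(String dbName, String tableName, Map<String, String> newStats) {
    String tableKey = (dbName + "." + tableName).toLowerCase();
    Map<String, String> perColumn = cache.get(tableKey);
    if (perColumn == null) {
      perColumn = new HashMap<String, String>();
      cache.put(tableKey, perColumn);
    }
    // Newer per-column entries replace older ones wholesale; attributes are not merged field by field.
    for (Map.Entry<String, String> e : newStats.entrySet()) {
      perColumn.put(e.getKey().toLowerCase(), e.getValue());
    }
  }

  Map<String, String> lookup(String dbName, String tableName) {
    return cache.get((dbName + "." + tableName).toLowerCase());
  }

  public static void main(String[] args) {
    TempTableColStatsCacheSketch stats = new TempTableColStatsCacheSketch();

    Map<String, String> first = new HashMap<String, String>();
    first.put("ID", "numDVs=10");
    stats.update("default", "tmp_orders", first);

    Map<String, String> second = new HashMap<String, String>();
    second.put("id", "numDVs=42");
    stats.update("DEFAULT", "TMP_ORDERS", second);

    System.out.println(stats.lookup("default", "tmp_orders")); // prints {id=numDVs=42}
  }
}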
+ oldStats.put(colStat.getColName().toLowerCase(), colStat); + } + } + } + + private boolean deleteTempTableColumnStatsForTable(String dbName, String tableName) + throws NoSuchObjectException { + Map deletedEntry = + getTempTableColumnStatsForTable(dbName, tableName); + if (deletedEntry != null) { + SessionState.get().getTempTableColStats().remove( + StatsUtils.getFullyQualifiedTableName(dbName, tableName)); + } else { + throw new NoSuchObjectException("Column stats doesn't exist for db=" + dbName + + " temp table=" + tableName); + } + return true; + } + + private boolean deleteTempTableColumnStats(String dbName, String tableName, String columnName) + throws NoSuchObjectException { + ColumnStatisticsObj deletedEntry = null; + Map ssTableColStats = + getTempTableColumnStatsForTable(dbName, tableName); + if (ssTableColStats != null) { + deletedEntry = ssTableColStats.remove(columnName.toLowerCase()); + } + if (deletedEntry == null) { + throw new NoSuchObjectException("Column stats doesn't exist for db=" + dbName + + " temp table=" + tableName); + } + return true; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index a622095..05b7f48 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -815,9 +815,11 @@ boolean validateReduceWorkOperator(Operator op) { ret = validateSelectOperator((SelectOperator) op); break; case REDUCESINK: - ret = validateReduceSinkOperator((ReduceSinkOperator) op); - break; + ret = validateReduceSinkOperator((ReduceSinkOperator) op); + break; case FILESINK: + ret = validateFileSinkOperator((FileSinkOperator) op); + break; case LIMIT: ret = true; break; @@ -899,6 +901,15 @@ private boolean validateExtractOperator(ExtractOperator op) { return true; } + private boolean validateFileSinkOperator(FileSinkOperator op) { + // HIVE-7557: For now, turn off dynamic partitioning to give more time to + // figure out how to make VectorFileSink work correctly with it... 
+ if (op.getConf().getDynPartCtx() != null) { + return false; + } + return true; + } + private boolean validateExprNodeDesc(List descs) { return validateExprNodeDesc(descs, VectorExpressionDescriptor.Mode.PROJECTION); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index 987ecc2..01c1d30 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -104,7 +104,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, tsop.setStatistics(stats.clone()); if (LOG.isDebugEnabled()) { - LOG.debug("[0] STATS-" + tsop.toString() + ": " + stats.extendedToString()); + LOG.debug("[0] STATS-" + tsop.toString() + " (" + table.getTableName() + + "): " + stats.extendedToString()); } } catch (CloneNotSupportedException e) { throw new SemanticException(ErrorMsg.STATISTICS_CLONING_FAILED.getMsg()); @@ -1092,7 +1093,9 @@ private void updateJoinColumnsNDV(Map> joinKeys, String key = entry.getValue().get(joinColIdx); key = StatsUtils.stripPrefixFromColumnName(key); ColStatistics cs = joinedColStats.get(key); - cs.setCountDistint(minNDV); + if (cs != null) { + cs.setCountDistint(minNDV); + } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 60d490f..b5b2b60 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory; import org.apache.hadoop.hive.ql.exec.FetchTask; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.LineageInfo; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; @@ -61,7 +62,6 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.io.DateWritable; @@ -317,7 +317,7 @@ public static String getUnescapedName(ASTNode tableOrColumnNode, String currentD return new String[] {dbName, tableName}; } String tableName = unescapeIdentifier(tabNameNode.getChild(0).getText()); - return new String[]{SessionState.get().getCurrentDatabase(), tableName}; + return Utilities.getDbTableName(tableName); } public static String getDotName(String[] qname) throws SemanticException { @@ -646,6 +646,20 @@ public tableSpec(Hive db, HiveConf conf, ASTNode ast) this(db, conf, ast, true, false); } + public tableSpec(Hive db, HiveConf conf, String tableName, Map partSpec) + throws HiveException { + this.tableName = tableName; + this.partSpec = partSpec; + this.tableHandle = db.getTable(tableName); + if (partSpec != null) { + this.specType = SpecType.STATIC_PARTITION; + this.partHandle = db.getPartition(tableHandle, partSpec, false); + this.partitions = Arrays.asList(partHandle); + } else { + this.specType = SpecType.TABLE_ONLY; + } + } + public tableSpec(Hive db, HiveConf conf, ASTNode ast, 
boolean allowDynamicPartitionsSpec, boolean allowPartialPartitionsSpec) throws SemanticException { assert (ast.getToken().getType() == HiveParser.TOK_TAB @@ -1188,21 +1202,26 @@ protected Database getDatabase(String dbName) throws SemanticException { } protected Database getDatabase(String dbName, boolean throwException) throws SemanticException { + Database database; try { - Database database = db.getDatabase(dbName); - if (database == null && throwException) { - throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dbName)); - } - return database; - } catch (HiveException e) { + database = db.getDatabase(dbName); + } catch (Exception e) { throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dbName), e); } + if (database == null && throwException) { + throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dbName)); + } + return database; } protected Table getTable(String[] qualified) throws SemanticException { return getTable(qualified[0], qualified[1], true); } + protected Table getTable(String[] qualified, boolean throwException) throws SemanticException { + return getTable(qualified[0], qualified[1], throwException); + } + protected Table getTable(String tblName) throws SemanticException { return getTable(null, tblName, true); } @@ -1213,43 +1232,46 @@ protected Table getTable(String tblName, boolean throwException) throws Semantic protected Table getTable(String database, String tblName, boolean throwException) throws SemanticException { + Table tab; try { - Table tab = database == null ? db.getTable(tblName, false) + tab = database == null ? db.getTable(tblName, false) : db.getTable(database, tblName, false); - if (tab == null && throwException) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); - } - return tab; - } catch (HiveException e) { + } catch (Exception e) { throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName), e); } + if (tab == null && throwException) { + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); + } + return tab; } protected Partition getPartition(Table table, Map partSpec, boolean throwException) throws SemanticException { + Partition partition; try { - Partition partition = db.getPartition(table, partSpec, false); - if (partition == null && throwException) { - throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec)); - } - return partition; - } catch (HiveException e) { + partition = db.getPartition(table, partSpec, false); + } catch (Exception e) { throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e); } + if (partition == null && throwException) { + throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec)); + } + return partition; } protected List getPartitions(Table table, Map partSpec, boolean throwException) throws SemanticException { + List partitions; try { - List partitions = partSpec == null ? db.getPartitions(table) : + partitions = partSpec == null ? 
db.getPartitions(table) : db.getPartitions(table, partSpec); - if (partitions.isEmpty() && throwException) { - throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec)); - } - return partitions; - } catch (HiveException e) { + } catch (Exception e) { throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e); } + if (partitions.isEmpty() && throwException) { + throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec)); + } + return partitions; } protected String toMessage(ErrorMsg message, Object detail) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java index 3f8648b..44c193f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java @@ -363,7 +363,6 @@ public ColumnStatsSemanticAnalyzer(HiveConf conf, ASTNode tree) throws SemanticE originalTree = tree; boolean isPartitionStats = isPartitionLevelStats(tree); Map partSpec = null; - checkIfTemporaryTable(); checkForPartitionColumns(colNames, Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys())); validateSpecifiedColumnNames(colNames); if (conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()) { @@ -414,13 +413,6 @@ private void checkForPartitionColumns(List specifiedCols, List p } } - private void checkIfTemporaryTable() throws SemanticException { - if (tbl.isTemporary()) { - throw new SemanticException(tbl.getTableName() - + " is a temporary table. Column statistics are not supported on temporary tables."); - } - } - @Override public void analyze(ASTNode ast, Context origCtx) throws SemanticException { QB qb; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index f31a409..05cde3e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -249,39 +249,67 @@ public DDLSemanticAnalyzer(HiveConf conf, Hive db) throws SemanticException { } @Override - public void analyzeInternal(ASTNode ast) throws SemanticException { - - switch (ast.getToken().getType()) { - case HiveParser.TOK_ALTERTABLE_PARTITION: { - ASTNode tablePart = (ASTNode) ast.getChild(0); - TablePartition tblPart = new TablePartition(tablePart); - String tableName = tblPart.tableName; - HashMap partSpec = tblPart.partSpec; - ast = (ASTNode) ast.getChild(1); - if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) { + public void analyzeInternal(ASTNode input) throws SemanticException { + + ASTNode ast = input; + switch (ast.getType()) { + case HiveParser.TOK_ALTERTABLE: { + ast = (ASTNode) input.getChild(1); + String[] qualified = getQualifiedTableName((ASTNode) input.getChild(0)); + String tableName = getDotName(qualified); + HashMap partSpec = DDLSemanticAnalyzer.getPartSpec((ASTNode) input.getChild(2)); + if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAME) { + analyzeAlterTableRename(qualified, ast, false); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_TOUCH) { + analyzeAlterTableTouch(qualified, ast); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ARCHIVE) { + analyzeAlterTableArchive(qualified, ast, false); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UNARCHIVE) { + analyzeAlterTableArchive(qualified, 
ast, true); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDCOLS) { + analyzeAlterTableModifyCols(qualified, ast, AlterTableTypes.ADDCOLS); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_REPLACECOLS) { + analyzeAlterTableModifyCols(qualified, ast, AlterTableTypes.REPLACECOLS); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMECOL) { + analyzeAlterTableRenameCol(qualified, ast); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDPARTS) { + analyzeAlterTableAddParts(qualified, ast, false); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPARTS) { + analyzeAlterTableDropParts(qualified, ast, false); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PARTCOLTYPE) { + analyzeAlterTablePartColType(qualified, ast); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PROPERTIES) { + analyzeAlterTableProps(qualified, ast, false, false); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPROPERTIES) { + analyzeAlterTableProps(qualified, ast, false, true); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_SKEWED) { + analyzeAltertableSkewedby(qualified, ast); + } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION) { + analyzeExchangePartition(qualified, ast); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) { analyzeAlterTableFileFormat(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PROTECTMODE) { analyzeAlterTableProtectMode(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION) { analyzeAlterTableLocation(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES) { - analyzeAlterTablePartMergeFiles(tablePart, ast, tableName, partSpec); + analyzeAlterTablePartMergeFiles(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERIALIZER) { analyzeAlterTableSerde(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES) { analyzeAlterTableSerdeProps(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) { analyzeAlterTableRenamePart(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTBLPART_SKEWED_LOCATION) { + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SKEWED_LOCATION) { analyzeAlterTableSkewedLocation(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_TABLEBUCKETS) { + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_BUCKETS) { analyzeAlterTableBucketNum(ast, tableName, partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_CLUSTER_SORT) { analyzeAlterTableClusterSort(ast, tableName, partSpec); - } else if (ast.getToken().getType() == HiveParser.TOK_COMPACT) { + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_COMPACT) { analyzeAlterTableCompact(ast, tableName, partSpec); } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS){ - analyzeAlterTableUpdateStats(ast,tblPart); + analyzeAlterTableUpdateStats(ast, tableName, partSpec); } break; } @@ -360,66 +388,22 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { case HiveParser.TOK_DROPVIEW: analyzeDropTable(ast, true); break; - case HiveParser.TOK_ALTERVIEW_PROPERTIES: - analyzeAlterTableProps(ast, true, false); - break; - case HiveParser.TOK_DROPVIEW_PROPERTIES: - 
analyzeAlterTableProps(ast, true, true); - break; - case HiveParser.TOK_ALTERVIEW_ADDPARTS: - // for ALTER VIEW ADD PARTITION, we wrapped the ADD to discriminate - // view from table; unwrap it now - analyzeAlterTableAddParts((ASTNode) ast.getChild(0), true); - break; - case HiveParser.TOK_ALTERVIEW_DROPPARTS: - // for ALTER VIEW DROP PARTITION, we wrapped the DROP to discriminate - // view from table; unwrap it now - analyzeAlterTableDropParts((ASTNode) ast.getChild(0), true); - break; - case HiveParser.TOK_ALTERVIEW_RENAME: - // for ALTER VIEW RENAME, we wrapped the RENAME to discriminate - // view from table; unwrap it now - analyzeAlterTableRename(((ASTNode) ast.getChild(0)), true); - break; - case HiveParser.TOK_ALTERTABLE_RENAME: - analyzeAlterTableRename(ast, false); - break; - case HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS: - analyzeAlterTableUpdateStats(ast, null); - break; - case HiveParser.TOK_ALTERTABLE_TOUCH: - analyzeAlterTableTouch(ast); - break; - case HiveParser.TOK_ALTERTABLE_ARCHIVE: - analyzeAlterTableArchive(ast, false); - break; - case HiveParser.TOK_ALTERTABLE_UNARCHIVE: - analyzeAlterTableArchive(ast, true); - break; - case HiveParser.TOK_ALTERTABLE_ADDCOLS: - analyzeAlterTableModifyCols(ast, AlterTableTypes.ADDCOLS); - break; - case HiveParser.TOK_ALTERTABLE_REPLACECOLS: - analyzeAlterTableModifyCols(ast, AlterTableTypes.REPLACECOLS); - break; - case HiveParser.TOK_ALTERTABLE_RENAMECOL: - analyzeAlterTableRenameCol(ast); - break; - case HiveParser.TOK_ALTERTABLE_ADDPARTS: - analyzeAlterTableAddParts(ast, false); - break; - case HiveParser.TOK_ALTERTABLE_DROPPARTS: - analyzeAlterTableDropParts(ast, false); - break; - case HiveParser.TOK_ALTERTABLE_PARTCOLTYPE: - analyzeAlterTablePartColType(ast); - break; - case HiveParser.TOK_ALTERTABLE_PROPERTIES: - analyzeAlterTableProps(ast, false, false); - break; - case HiveParser.TOK_DROPTABLE_PROPERTIES: - analyzeAlterTableProps(ast, false, true); + case HiveParser.TOK_ALTERVIEW: { + String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); + ast = (ASTNode) ast.getChild(1); + if (ast.getType() == HiveParser.TOK_ALTERVIEW_PROPERTIES) { + analyzeAlterTableProps(qualified, ast, true, false); + } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPROPERTIES) { + analyzeAlterTableProps(qualified, ast, true, true); + } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_ADDPARTS) { + analyzeAlterTableAddParts(qualified, ast, true); + } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPARTS) { + analyzeAlterTableDropParts(qualified, ast, true); + } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_RENAME) { + analyzeAlterTableRename(qualified, ast, true); + } break; + } case HiveParser.TOK_ALTERINDEX_REBUILD: analyzeAlterIndexRebuild(ast); break; @@ -499,12 +483,6 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { case HiveParser.TOK_REVOKE: analyzeRevoke(ast); break; - case HiveParser.TOK_ALTERTABLE_SKEWED: - analyzeAltertableSkewedby(ast); - break; - case HiveParser.TOK_EXCHANGEPARTITION: - analyzeExchangePartition(ast); - break; case HiveParser.TOK_SHOW_SET_ROLE: analyzeSetShowRole(ast); break; @@ -516,20 +494,14 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { } } - private void analyzeAlterTableUpdateStats(ASTNode ast, TablePartition tblPart) + private void analyzeAlterTableUpdateStats(ASTNode ast, String tblName, Map partSpec) throws SemanticException { - String tblName = null; - String colName = null; - Map mapProp = null; - Map partSpec = null; + String 
colName = getUnescapedName((ASTNode) ast.getChild(0)); + Map mapProp = getProps((ASTNode) (ast.getChild(1)).getChild(0)); + + Table tbl = getTable(tblName); String partName = null; - if (tblPart == null) { - tblName = getUnescapedName((ASTNode) ast.getChild(0)); - colName = getUnescapedName((ASTNode) ast.getChild(1)); - mapProp = getProps((ASTNode) (ast.getChild(2)).getChild(0)); - } else { - tblName = tblPart.tableName; - partSpec = tblPart.partSpec; + if (partSpec != null) { try { partName = Warehouse.makePartName(partSpec, false); } catch (MetaException e) { @@ -537,15 +509,6 @@ private void analyzeAlterTableUpdateStats(ASTNode ast, TablePartition tblPart) throw new SemanticException("partition " + partSpec.toString() + " not found"); } - colName = getUnescapedName((ASTNode) ast.getChild(0)); - mapProp = getProps((ASTNode) (ast.getChild(1)).getChild(0)); - } - - Table tbl = null; - try { - tbl = db.getTable(tblName); - } catch (HiveException e) { - throw new SemanticException("table " + tbl + " not found"); } String colType = null; @@ -711,12 +674,12 @@ private void analyzeAlterDatabaseOwner(ASTNode ast) throws SemanticException { addAlterDbDesc(alterDesc); } - private void analyzeExchangePartition(ASTNode ast) throws SemanticException { - Table destTable = getTable(getUnescapedName((ASTNode)ast.getChild(0))); - Table sourceTable = getTable(getUnescapedName((ASTNode)ast.getChild(2))); + private void analyzeExchangePartition(String[] qualified, ASTNode ast) throws SemanticException { + Table destTable = getTable(qualified); + Table sourceTable = getTable(getUnescapedName((ASTNode)ast.getChild(1))); // Get the partition specs - Map partSpecs = getPartSpec((ASTNode) ast.getChild(1)); + Map partSpecs = getPartSpec((ASTNode) ast.getChild(0)); validatePartitionValues(partSpecs); boolean sameColumns = MetaStoreUtils.compareFieldColumns( destTable.getAllCols(), sourceTable.getAllCols()); @@ -1237,8 +1200,7 @@ private void analyzeAlterIndexProps(ASTNode ast) if (indexTableName != null) { indexTbl = getTable(Utilities.getDbTableName(index.getDbName(), indexTableName)); } - String baseTblName = index.getOrigTableName(); - Table baseTbl = getTable(baseTblName); + Table baseTbl = getTable(new String[] {index.getDbName(), index.getOrigTableName()}); String handlerCls = index.getIndexHandlerClass(); HiveIndexHandler handler = HiveUtils.getIndexHandler(conf, handlerCls); @@ -1331,16 +1293,16 @@ private void validateAlterTableType(Table tbl, AlterTableTypes op, boolean expec } } - private void analyzeAlterTableProps(ASTNode ast, boolean expectView, boolean isUnset) + private void analyzeAlterTableProps(String[] qualified, ASTNode ast, boolean expectView, boolean isUnset) throws SemanticException { - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - HashMap mapProp = getProps((ASTNode) (ast.getChild(1)) + String tableName = getDotName(qualified); + HashMap mapProp = getProps((ASTNode) (ast.getChild(0)) .getChild(0)); AlterTableDesc alterTblDesc = null; if (isUnset == true) { alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, expectView); - if (ast.getChild(2) != null) { + if (ast.getChild(1) != null) { alterTblDesc.setDropIfExists(true); } } else { @@ -1527,7 +1489,7 @@ private void analyzeAlterTableProtectMode(ASTNode ast, String tableName, alterTblDesc), conf)); } - private void analyzeAlterTablePartMergeFiles(ASTNode tablePartAST, ASTNode ast, + private void analyzeAlterTablePartMergeFiles(ASTNode ast, String tableName, HashMap partSpec) throws SemanticException { 
AlterTablePartMergeFilesDesc mergeDesc = new AlterTablePartMergeFilesDesc( @@ -1639,7 +1601,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode tablePartAST, ASTNode ast, StatsWork statDesc; if (oldTblPartLoc.equals(newTblPartLoc)) { // If we're merging to the same location, we can avoid some metastore calls - tableSpec tablepart = new tableSpec(this.db, conf, tablePartAST); + tableSpec tablepart = new tableSpec(db, conf, tableName, partSpec); statDesc = new StatsWork(tablepart); } else { statDesc = new StatsWork(ltd); @@ -1672,7 +1634,7 @@ private void analyzeAlterTableClusterSort(ASTNode ast, String tableName, alterTblDesc = new AlterTableDesc(tableName, true, partSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); break; - case HiveParser.TOK_TABLEBUCKETS: + case HiveParser.TOK_ALTERTABLE_BUCKETS: ASTNode buckets = (ASTNode) ast.getChild(0); List bucketCols = getColumnNames((ASTNode) buckets.getChild(0)); List sortCols = new ArrayList(); @@ -2502,9 +2464,9 @@ private void analyzeDescFunction(ASTNode ast) throws SemanticException { } - private void analyzeAlterTableRename(ASTNode ast, boolean expectView) throws SemanticException { - String[] source = getQualifiedTableName((ASTNode) ast.getChild(0)); - String[] target = getQualifiedTableName((ASTNode) ast.getChild(1)); + private void analyzeAlterTableRename(String[] source, ASTNode ast, boolean expectView) + throws SemanticException { + String[] target = getQualifiedTableName((ASTNode) ast.getChild(0)); String sourceName = getDotName(source); String targetName = getDotName(target); @@ -2515,22 +2477,21 @@ private void analyzeAlterTableRename(ASTNode ast, boolean expectView) throws Sem alterTblDesc), conf)); } - private void analyzeAlterTableRenameCol(ASTNode ast) throws SemanticException { - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); + private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast) throws SemanticException { String newComment = null; String newType = null; - newType = getTypeStringFromAST((ASTNode) ast.getChild(3)); + newType = getTypeStringFromAST((ASTNode) ast.getChild(2)); boolean first = false; String flagCol = null; ASTNode positionNode = null; - if (ast.getChildCount() == 6) { - newComment = unescapeSQLString(ast.getChild(4).getText()); - positionNode = (ASTNode) ast.getChild(5); - } else if (ast.getChildCount() == 5) { - if (ast.getChild(4).getType() == HiveParser.StringLiteral) { - newComment = unescapeSQLString(ast.getChild(4).getText()); + if (ast.getChildCount() == 5) { + newComment = unescapeSQLString(ast.getChild(3).getText()); + positionNode = (ASTNode) ast.getChild(4); + } else if (ast.getChildCount() == 4) { + if (ast.getChild(3).getType() == HiveParser.StringLiteral) { + newComment = unescapeSQLString(ast.getChild(3).getText()); } else { - positionNode = (ASTNode) ast.getChild(4); + positionNode = (ASTNode) ast.getChild(3); } } @@ -2542,8 +2503,8 @@ private void analyzeAlterTableRenameCol(ASTNode ast) throws SemanticException { } } - String oldColName = ast.getChild(1).getText(); - String newColName = ast.getChild(2).getText(); + String oldColName = ast.getChild(0).getText(); + String newColName = ast.getChild(1).getText(); /* Validate the operation of renaming a column name. 
*/ Table tab = getTable(qualified); @@ -2603,12 +2564,11 @@ private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, alterBucketNum), conf)); } - private void analyzeAlterTableModifyCols(ASTNode ast, + private void analyzeAlterTableModifyCols(String[] qualified, ASTNode ast, AlterTableTypes alterType) throws SemanticException { - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); String tblName = getDotName(qualified); - List newCols = getColumns((ASTNode) ast.getChild(1)); + List newCols = getColumns((ASTNode) ast.getChild(0)); AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, newCols, alterType); @@ -2617,7 +2577,7 @@ private void analyzeAlterTableModifyCols(ASTNode ast, alterTblDesc), conf)); } - private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView) + private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean expectView) throws SemanticException { boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null) @@ -2630,7 +2590,6 @@ private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView) // popular case but that's kinda hacky. Let's not do it for now. boolean canGroupExprs = ifExists; - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); Table tab = getTable(qualified); Map> partSpecs = getFullPartitionSpecs(ast, tab, canGroupExprs); @@ -2649,10 +2608,8 @@ private void analyzeAlterTableDropParts(ASTNode ast, boolean expectView) rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf)); } - private void analyzeAlterTablePartColType(ASTNode ast) + private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast) throws SemanticException { - // get table name - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); // check if table exists. @@ -2664,7 +2621,7 @@ private void analyzeAlterTablePartColType(ASTNode ast) // Alter table ... partition column ( column newtype) only takes one column at a time. // It must have a column name followed with type. - ASTNode colAst = (ASTNode) ast.getChild(1); + ASTNode colAst = (ASTNode) ast.getChild(0); assert(colAst.getChildCount() == 2); FieldSchema newCol = new FieldSchema(); @@ -2710,12 +2667,11 @@ private void analyzeAlterTablePartColType(ASTNode ast) * @throws SemanticException * Parsing failed */ - private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView) + private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boolean expectView) throws SemanticException { // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+) - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); - boolean ifNotExists = ast.getChild(1).getType() == HiveParser.TOK_IFNOTEXISTS; + boolean ifNotExists = ast.getChild(0).getType() == HiveParser.TOK_IFNOTEXISTS; Table tab = getTable(qualified); boolean isView = tab.isView(); @@ -2723,7 +2679,7 @@ private void analyzeAlterTableAddParts(CommonTree ast, boolean expectView) outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED)); int numCh = ast.getChildCount(); - int start = ifNotExists ? 2 : 1; + int start = ifNotExists ? 
1 : 0; String currentLocation = null; Map currentPart = null; @@ -2840,9 +2796,8 @@ private Partition getPartitionForOutput(Table tab, Map currentPa * @throws SemanticException * Parsin failed */ - private void analyzeAlterTableTouch(CommonTree ast) + private void analyzeAlterTableTouch(String[] qualified, CommonTree ast) throws SemanticException { - String[] qualified = getQualifiedTableName((ASTNode)ast.getChild(0)); Table tab = getTable(qualified); validateAlterTableType(tab, AlterTableTypes.TOUCH); @@ -2870,14 +2825,13 @@ private void analyzeAlterTableTouch(CommonTree ast) } } - private void analyzeAlterTableArchive(CommonTree ast, boolean isUnArchive) + private void analyzeAlterTableArchive(String[] qualified, CommonTree ast, boolean isUnArchive) throws SemanticException { if (!conf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED)) { throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg()); } - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); // partition name to value List> partSpecs = getPartitionSpecs(ast); @@ -2948,7 +2902,7 @@ private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException { List> partSpecs = new ArrayList>(); int childIndex = 0; // get partition metadata if partition specified - for (childIndex = 1; childIndex < ast.getChildCount(); childIndex++) { + for (childIndex = 0; childIndex < ast.getChildCount(); childIndex++) { Tree partspec = ast.getChild(childIndex); // sanity check if (partspec.getType() == HiveParser.TOK_PARTSPEC) { @@ -2976,7 +2930,7 @@ private void analyzeMetastoreCheck(CommonTree ast) throws SemanticException { Map> result = new HashMap>(); - for (int childIndex = 1; childIndex < ast.getChildCount(); childIndex++) { + for (int childIndex = 0; childIndex < ast.getChildCount(); childIndex++) { Tree partSpecTree = ast.getChild(childIndex); if (partSpecTree.getType() != HiveParser.TOK_PARTSPEC) continue; ExprNodeGenericFuncDesc expr = null; @@ -3184,14 +3138,13 @@ private void addTableDropPartsOutputs(Table tab, * node * @throws SemanticException */ - private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException { + private void analyzeAltertableSkewedby(String[] qualified, ASTNode ast) throws SemanticException { /** * Throw an error if the user tries to use the DDL with * hive.internal.ddl.list.bucketing.enable set to false. */ HiveConf hiveConf = SessionState.get().getConf(); - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); Table tab = getTable(qualified); inputs.add(new ReadEntity(tab)); @@ -3200,7 +3153,7 @@ private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException { validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY); String tableName = getDotName(qualified); - if (ast.getChildCount() == 1) { + if (ast.getChildCount() == 0) { /* Convert a skewed table to non-skewed table. 
*/ AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true, new ArrayList(), new ArrayList>()); @@ -3208,7 +3161,7 @@ private void analyzeAltertableSkewedby(ASTNode ast) throws SemanticException { rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); } else { - switch (((ASTNode) ast.getChild(1)).getToken().getType()) { + switch (((ASTNode) ast.getChild(0)).getToken().getType()) { case HiveParser.TOK_TABLESKEWED: handleAlterTableSkewedBy(ast, tableName, tab); break; @@ -3255,7 +3208,7 @@ private void handleAlterTableSkewedBy(ASTNode ast, String tableName, Table tab) List skewedColNames = new ArrayList(); List> skewedValues = new ArrayList>(); /* skewed column names. */ - ASTNode skewedNode = (ASTNode) ast.getChild(1); + ASTNode skewedNode = (ASTNode) ast.getChild(0); skewedColNames = analyzeSkewedTablDDLColNames(skewedColNames, skewedNode); /* skewed value. */ analyzeDDLSkewedValues(skewedValues, skewedNode); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 32db0c7..25cd3a5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -126,11 +126,10 @@ TOK_CREATEINDEX; TOK_CREATEINDEX_INDEXTBLNAME; TOK_DEFERRED_REBUILDINDEX; TOK_DROPINDEX; -TOK_DROPTABLE_PROPERTIES; TOK_LIKETABLE; TOK_DESCTABLE; TOK_DESCFUNCTION; -TOK_ALTERTABLE_PARTITION; +TOK_ALTERTABLE; TOK_ALTERTABLE_RENAME; TOK_ALTERTABLE_ADDCOLS; TOK_ALTERTABLE_RENAMECOL; @@ -152,6 +151,13 @@ TOK_ALTERTABLE_FILEFORMAT; TOK_ALTERTABLE_LOCATION; TOK_ALTERTABLE_PROPERTIES; TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION; +TOK_ALTERTABLE_DROPPROPERTIES; +TOK_ALTERTABLE_SKEWED; +TOK_ALTERTABLE_EXCHANGEPARTITION; +TOK_ALTERTABLE_SKEWED_LOCATION; +TOK_ALTERTABLE_BUCKETS; +TOK_ALTERTABLE_CLUSTER_SORT; +TOK_ALTERTABLE_COMPACT; TOK_ALTERINDEX_REBUILD; TOK_ALTERINDEX_PROPERTIES; TOK_MSCK; @@ -177,7 +183,6 @@ TOK_TABCOLLIST; TOK_TABCOL; TOK_TABLECOMMENT; TOK_TABLEPARTCOLS; -TOK_TABLEBUCKETS; TOK_TABLEROWFORMAT; TOK_TABLEROWFORMATFIELD; TOK_TABLEROWFORMATCOLLITEMS; @@ -192,7 +197,6 @@ TOK_DISABLE; TOK_READONLY; TOK_NO_DROP; TOK_STORAGEHANDLER; -TOK_ALTERTABLE_CLUSTER_SORT; TOK_NOT_CLUSTERED; TOK_NOT_SORTED; TOK_TABCOLNAME; @@ -215,9 +219,9 @@ TOK_DROPMACRO; TOK_TEMPORARY; TOK_CREATEVIEW; TOK_DROPVIEW; -TOK_ALTERVIEW_AS; +TOK_ALTERVIEW; TOK_ALTERVIEW_PROPERTIES; -TOK_DROPVIEW_PROPERTIES; +TOK_ALTERVIEW_DROPPROPERTIES; TOK_ALTERVIEW_ADDPARTS; TOK_ALTERVIEW_DROPPARTS; TOK_ALTERVIEW_RENAME; @@ -302,8 +306,6 @@ TOK_TABLESKEWED; TOK_TABCOLVALUE; TOK_TABCOLVALUE_PAIR; TOK_TABCOLVALUES; -TOK_ALTERTABLE_SKEWED; -TOK_ALTERTBLPART_SKEWED_LOCATION; TOK_SKEWED_LOCATIONS; TOK_SKEWED_LOCATION_LIST; TOK_SKEWED_LOCATION_MAP; @@ -315,7 +317,6 @@ TOK_WINDOWSPEC; TOK_WINDOWVALUES; TOK_WINDOWRANGE; TOK_IGNOREPROTECTION; -TOK_EXCHANGEPARTITION; TOK_SUBQUERY_EXPR; TOK_SUBQUERY_OP; TOK_SUBQUERY_OP_NOTIN; @@ -328,7 +329,6 @@ TOK_FILE; TOK_JAR; TOK_RESOURCE_URI; TOK_RESOURCE_LIST; -TOK_COMPACT; TOK_SHOW_COMPACTIONS; TOK_SHOW_TRANSACTIONS; TOK_DELETE_FROM; @@ -935,56 +935,62 @@ dropTableStatement alterStatement @init { pushMsg("alter statement", state); } @after { popMsg(state); } - : KW_ALTER! - ( - KW_TABLE! alterTableStatementSuffix - | - KW_VIEW! alterViewStatementSuffix - | - KW_INDEX! alterIndexStatementSuffix - | - (KW_DATABASE|KW_SCHEMA)! 
alterDatabaseStatementSuffix - ) + : KW_ALTER KW_TABLE tableName alterTableStatementSuffix -> ^(TOK_ALTERTABLE tableName alterTableStatementSuffix) + | KW_ALTER KW_VIEW tableName KW_AS? alterViewStatementSuffix -> ^(TOK_ALTERVIEW tableName alterViewStatementSuffix) + | KW_ALTER KW_INDEX alterIndexStatementSuffix -> alterIndexStatementSuffix + | KW_ALTER (KW_DATABASE|KW_SCHEMA) alterDatabaseStatementSuffix -> alterDatabaseStatementSuffix ; alterTableStatementSuffix @init { pushMsg("alter table statement", state); } @after { popMsg(state); } - : alterStatementSuffixRename + : alterStatementSuffixRename[true] | alterStatementSuffixAddCol | alterStatementSuffixRenameCol | alterStatementSuffixUpdateStatsCol - | alterStatementSuffixDropPartitions - | alterStatementSuffixAddPartitions + | alterStatementSuffixDropPartitions[true] + | alterStatementSuffixAddPartitions[true] | alterStatementSuffixTouch | alterStatementSuffixArchive | alterStatementSuffixUnArchive | alterStatementSuffixProperties - | alterTblPartitionStatement | alterStatementSuffixSkewedby | alterStatementSuffixExchangePartition | alterStatementPartitionKeyType + | partitionSpec? alterTblPartitionStatementSuffix -> alterTblPartitionStatementSuffix partitionSpec? ; +alterTblPartitionStatementSuffix +@init {pushMsg("alter table partition statement suffix", state);} +@after {popMsg(state);} + : alterStatementSuffixFileFormat + | alterStatementSuffixLocation + | alterStatementSuffixProtectMode + | alterStatementSuffixMergeFiles + | alterStatementSuffixSerdeProperties + | alterStatementSuffixRenamePart + | alterStatementSuffixBucketNum + | alterTblPartitionStatementSuffixSkewedLocation + | alterStatementSuffixClusterbySortby + | alterStatementSuffixCompact + | alterStatementSuffixUpdateStatsCol + ; + alterStatementPartitionKeyType @init {msgs.push("alter partition key type"); } @after {msgs.pop();} - : tableName KW_PARTITION KW_COLUMN LPAREN columnNameType RPAREN - -> ^(TOK_ALTERTABLE_PARTCOLTYPE tableName columnNameType) + : KW_PARTITION KW_COLUMN LPAREN columnNameType RPAREN + -> ^(TOK_ALTERTABLE_PARTCOLTYPE columnNameType) ; alterViewStatementSuffix @init { pushMsg("alter view statement", state); } @after { popMsg(state); } : alterViewSuffixProperties - | alterStatementSuffixRename - -> ^(TOK_ALTERVIEW_RENAME alterStatementSuffixRename) - | alterStatementSuffixAddPartitions - -> ^(TOK_ALTERVIEW_ADDPARTS alterStatementSuffixAddPartitions) - | alterStatementSuffixDropPartitions - -> ^(TOK_ALTERVIEW_DROPPARTS alterStatementSuffixDropPartitions) - | name=tableName KW_AS selectStatementWithCTE - -> ^(TOK_ALTERVIEW_AS $name selectStatementWithCTE) + | alterStatementSuffixRename[false] + | alterStatementSuffixAddPartitions[false] + | alterStatementSuffixDropPartitions[false] + | selectStatementWithCTE ; alterIndexStatementSuffix @@ -1022,33 +1028,34 @@ alterDatabaseSuffixSetOwner -> ^(TOK_ALTERDATABASE_OWNER $dbName principalName) ; -alterStatementSuffixRename +alterStatementSuffixRename[boolean table] @init { pushMsg("rename statement", state); } @after { popMsg(state); } - : oldName=tableName KW_RENAME KW_TO newName=tableName - -> ^(TOK_ALTERTABLE_RENAME $oldName $newName) + : KW_RENAME KW_TO tableName + -> { table }? ^(TOK_ALTERTABLE_RENAME tableName) + -> ^(TOK_ALTERVIEW_RENAME tableName) ; alterStatementSuffixAddCol @init { pushMsg("add column statement", state); } @after { popMsg(state); } - : tableName (add=KW_ADD | replace=KW_REPLACE) KW_COLUMNS LPAREN columnNameTypeList RPAREN - -> {$add != null}? 
^(TOK_ALTERTABLE_ADDCOLS tableName columnNameTypeList) - -> ^(TOK_ALTERTABLE_REPLACECOLS tableName columnNameTypeList) + : (add=KW_ADD | replace=KW_REPLACE) KW_COLUMNS LPAREN columnNameTypeList RPAREN + -> {$add != null}? ^(TOK_ALTERTABLE_ADDCOLS columnNameTypeList) + -> ^(TOK_ALTERTABLE_REPLACECOLS columnNameTypeList) ; alterStatementSuffixRenameCol @init { pushMsg("rename column name", state); } @after { popMsg(state); } - : tableName KW_CHANGE KW_COLUMN? oldName=identifier newName=identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition? - ->^(TOK_ALTERTABLE_RENAMECOL tableName $oldName $newName colType $comment? alterStatementChangeColPosition?) + : KW_CHANGE KW_COLUMN? oldName=identifier newName=identifier colType (KW_COMMENT comment=StringLiteral)? alterStatementChangeColPosition? + ->^(TOK_ALTERTABLE_RENAMECOL $oldName $newName colType $comment? alterStatementChangeColPosition?) ; alterStatementSuffixUpdateStatsCol @init { pushMsg("update column statistics", state); } @after { popMsg(state); } - : identifier KW_UPDATE KW_STATISTICS KW_FOR KW_COLUMN? colName=identifier KW_SET tableProperties (KW_COMMENT comment=StringLiteral)? - ->^(TOK_ALTERTABLE_UPDATECOLSTATS identifier $colName tableProperties $comment?) + : KW_UPDATE KW_STATISTICS KW_FOR KW_COLUMN? colName=identifier KW_SET tableProperties (KW_COMMENT comment=StringLiteral)? + ->^(TOK_ALTERTABLE_UPDATECOLSTATS $colName tableProperties $comment?) ; alterStatementChangeColPosition @@ -1057,11 +1064,12 @@ alterStatementChangeColPosition -> ^(TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION $afterCol) ; -alterStatementSuffixAddPartitions +alterStatementSuffixAddPartitions[boolean table] @init { pushMsg("add partition statement", state); } @after { popMsg(state); } - : tableName KW_ADD ifNotExists? alterStatementSuffixAddPartitionsElement+ - -> ^(TOK_ALTERTABLE_ADDPARTS tableName ifNotExists? alterStatementSuffixAddPartitionsElement+) + : KW_ADD ifNotExists? alterStatementSuffixAddPartitionsElement+ + -> { table }? ^(TOK_ALTERTABLE_ADDPARTS ifNotExists? alterStatementSuffixAddPartitionsElement+) + -> ^(TOK_ALTERVIEW_ADDPARTS ifNotExists? alterStatementSuffixAddPartitionsElement+) ; alterStatementSuffixAddPartitionsElement @@ -1071,22 +1079,22 @@ alterStatementSuffixAddPartitionsElement alterStatementSuffixTouch @init { pushMsg("touch statement", state); } @after { popMsg(state); } - : tableName KW_TOUCH (partitionSpec)* - -> ^(TOK_ALTERTABLE_TOUCH tableName (partitionSpec)*) + : KW_TOUCH (partitionSpec)* + -> ^(TOK_ALTERTABLE_TOUCH (partitionSpec)*) ; alterStatementSuffixArchive @init { pushMsg("archive statement", state); } @after { popMsg(state); } - : tableName KW_ARCHIVE (partitionSpec)* - -> ^(TOK_ALTERTABLE_ARCHIVE tableName (partitionSpec)*) + : KW_ARCHIVE (partitionSpec)* + -> ^(TOK_ALTERTABLE_ARCHIVE (partitionSpec)*) ; alterStatementSuffixUnArchive @init { pushMsg("unarchive statement", state); } @after { popMsg(state); } - : tableName KW_UNARCHIVE (partitionSpec)* - -> ^(TOK_ALTERTABLE_UNARCHIVE tableName (partitionSpec)*) + : KW_UNARCHIVE (partitionSpec)* + -> ^(TOK_ALTERTABLE_UNARCHIVE (partitionSpec)*) ; partitionLocation @@ -1096,29 +1104,30 @@ partitionLocation KW_LOCATION locn=StringLiteral -> ^(TOK_PARTITIONLOCATION $locn) ; -alterStatementSuffixDropPartitions +alterStatementSuffixDropPartitions[boolean table] @init { pushMsg("drop partition statement", state); } @after { popMsg(state); } - : tableName KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection? 
- -> ^(TOK_ALTERTABLE_DROPPARTS tableName dropPartitionSpec+ ifExists? ignoreProtection?) + : KW_DROP ifExists? dropPartitionSpec (COMMA dropPartitionSpec)* ignoreProtection? + -> { table }? ^(TOK_ALTERTABLE_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection?) + -> ^(TOK_ALTERVIEW_DROPPARTS dropPartitionSpec+ ifExists? ignoreProtection?) ; alterStatementSuffixProperties @init { pushMsg("alter properties statement", state); } @after { popMsg(state); } - : tableName KW_SET KW_TBLPROPERTIES tableProperties - -> ^(TOK_ALTERTABLE_PROPERTIES tableName tableProperties) - | tableName KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties - -> ^(TOK_DROPTABLE_PROPERTIES tableName tableProperties ifExists?) + : KW_SET KW_TBLPROPERTIES tableProperties + -> ^(TOK_ALTERTABLE_PROPERTIES tableProperties) + | KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties + -> ^(TOK_ALTERTABLE_DROPPROPERTIES tableProperties ifExists?) ; alterViewSuffixProperties @init { pushMsg("alter view properties statement", state); } @after { popMsg(state); } - : tableName KW_SET KW_TBLPROPERTIES tableProperties - -> ^(TOK_ALTERVIEW_PROPERTIES tableName tableProperties) - | tableName KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties - -> ^(TOK_DROPVIEW_PROPERTIES tableName tableProperties ifExists?) + : KW_SET KW_TBLPROPERTIES tableProperties + -> ^(TOK_ALTERVIEW_PROPERTIES tableProperties) + | KW_UNSET KW_TBLPROPERTIES ifExists? tableProperties + -> ^(TOK_ALTERVIEW_DROPPROPERTIES tableProperties ifExists?) ; alterStatementSuffixSerdeProperties @@ -1137,29 +1146,6 @@ tablePartitionPrefix ->^(TOK_TABLE_PARTITION tableName partitionSpec?) ; -alterTblPartitionStatement -@init {pushMsg("alter table partition statement", state);} -@after {popMsg(state);} - : tablePartitionPrefix alterTblPartitionStatementSuffix - -> ^(TOK_ALTERTABLE_PARTITION tablePartitionPrefix alterTblPartitionStatementSuffix) - ; - -alterTblPartitionStatementSuffix -@init {pushMsg("alter table partition statement suffix", state);} -@after {popMsg(state);} - : alterStatementSuffixFileFormat - | alterStatementSuffixLocation - | alterStatementSuffixProtectMode - | alterStatementSuffixMergeFiles - | alterStatementSuffixSerdeProperties - | alterStatementSuffixRenamePart - | alterStatementSuffixStatsPart - | alterStatementSuffixBucketNum - | alterTblPartitionStatementSuffixSkewedLocation - | alterStatementSuffixClusterbySortby - | alterStatementSuffixCompact - ; - alterStatementSuffixFileFormat @init {pushMsg("alter fileformat statement", state); } @after {popMsg(state);} @@ -1179,7 +1165,7 @@ alterTblPartitionStatementSuffixSkewedLocation @init {pushMsg("alter partition skewed location", state);} @after {popMsg(state);} : KW_SET KW_SKEWED KW_LOCATION skewedLocations - -> ^(TOK_ALTERTBLPART_SKEWED_LOCATION skewedLocations) + -> ^(TOK_ALTERTABLE_SKEWED_LOCATION skewedLocations) ; skewedLocations @@ -1214,21 +1200,21 @@ alterStatementSuffixLocation alterStatementSuffixSkewedby @init {pushMsg("alter skewed by statement", state);} @after{popMsg(state);} - : tableName tableSkewed - ->^(TOK_ALTERTABLE_SKEWED tableName tableSkewed) + : tableSkewed + ->^(TOK_ALTERTABLE_SKEWED tableSkewed) | - tableName KW_NOT KW_SKEWED - ->^(TOK_ALTERTABLE_SKEWED tableName) + KW_NOT KW_SKEWED + ->^(TOK_ALTERTABLE_SKEWED) | - tableName KW_NOT storedAsDirs - ->^(TOK_ALTERTABLE_SKEWED tableName storedAsDirs) + KW_NOT storedAsDirs + ->^(TOK_ALTERTABLE_SKEWED storedAsDirs) ; alterStatementSuffixExchangePartition @init {pushMsg("alter exchange partition", state);} @after{popMsg(state);} - : 
tableName KW_EXCHANGE partitionSpec KW_WITH KW_TABLE exchangename=tableName - -> ^(TOK_EXCHANGEPARTITION tableName partitionSpec $exchangename) + : KW_EXCHANGE partitionSpec KW_WITH KW_TABLE exchangename=tableName + -> ^(TOK_ALTERTABLE_EXCHANGEPARTITION partitionSpec $exchangename) ; alterStatementSuffixProtectMode @@ -1278,14 +1264,14 @@ alterStatementSuffixBucketNum @init { pushMsg("", state); } @after { popMsg(state); } : KW_INTO num=Number KW_BUCKETS - -> ^(TOK_TABLEBUCKETS $num) + -> ^(TOK_ALTERTABLE_BUCKETS $num) ; alterStatementSuffixCompact @init { msgs.push("compaction request"); } @after { msgs.pop(); } : KW_COMPACT compactType=StringLiteral - -> ^(TOK_COMPACT $compactType) + -> ^(TOK_ALTERTABLE_COMPACT $compactType) ; @@ -1707,7 +1693,7 @@ tableBuckets @after { popMsg(state); } : KW_CLUSTERED KW_BY LPAREN bucketCols=columnNameList RPAREN (KW_SORTED KW_BY LPAREN sortCols=columnNameOrderList RPAREN)? KW_INTO num=Number KW_BUCKETS - -> ^(TOK_TABLEBUCKETS $bucketCols $sortCols? $num) + -> ^(TOK_ALTERTABLE_BUCKETS $bucketCols $sortCols? $num) ; tableSkewed diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java index 8527239..2b239ab 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java @@ -88,6 +88,7 @@ private void doIndexUpdate(List tblIndexes) throws HiveException { sb.append("ALTER INDEX "); sb.append(idx.getIndexName()); sb.append(" ON "); + sb.append(idx.getDbName()).append('.'); sb.append(idx.getOrigTableName()); sb.append(" REBUILD"); driver.compile(sb.toString(), false); @@ -125,6 +126,7 @@ private void doIndexUpdate(Index index, Map partSpec) throws sb.append("ALTER INDEX "); sb.append(index.getIndexName()); sb.append(" ON "); + sb.append(index.getDbName()).append('.'); sb.append(index.getOrigTableName()); sb.append(" PARTITION "); sb.append(ps.toString()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index b05d3b4..0106707 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -9408,7 +9408,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { // analyze create view command if (ast.getToken().getType() == HiveParser.TOK_CREATEVIEW || - ast.getToken().getType() == HiveParser.TOK_ALTERVIEW_AS) { + (ast.getToken().getType() == HiveParser.TOK_ALTERVIEW && ast.getChild(1).getType() == HiveParser.TOK_QUERY)) { child = analyzeCreateView(ast, qb); SessionState.get().setCommandType(HiveOperation.CREATEVIEW); if (child == null) { @@ -9416,7 +9416,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { } viewSelect = child; // prevent view from referencing itself - viewsExpanded.add(SessionState.get().getCurrentDatabase() + "." + createVwDesc.getViewName()); + viewsExpanded.add(createVwDesc.getViewName()); } // continue analyzing from the child ASTNode. 
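A change that recurs throughout this patch is resolving a bare table name against the current database instead of assuming it (for example the Utilities.getDbTableName calls earlier in the diff). Roughly, the convention amounts to the split sketched below; the helper name and sample values are hypothetical and this is not the patch's actual implementation.

import java.util.Arrays;

public class DbTableNameSketch {

  // "t" resolves against the current database; "db.t" is already qualified.
  static String[] splitDbTableName(String name, String currentDatabase) {
    String[] parts = name.split("\\.");
    if (parts.length == 1) {
      return new String[] { currentDatabase, name };
    }
    if (parts.length == 2) {
      return parts;
    }
    throw new IllegalArgumentException("Invalid table name: " + name);
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(splitDbTableName("src", "default")));       // [default, src]
    System.out.println(Arrays.toString(splitDbTableName("sales.src", "default"))); // [sales, src]
  }
}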
@@ -9517,6 +9517,11 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { LOG.info("Completed plan generation"); + // put accessed columns to readEntity + if (HiveConf.getBoolVar(this.conf, HiveConf.ConfVars.HIVE_STATS_COLLECT_SCANCOLS)) { + putAccessedColumnsToReadEntity(inputs, columnAccessInfo); + } + if (!ctx.getExplain()) { // if desired check we're not going over partition scan limits enforceScanLimits(pCtx, origFetchTask); @@ -9525,6 +9530,26 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { return; } + private void putAccessedColumnsToReadEntity(HashSet<ReadEntity> inputs, ColumnAccessInfo columnAccessInfo) { + Map<String, List<String>> tableToColumnAccessMap = columnAccessInfo.getTableToColumnAccessMap(); + if (tableToColumnAccessMap != null && !tableToColumnAccessMap.isEmpty()) { + for(ReadEntity entity: inputs) { + switch (entity.getType()) { + case TABLE: + entity.getAccessedColumns().addAll( + tableToColumnAccessMap.get(entity.getTable().getCompleteName())); + break; + case PARTITION: + entity.getAccessedColumns().addAll( + tableToColumnAccessMap.get(entity.getPartition().getTable().getCompleteName())); + break; + default: + // no-op + } + } + } + } + private void enforceScanLimits(ParseContext pCtx, FetchTask fTask) throws SemanticException { int scanLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITTABLESCANPARTITION); @@ -9998,7 +10023,9 @@ public RowResolver getRowResolver(Operator opt) { */ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) throws SemanticException { - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); + String[] qualifiedTabName = getQualifiedTableName((ASTNode) ast.getChild(0)); + String dbDotTab = getDotName(qualifiedTabName); + String likeTableName = null; List<FieldSchema> cols = new ArrayList<FieldSchema>(); List<FieldSchema> partCols = new ArrayList<FieldSchema>(); @@ -10024,7 +10051,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) RowFormatParams rowFormatParams = new RowFormatParams(); StorageFormat storageFormat = new StorageFormat(conf); - LOG.info("Creating table " + tableName + " position=" + LOG.info("Creating table " + dbDotTab + " position=" + ast.getCharPositionInLine()); int numCh = ast.getChildCount(); @@ -10096,7 +10123,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) case HiveParser.TOK_TABLEPARTCOLS: partCols = getColumns((ASTNode) child.getChild(0), false); break; - case HiveParser.TOK_TABLEBUCKETS: + case HiveParser.TOK_ALTERTABLE_BUCKETS: bucketCols = getColumnNames((ASTNode) child.getChild(0)); if (child.getChildCount() == 2) { numBuckets = (Integer.valueOf(child.getChild(1).getText())) @@ -10155,7 +10182,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) // check for existence of table if (ifNotExists) { try { - Table table = getTable(tableName, false); + Table table = getTable(qualifiedTabName, false); if (table != null) { // table exists return null; } @@ -10165,11 +10192,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) } } - String[] qualified = Hive.getQualifiedNames(tableName); - String dbName = qualified.length == 1 ? 
SessionState.get().getCurrentDatabase() : qualified[0]; - Database database = getDatabase(dbName); - outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED)); - outputs.add(new WriteEntity(new Table(dbName, tableName), WriteEntity.WriteType.DDL_NO_LOCK)); + addDbAndTabToOutputs(qualifiedTabName); if (isTemporary) { if (partCols.size() > 0) { @@ -10198,7 +10221,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) case CREATE_TABLE: // REGULAR CREATE TABLE DDL tblProps = addDefaultProperties(tblProps); - crtTblDesc = new CreateTableDesc(tableName, isExt, isTemporary, cols, partCols, + crtTblDesc = new CreateTableDesc(dbDotTab, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, @@ -10227,7 +10250,7 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) + "and source table in CREATE TABLE LIKE is partitioned."); } } - CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(tableName, isExt, isTemporary, + CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(dbDotTab, isExt, isTemporary, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getSerdeProps(), tblProps, ifNotExists, likeTableName); @@ -10240,9 +10263,9 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) // Verify that the table does not already exist try { - Table dumpTable = db.newTable(tableName); + Table dumpTable = db.newTable(dbDotTab); if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false)) { - throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(tableName)); + throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTab)); } } catch (HiveException e) { throw new SemanticException(e); @@ -10250,11 +10273,10 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) tblProps = addDefaultProperties(tblProps); - crtTblDesc = new CreateTableDesc(dbName, tableName, isExt, isTemporary, cols, partCols, - bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, - rowFormatParams.fieldEscape, - rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, - comment, storageFormat.getInputFormat(), + crtTblDesc = new CreateTableDesc(qualifiedTabName[0], dbDotTab, isExt, isTemporary, cols, + partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, + rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, + rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, skewedValues); @@ -10271,9 +10293,17 @@ private ASTNode analyzeCreateTable(ASTNode ast, QB qb) return null; } + private void addDbAndTabToOutputs(String[] qualifiedTabName) throws SemanticException { + Database database = getDatabase(qualifiedTabName[0]); + outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED)); + outputs.add(new WriteEntity(new Table(qualifiedTabName[0], qualifiedTabName[1]), + WriteEntity.WriteType.DDL_NO_LOCK)); + } + private ASTNode analyzeCreateView(ASTNode ast, QB qb) throws SemanticException { - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); + String[] qualTabName = getQualifiedTableName((ASTNode) ast.getChild(0)); + String dbDotTable = 
getDotName(qualTabName); List cols = null; boolean ifNotExists = false; boolean orReplace = false; @@ -10283,7 +10313,7 @@ private ASTNode analyzeCreateView(ASTNode ast, QB qb) Map tblProps = null; List partColNames = null; - LOG.info("Creating view " + tableName + " position=" + LOG.info("Creating view " + dbDotTable + " position=" + ast.getCharPositionInLine()); int numCh = ast.getChildCount(); for (int num = 1; num < numCh; num++) { @@ -10319,19 +10349,21 @@ private ASTNode analyzeCreateView(ASTNode ast, QB qb) throw new SemanticException("Can't combine IF NOT EXISTS and OR REPLACE."); } - if (ast.getToken().getType() == HiveParser.TOK_ALTERVIEW_AS) { + if (ast.getToken().getType() == HiveParser.TOK_ALTERVIEW && + ast.getChild(1).getType() == HiveParser.TOK_QUERY) { isAlterViewAs = true; orReplace = true; } createVwDesc = new CreateViewDesc( - tableName, cols, comment, tblProps, partColNames, + dbDotTable, cols, comment, tblProps, partColNames, ifNotExists, orReplace, isAlterViewAs); unparseTranslator.enable(); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), createVwDesc), conf)); + addDbAndTabToOutputs(qualTabName); return selectStmt; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java index 3dfce99..026efe8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java @@ -20,6 +20,7 @@ import java.util.HashMap; +import org.antlr.runtime.tree.Tree; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.session.SessionState; @@ -57,7 +58,7 @@ commandType.put(HiveParser.TOK_ALTERTABLE_ARCHIVE, HiveOperation.ALTERTABLE_ARCHIVE); commandType.put(HiveParser.TOK_ALTERTABLE_UNARCHIVE, HiveOperation.ALTERTABLE_UNARCHIVE); commandType.put(HiveParser.TOK_ALTERTABLE_PROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES); - commandType.put(HiveParser.TOK_DROPTABLE_PROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES); + commandType.put(HiveParser.TOK_ALTERTABLE_DROPPROPERTIES, HiveOperation.ALTERTABLE_PROPERTIES); commandType.put(HiveParser.TOK_SHOWDATABASES, HiveOperation.SHOWDATABASES); commandType.put(HiveParser.TOK_SHOWTABLES, HiveOperation.SHOWTABLES); commandType.put(HiveParser.TOK_SHOWCOLUMNS, HiveOperation.SHOWCOLUMNS); @@ -81,9 +82,11 @@ commandType.put(HiveParser.TOK_ALTERINDEX_REBUILD, HiveOperation.ALTERINDEX_REBUILD); commandType.put(HiveParser.TOK_ALTERINDEX_PROPERTIES, HiveOperation.ALTERINDEX_PROPS); commandType.put(HiveParser.TOK_ALTERVIEW_PROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES); - commandType.put(HiveParser.TOK_DROPVIEW_PROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES); + commandType.put(HiveParser.TOK_ALTERVIEW_DROPPROPERTIES, HiveOperation.ALTERVIEW_PROPERTIES); commandType.put(HiveParser.TOK_ALTERVIEW_ADDPARTS, HiveOperation.ALTERTABLE_ADDPARTS); commandType.put(HiveParser.TOK_ALTERVIEW_DROPPARTS, HiveOperation.ALTERTABLE_DROPPARTS); + commandType.put(HiveParser.TOK_ALTERVIEW_RENAME, HiveOperation.ALTERVIEW_RENAME); + commandType.put(HiveParser.TOK_ALTERVIEW, HiveOperation.ALTERVIEW_AS); commandType.put(HiveParser.TOK_QUERY, HiveOperation.QUERY); commandType.put(HiveParser.TOK_LOCKTABLE, HiveOperation.LOCKTABLE); commandType.put(HiveParser.TOK_UNLOCKTABLE, HiveOperation.UNLOCKTABLE); @@ -105,11 +108,9 @@ commandType.put(HiveParser.TOK_DESCDATABASE, HiveOperation.DESCDATABASE); 
commandType.put(HiveParser.TOK_ALTERTABLE_SKEWED, HiveOperation.ALTERTABLE_SKEWED); commandType.put(HiveParser.TOK_ANALYZE, HiveOperation.ANALYZE_TABLE); - commandType.put(HiveParser.TOK_ALTERVIEW_RENAME, HiveOperation.ALTERVIEW_RENAME); commandType.put(HiveParser.TOK_ALTERTABLE_PARTCOLTYPE, HiveOperation.ALTERTABLE_PARTCOLTYPE); commandType.put(HiveParser.TOK_SHOW_COMPACTIONS, HiveOperation.SHOW_COMPACTIONS); commandType.put(HiveParser.TOK_SHOW_TRANSACTIONS, HiveOperation.SHOW_TRANSACTIONS); - commandType.put(HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS, HiveOperation.ALTERTABLE_UPDATETABLESTATS); } static { @@ -134,17 +135,20 @@ HiveOperation.ALTERPARTITION_SERDEPROPERTIES }); tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_RENAMEPART, new HiveOperation[] {null, HiveOperation.ALTERTABLE_RENAMEPART}); - tablePartitionCommandType.put(HiveParser.TOK_COMPACT, + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_COMPACT, new HiveOperation[] {HiveOperation.ALTERTABLE_COMPACT, HiveOperation.ALTERTABLE_COMPACT}); - tablePartitionCommandType.put(HiveParser.TOK_ALTERTBLPART_SKEWED_LOCATION, + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_SKEWED_LOCATION, new HiveOperation[] {HiveOperation.ALTERTBLPART_SKEWED_LOCATION, HiveOperation.ALTERTBLPART_SKEWED_LOCATION }); - tablePartitionCommandType.put(HiveParser.TOK_TABLEBUCKETS, + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_BUCKETS, new HiveOperation[] {HiveOperation.ALTERTABLE_BUCKETNUM, HiveOperation.ALTERPARTITION_BUCKETNUM}); tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_CLUSTER_SORT, new HiveOperation[] {HiveOperation.ALTERTABLE_CLUSTER_SORT, HiveOperation.ALTERTABLE_CLUSTER_SORT}); + tablePartitionCommandType.put(HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS, + new HiveOperation[] {HiveOperation.ALTERTABLE_UPDATETABLESTATS, + HiveOperation.ALTERTABLE_UPDATEPARTSTATS}); } public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) @@ -152,9 +156,9 @@ public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) if (tree.getToken() == null) { throw new RuntimeException("Empty Syntax Tree"); } else { - setSessionCommandType(commandType.get(tree.getToken().getType())); + setSessionCommandType(commandType.get(tree.getType())); - switch (tree.getToken().getType()) { + switch (tree.getType()) { case HiveParser.TOK_EXPLAIN: return new ExplainSemanticAnalyzer(conf); case HiveParser.TOK_EXPLAIN_SQ_REWRITE: @@ -165,6 +169,47 @@ public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) return new ExportSemanticAnalyzer(conf); case HiveParser.TOK_IMPORT: return new ImportSemanticAnalyzer(conf); + case HiveParser.TOK_ALTERTABLE: { + Tree child = tree.getChild(1); + switch (child.getType()) { + case HiveParser.TOK_ALTERTABLE_RENAME: + case HiveParser.TOK_ALTERTABLE_TOUCH: + case HiveParser.TOK_ALTERTABLE_ARCHIVE: + case HiveParser.TOK_ALTERTABLE_UNARCHIVE: + case HiveParser.TOK_ALTERTABLE_ADDCOLS: + case HiveParser.TOK_ALTERTABLE_RENAMECOL: + case HiveParser.TOK_ALTERTABLE_REPLACECOLS: + case HiveParser.TOK_ALTERTABLE_DROPPARTS: + case HiveParser.TOK_ALTERTABLE_ADDPARTS: + case HiveParser.TOK_ALTERTABLE_PARTCOLTYPE: + case HiveParser.TOK_ALTERTABLE_PROPERTIES: + case HiveParser.TOK_ALTERTABLE_DROPPROPERTIES: + case HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION: + case HiveParser.TOK_ALTERTABLE_SKEWED: + setSessionCommandType(commandType.get(child.getType())); + return new DDLSemanticAnalyzer(conf); + } + HiveOperation commandType = + tablePartitionCommandType.get(child.getType())[tree.getChildCount() > 2 
? 1 : 0]; + setSessionCommandType(commandType); + return new DDLSemanticAnalyzer(conf); + } + case HiveParser.TOK_ALTERVIEW: { + Tree child = tree.getChild(1); + switch (child.getType()) { + case HiveParser.TOK_ALTERVIEW_PROPERTIES: + case HiveParser.TOK_ALTERVIEW_DROPPROPERTIES: + case HiveParser.TOK_ALTERVIEW_ADDPARTS: + case HiveParser.TOK_ALTERVIEW_DROPPARTS: + case HiveParser.TOK_ALTERVIEW_RENAME: + setSessionCommandType(commandType.get(child.getType())); + return new DDLSemanticAnalyzer(conf); + } + // TOK_ALTERVIEW_AS + assert child.getType() == HiveParser.TOK_QUERY; + setSessionCommandType(HiveOperation.ALTERVIEW_AS); + return new SemanticAnalyzer(conf); + } case HiveParser.TOK_CREATEDATABASE: case HiveParser.TOK_DROPDATABASE: case HiveParser.TOK_SWITCHDATABASE: @@ -174,24 +219,8 @@ public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) case HiveParser.TOK_DESCTABLE: case HiveParser.TOK_DESCFUNCTION: case HiveParser.TOK_MSCK: - case HiveParser.TOK_ALTERTABLE_ADDCOLS: - case HiveParser.TOK_ALTERTABLE_RENAMECOL: - case HiveParser.TOK_ALTERTABLE_REPLACECOLS: - case HiveParser.TOK_ALTERTABLE_RENAME: - case HiveParser.TOK_ALTERTABLE_DROPPARTS: - case HiveParser.TOK_ALTERTABLE_ADDPARTS: - case HiveParser.TOK_ALTERTABLE_PROPERTIES: - case HiveParser.TOK_DROPTABLE_PROPERTIES: - case HiveParser.TOK_ALTERTABLE_SERIALIZER: - case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES: - case HiveParser.TOK_ALTERTABLE_PARTCOLTYPE: case HiveParser.TOK_ALTERINDEX_REBUILD: case HiveParser.TOK_ALTERINDEX_PROPERTIES: - case HiveParser.TOK_ALTERVIEW_PROPERTIES: - case HiveParser.TOK_DROPVIEW_PROPERTIES: - case HiveParser.TOK_ALTERVIEW_ADDPARTS: - case HiveParser.TOK_ALTERVIEW_DROPPARTS: - case HiveParser.TOK_ALTERVIEW_RENAME: case HiveParser.TOK_SHOWDATABASES: case HiveParser.TOK_SHOWTABLES: case HiveParser.TOK_SHOWCOLUMNS: @@ -209,9 +238,6 @@ public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) case HiveParser.TOK_CREATEINDEX: case HiveParser.TOK_DROPINDEX: case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT: - case HiveParser.TOK_ALTERTABLE_TOUCH: - case HiveParser.TOK_ALTERTABLE_ARCHIVE: - case HiveParser.TOK_ALTERTABLE_UNARCHIVE: case HiveParser.TOK_LOCKTABLE: case HiveParser.TOK_UNLOCKTABLE: case HiveParser.TOK_LOCKDB: @@ -228,23 +254,8 @@ public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree) case HiveParser.TOK_SHOW_ROLES: case HiveParser.TOK_ALTERDATABASE_PROPERTIES: case HiveParser.TOK_ALTERDATABASE_OWNER: - case HiveParser.TOK_ALTERTABLE_SKEWED: case HiveParser.TOK_TRUNCATETABLE: - case HiveParser.TOK_EXCHANGEPARTITION: case HiveParser.TOK_SHOW_SET_ROLE: - case HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS: - return new DDLSemanticAnalyzer(conf); - case HiveParser.TOK_ALTERTABLE_PARTITION: - HiveOperation commandType = null; - Integer type = ((ASTNode) tree.getChild(1)).getToken().getType(); - if (type == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS) { - commandType = HiveOperation.ALTERTABLE_UPDATEPARTSTATS; - } else if (tree.getChild(0).getChildCount() > 1) { - commandType = tablePartitionCommandType.get(type)[1]; - } else { - commandType = tablePartitionCommandType.get(type)[0]; - } - setSessionCommandType(commandType); return new DDLSemanticAnalyzer(conf); case HiveParser.TOK_CREATEFUNCTION: diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index a8d9a15..0f714b5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -21,6 +21,7 @@ import java.io.Serializable; import java.util.ArrayList; import java.util.HashSet; +import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; @@ -227,11 +228,19 @@ public void compile(final ParseContext pCtx, final List outIter = outputs.iterator(); + while (outIter.hasNext()) { + switch (outIter.next().getType()) { + case DFS_DIR: + case LOCAL_DIR: + outIter.remove(); + break; + default: + break; + } + } Task crtTblTask = TaskFactory.get(new DDLWork( inputs, outputs, crtTblDesc), conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java index f92ecf2..10ef7e5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java @@ -206,7 +206,7 @@ public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { List<String> roles = new ArrayList<String>(); for (int i = rolesStartPos; i < ast.getChildCount(); i++) { - roles.add(BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(i).getText()).toLowerCase()); + roles.add(BaseSemanticAnalyzer.unescapeIdentifier(ast.getChild(i).getText())); } String roleOwnerName = SessionState.getUserFromAuthenticator(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index 20d863b..8517319 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -44,11 +44,19 @@ * */ public static enum AlterTableTypes { - RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, DROPPROPS, ADDSERDE, ADDSERDEPROPS, - ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION, - TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE, - ALTERLOCATION, DROPPARTITION, RENAMEPARTITION, ADDSKEWEDBY, ALTERSKEWEDLOCATION, - ALTERBUCKETNUM, ALTERPARTITION, COMPACT + RENAME("rename"), ADDCOLS("add columns"), REPLACECOLS("replace columns"), + ADDPROPS("add props"), DROPPROPS("drop props"), ADDSERDE("add serde"), ADDSERDEPROPS("add serde props"), + ADDFILEFORMAT("add fileformat"), ADDCLUSTERSORTCOLUMN("add cluster sort column"), + RENAMECOLUMN("rename column"), ADDPARTITION("add partition"), TOUCH("touch"), ARCHIVE("archive"), + UNARCHIVE("unarchive"), ALTERPROTECTMODE("alter protect mode"), + ALTERPARTITIONPROTECTMODE("alter partition protect mode"), ALTERLOCATION("alter location"), + DROPPARTITION("drop partition"), RENAMEPARTITION("rename partition"), ADDSKEWEDBY("add skew column"), + ALTERSKEWEDLOCATION("alter skew location"), ALTERBUCKETNUM("alter bucket number"), + ALTERPARTITION("alter partition"), COMPACT("compact"); + + private final String name; + private AlterTableTypes(String name) { this.name = name; } + public String getName() { return name; } } public static enum ProtectModeType { @@ -236,16 +244,7 @@ public AlterTableDesc(String tableName, HashMap partSpec, int nu @Explain(displayName = "type") public String getAlterTableTypeString() { - switch (op) { - case RENAME: - return "rename"; - case ADDCOLS: - return "add columns"; - case REPLACECOLS: - return "replace columns"; - } - - return "unknown"; + return op.getName(); } /** diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java index 67be666..75cdf16 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java @@ -114,8 +114,8 @@ new Privilege[] {Privilege.ALTER_DATA}, null), ALTERTABLE_PARTCOLTYPE("ALTERTABLE_PARTCOLTYPE", new Privilege[] { Privilege.SELECT }, new Privilege[] { Privilege.ALTER_DATA }), ALTERVIEW_RENAME("ALTERVIEW_RENAME", new Privilege[] {Privilege.ALTER_METADATA}, null), - ALTERTABLE_COMPACT("ALTERTABLE_COMPACT", new Privilege[]{Privilege.SELECT}, - new Privilege[]{Privilege.ALTER_DATA}), + ALTERVIEW_AS("ALTERVIEW_AS", new Privilege[] {Privilege.ALTER_METADATA}, null), + ALTERTABLE_COMPACT("ALTERTABLE_COMPACT", new Privilege[]{Privilege.SELECT}, new Privilege[]{Privilege.ALTER_DATA}), SHOW_COMPACTIONS("SHOW COMPACTIONS", null, null), SHOW_TRANSACTIONS("SHOW TRANSACTIONS", null, null); ; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 5b48ea2..6dce30c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -347,7 +347,7 @@ public static TableDesc getTableDesc(CreateTableDesc crtTblDesc, String cols, if (crtTblDesc.getTableName() != null && crtTblDesc.getDatabaseName() != null) { properties.setProperty(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_NAME, - crtTblDesc.getDatabaseName() + "." + crtTblDesc.getTableName()); + crtTblDesc.getTableName()); } if (crtTblDesc.getTblProps() != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java index 8413fb7..2aae751 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java @@ -102,8 +102,7 @@ public RoleDDLDesc(String roleName, RoleOperation operation) { public RoleDDLDesc(String principalName, PrincipalType principalType, RoleOperation operation, String roleOwnerName) { - this.name = (principalName != null && principalType == PrincipalType.ROLE) ? 
- principalName.toLowerCase() : principalName; + this.name = principalName; this.principalType = principalType; this.operation = operation; this.roleOwnerName = roleOwnerName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java index 2113f45..93df9f4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java @@ -67,9 +67,6 @@ public static HivePrincipalType getHivePrincipalType(PrincipalType type) throws case ROLE: return HivePrincipalType.ROLE; case GROUP: - if (SessionState.get().getAuthorizationMode() == SessionState.AuthorizationMode.V2) { - throw new HiveException(ErrorMsg.UNSUPPORTED_AUTHORIZATION_PRINCIPAL_TYPE_GROUP); - } return HivePrincipalType.GROUP; default: //should not happen as we take care of all existing types diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java index ddbe30c..5786277 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java @@ -21,7 +21,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.security.AccessControlException; -import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; import java.util.EnumSet; import java.util.List; @@ -34,12 +34,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; +import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -48,7 +45,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.shims.ShimLoader; /** * StorageBasedAuthorizationProvider is an implementation of @@ -141,28 +137,77 @@ public void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPri public void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException { Path path = getDbLocation(db); + + // extract drop privileges + DropPrivilegeExtractor privExtractor = new DropPrivilegeExtractor(readRequiredPriv, + writeRequiredPriv); + readRequiredPriv = privExtractor.getReadReqPriv(); + writeRequiredPriv = privExtractor.getWriteReqPriv(); + + // authorize drops if there was a drop privilege requirement + if(privExtractor.hasDropPrivilege()) { + checkDeletePermission(path, getConf(), authenticator.getUserName()); + } + authorize(path, readRequiredPriv, writeRequiredPriv); } @Override public void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws 
HiveException, AuthorizationException { - - // To create/drop/alter a table, the owner should have WRITE permission on the database directory - authorize(hive_db.getDatabase(table.getDbName()), readRequiredPriv, writeRequiredPriv); - - // If the user has specified a location - external or not, check if the user has the try { initWh(); - String location = table.getTTable().getSd().getLocation(); - if (location != null && !location.isEmpty()) { - authorize(new Path(location), readRequiredPriv, writeRequiredPriv); - } } catch (MetaException ex) { throw hiveException(ex); } + + // extract any drop privileges out of required privileges + DropPrivilegeExtractor privExtractor = new DropPrivilegeExtractor(readRequiredPriv, + writeRequiredPriv); + readRequiredPriv = privExtractor.getReadReqPriv(); + writeRequiredPriv = privExtractor.getWriteReqPriv(); + + // if CREATE or DROP priv requirement is there, the owner should have WRITE permission on + // the database directory + if (privExtractor.hasDropPrivilege || requireCreatePrivilege(readRequiredPriv) + || requireCreatePrivilege(writeRequiredPriv)) { + authorize(hive_db.getDatabase(table.getDbName()), new Privilege[] {}, + new Privilege[] { Privilege.ALTER_DATA }); + } + + Path path = table.getDataLocation(); + // authorize drops if there was a drop privilege requirement, and + // table is not external (external table data is not dropped) + if (privExtractor.hasDropPrivilege() && table.getTableType() != TableType.EXTERNAL_TABLE) { + checkDeletePermission(path, getConf(), authenticator.getUserName()); + } + + // If the user has specified a location - external or not, check if the user + // has the permissions on the table dir + if (path != null) { + authorize(path, readRequiredPriv, writeRequiredPriv); + } } + + /** + * + * @param privs + * @return true, if set of given privileges privs contain CREATE privilege + */ + private boolean requireCreatePrivilege(Privilege[] privs) { + if(privs == null) { + return false; + } + for (Privilege priv : privs) { + if (priv.equals(Privilege.CREATE)) { + return true; + } + } + return false; + } + + @Override public void authorize(Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException { @@ -173,17 +218,39 @@ private void authorize(Table table, Partition part, Privilege[] readRequiredPriv Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException { + // extract drop privileges + DropPrivilegeExtractor privExtractor = new DropPrivilegeExtractor(readRequiredPriv, + writeRequiredPriv); + readRequiredPriv = privExtractor.getReadReqPriv(); + writeRequiredPriv = privExtractor.getWriteReqPriv(); + + // authorize drops if there was a drop privilege requirement + if(privExtractor.hasDropPrivilege()) { + checkDeletePermission(part.getDataLocation(), getConf(), authenticator.getUserName()); + } + // Partition path can be null in the case of a new create partition - in this case, // we try to default to checking the permissions of the parent table. // Partition itself can also be null, in cases where this gets called as a generic // catch-all call in cases like those with CTAS onto an unpartitioned table (see HIVE-1887) if ((part == null) || (part.getLocation() == null)) { - authorize(table, readRequiredPriv, writeRequiredPriv); + // this should be the case only if this is a create partition. 
+ // The privilege needed on the table should be ALTER_DATA, and not CREATE + authorize(table, new Privilege[]{}, new Privilege[]{Privilege.ALTER_DATA}); } else { authorize(part.getDataLocation(), readRequiredPriv, writeRequiredPriv); } } + private void checkDeletePermission(Path dataLocation, Configuration conf, String userName) + throws HiveException { + try { + FileUtils.checkDeletePermission(dataLocation, conf, userName); + } catch (Exception e) { + throw new HiveException(e); + } + } + @Override public void authorize(Table table, Partition part, List columns, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException, @@ -191,11 +258,7 @@ public void authorize(Table table, Partition part, List columns, // In a simple storage-based auth, we have no information about columns // living in different files, so we do simple partition-auth and ignore // the columns parameter. - if ((part != null) && (part.getTable() != null)) { - authorize(part.getTable(), part, readRequiredPriv, writeRequiredPriv); - } else { - authorize(table, part, readRequiredPriv, writeRequiredPriv); - } + authorize(table, part, readRequiredPriv, writeRequiredPriv); } @Override @@ -373,4 +436,48 @@ public void authorizeAuthorizationApiInvocation() throws HiveException, Authoriz // no-op - SBA does not attempt to authorize auth api call. Allow it } + public class DropPrivilegeExtractor { + + private boolean hasDropPrivilege = false; + private final Privilege[] readReqPriv; + private final Privilege[] writeReqPriv; + + public DropPrivilegeExtractor(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) { + this.readReqPriv = extractDropPriv(readRequiredPriv); + this.writeReqPriv = extractDropPriv(writeRequiredPriv); + } + + private Privilege[] extractDropPriv(Privilege[] requiredPrivs) { + if (requiredPrivs == null) { + return null; + } + List privList = new ArrayList(); + for (Privilege priv : requiredPrivs) { + if (priv.equals(Privilege.DROP)) { + hasDropPrivilege = true; + } else { + privList.add(priv); + } + } + return privList.toArray(new Privilege[0]); + } + + public boolean hasDropPrivilege() { + return hasDropPrivilege; + } + + public void setHasDropPrivilege(boolean hasDropPrivilege) { + this.hasDropPrivilege = hasDropPrivilege; + } + + public Privilege[] getReadReqPriv() { + return readReqPriv; + } + + public Privilege[] getWriteReqPriv() { + return writeReqPriv; + } + + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java index 29ae4a0..c4469a5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java @@ -114,6 +114,7 @@ ALTERTABLE_SKEWED, ALTERTBLPART_SKEWED_LOCATION, ALTERVIEW_RENAME, + ALTERVIEW_AS, ALTERTABLE_COMPACT, SHOW_COMPACTIONS, SHOW_TRANSACTIONS, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrincipal.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrincipal.java index 30a4496..d8f530b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrincipal.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrincipal.java @@ -50,16 +50,9 @@ public String toString() { public HivePrincipal(String name, HivePrincipalType type){ this.type = type; - if (type == 
HivePrincipalType.ROLE) { - // lower case role to make operations on it case insensitive - // when the old default authorization gets deprecated, this can move - // to ObjectStore code base - this.name = name.toLowerCase(); - } else { - this.name = name; - } - + this.name = name; } + public String getName() { return name; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java index 45404fe..ebe67d9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java @@ -246,6 +246,8 @@ public HivePrivilegeObjectType getObjectType() { (OWNER_PRIV_AR, OWNER_PRIV_AR)); op2Priv.put(HiveOperationType.ALTERVIEW_RENAME, PrivRequirement.newIOPrivRequirement (OWNER_PRIV_AR, OWNER_PRIV_AR)); + op2Priv.put(HiveOperationType.ALTERVIEW_AS, PrivRequirement.newIOPrivRequirement +(OWNER_PRIV_AR, OWNER_PRIV_AR)); op2Priv.put(HiveOperationType.DROPVIEW, PrivRequirement.newIOPrivRequirement (OWNER_PRIV_AR, OWNER_PRIV_AR)); @@ -276,8 +278,9 @@ public HivePrivilegeObjectType getObjectType() { (SEL_NOGRANT_AR, null)); op2Priv.put(HiveOperationType.SHOW_TBLPROPERTIES, PrivRequirement.newIOPrivRequirement (SEL_NOGRANT_AR, null)); - op2Priv.put(HiveOperationType.CREATETABLE_AS_SELECT, PrivRequirement.newIOPrivRequirement -(SEL_NOGRANT_AR, null)); + op2Priv.put(HiveOperationType.CREATETABLE_AS_SELECT, PrivRequirement.newPrivRequirementList( + new PrivRequirement(SEL_NOGRANT_AR, IOType.INPUT), + new PrivRequirement(OWNER_PRIV_AR, HivePrivilegeObjectType.DATABASE))); // QUERY,LOAD op can contain an insert & overwrite, // require delete privilege if this is an insert-overwrite @@ -300,8 +303,9 @@ public HivePrivilegeObjectType getObjectType() { // for now allow only create-view with 'select with grant' // the owner will also have select with grant privileges on new view - op2Priv.put(HiveOperationType.CREATEVIEW, PrivRequirement.newIOPrivRequirement -(SEL_GRANT_AR, null)); + op2Priv.put(HiveOperationType.CREATEVIEW, PrivRequirement.newPrivRequirementList( + new PrivRequirement(SEL_GRANT_AR, IOType.INPUT), + new PrivRequirement(OWNER_PRIV_AR, HivePrivilegeObjectType.DATABASE))); op2Priv.put(HiveOperationType.SHOWFUNCTIONS, PrivRequirement.newIOPrivRequirement (null, null)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java index a6b008a..96c4b48 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLAuthorizationUtils.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -25,6 +24,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.ListIterator; import java.util.Locale; import java.util.Map; import java.util.Set; @@ -416,4 +416,43 @@ static HiveAuthzPluginException getPluginException(String prefix, Exception e) { return new HiveAuthzPluginException(prefix + ": " + e.getMessage(), e); } + /** + * Validate 
the principal type, and convert role name to lower case + * @param hPrincipal + * @return validated principal + * @throws HiveAuthzPluginException + */ + public static HivePrincipal getValidatedPrincipal(HivePrincipal hPrincipal) + throws HiveAuthzPluginException { + if (hPrincipal == null || hPrincipal.getType() == null) { + // null principal + return hPrincipal; + } + switch (hPrincipal.getType()) { + case USER: + return hPrincipal; + case ROLE: + // lower case role names, for case insensitive behavior + return new HivePrincipal(hPrincipal.getName().toLowerCase(), hPrincipal.getType()); + default: + throw new HiveAuthzPluginException("Invalid principal type in principal " + hPrincipal); + } + } + + /** + * Calls getValidatedPrincipal on each principal in the list and updates the list + * @param hivePrincipals + * @return the list with each principal validated + * @throws HiveAuthzPluginException + */ + public static List<HivePrincipal> getValidatedPrincipals(List<HivePrincipal> hivePrincipals) + throws HiveAuthzPluginException { + ListIterator<HivePrincipal> it = hivePrincipals.listIterator(); + while(it.hasNext()){ + it.set(getValidatedPrincipal(it.next())); + } + return hivePrincipals; + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerWrapper.java new file mode 100644 index 0000000..29c1034 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessControllerWrapper.java @@ -0,0 +1,193 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd; + +import java.util.List; +import java.util.ListIterator; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessController; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactory; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrincipal; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilege; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeInfo; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveRoleGrant; + +/** + * Wrapper for {@link SQLStdHiveAccessController} that does validation of + * arguments and then calls the real object. Doing the validation in this + * separate class, so that the chances of missing any validation is small. + * + * Validations/Conversions to be done + * 1. Call SQLAuthorizationUtils.getValidatedPrincipals on HivePrincipal to validate and + * update + * 2. Convert roleName to lower case + * + */ + +@Private +public class SQLStdHiveAccessControllerWrapper implements HiveAccessController { + + private final SQLStdHiveAccessController hiveAccessController; + + public SQLStdHiveAccessControllerWrapper(HiveMetastoreClientFactory metastoreClientFactory, + HiveConf conf, HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) + throws HiveAuthzPluginException { + this.hiveAccessController = new SQLStdHiveAccessController(metastoreClientFactory, conf, + authenticator, ctx); + } + + @Override + public void grantPrivileges(List hivePrincipals, + List hivePrivileges, HivePrivilegeObject hivePrivObject, + HivePrincipal grantorPrincipal, boolean grantOption) throws HiveAuthzPluginException, + HiveAccessControlException { + // validate principals + hivePrincipals = SQLAuthorizationUtils.getValidatedPrincipals(hivePrincipals); + grantorPrincipal = SQLAuthorizationUtils.getValidatedPrincipal(grantorPrincipal); + + hiveAccessController.grantPrivileges(hivePrincipals, hivePrivileges, hivePrivObject, + grantorPrincipal, grantOption); + + } + + @Override + public void revokePrivileges(List hivePrincipals, + List hivePrivileges, HivePrivilegeObject hivePrivObject, + HivePrincipal grantorPrincipal, boolean grantOption) throws HiveAuthzPluginException, + HiveAccessControlException { + // validate principals + hivePrincipals = SQLAuthorizationUtils.getValidatedPrincipals(hivePrincipals); + grantorPrincipal = SQLAuthorizationUtils.getValidatedPrincipal(grantorPrincipal); + + hiveAccessController.revokePrivileges(hivePrincipals, hivePrivileges, hivePrivObject, + grantorPrincipal, grantOption); + } + + @Override + public void createRole(String roleName, HivePrincipal adminGrantor) + throws HiveAuthzPluginException, HiveAccessControlException { + // validate principals + roleName = roleName.toLowerCase(); + adminGrantor = SQLAuthorizationUtils.getValidatedPrincipal(adminGrantor); + + 
hiveAccessController.createRole(roleName, adminGrantor); + } + + @Override + public void dropRole(String roleName) throws HiveAuthzPluginException, HiveAccessControlException { + // lower case roleName + roleName = roleName.toLowerCase(); + + hiveAccessController.dropRole(roleName); + } + + @Override + public void grantRole(List hivePrincipals, List roles, + boolean grantOption, HivePrincipal grantorPrinc) throws HiveAuthzPluginException, + HiveAccessControlException { + // validate principals + hivePrincipals = SQLAuthorizationUtils.getValidatedPrincipals(hivePrincipals); + roles = getLowerCaseRoleNames(roles); + grantorPrinc = SQLAuthorizationUtils.getValidatedPrincipal(grantorPrinc); + + hiveAccessController.grantRole(hivePrincipals, roles, grantOption, grantorPrinc); + } + + @Override + public void revokeRole(List hivePrincipals, List roles, + boolean grantOption, HivePrincipal grantorPrinc) throws HiveAuthzPluginException, + HiveAccessControlException { + // validate + hivePrincipals = SQLAuthorizationUtils.getValidatedPrincipals(hivePrincipals); + roles = getLowerCaseRoleNames(roles); + grantorPrinc = SQLAuthorizationUtils.getValidatedPrincipal(grantorPrinc); + + hiveAccessController.revokeRole(hivePrincipals, roles, grantOption, grantorPrinc); + } + + @Override + public List getAllRoles() throws HiveAuthzPluginException, HiveAccessControlException { + return hiveAccessController.getAllRoles(); + } + + @Override + public List showPrivileges(HivePrincipal principal, HivePrivilegeObject privObj) + throws HiveAuthzPluginException, HiveAccessControlException { + // validate + principal = SQLAuthorizationUtils.getValidatedPrincipal(principal); + + return hiveAccessController.showPrivileges(principal, privObj); + } + + @Override + public void setCurrentRole(String roleName) throws HiveAuthzPluginException, + HiveAccessControlException { + // validate + roleName = roleName.toLowerCase(); + + hiveAccessController.setCurrentRole(roleName); + } + + @Override + public List getCurrentRoleNames() throws HiveAuthzPluginException { + return hiveAccessController.getCurrentRoleNames(); + } + + @Override + public List getPrincipalGrantInfoForRole(String roleName) + throws HiveAuthzPluginException, HiveAccessControlException { + // validate + roleName = roleName.toLowerCase(); + + return hiveAccessController.getPrincipalGrantInfoForRole(roleName); + } + + @Override + public List getRoleGrantInfoForPrincipal(HivePrincipal principal) + throws HiveAuthzPluginException, HiveAccessControlException { + // validate + principal = SQLAuthorizationUtils.getValidatedPrincipal(principal); + + return hiveAccessController.getRoleGrantInfoForPrincipal(principal); + } + + @Override + public void applyAuthorizationConfigPolicy(HiveConf hiveConf) { + hiveAccessController.applyAuthorizationConfigPolicy(hiveConf); + } + + public boolean isUserAdmin() throws HiveAuthzPluginException { + return hiveAccessController.isUserAdmin(); + } + + private List getLowerCaseRoleNames(List roles) { + ListIterator roleIter = roles.listIterator(); + while (roleIter.hasNext()) { + roleIter.set(roleIter.next().toLowerCase()); + } + return roles; + } + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java index 9ceac0c..4555a71 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizationValidator.java @@ -43,17 +43,17 @@ private final HiveMetastoreClientFactory metastoreClientFactory; private final HiveConf conf; private final HiveAuthenticationProvider authenticator; - private final SQLStdHiveAccessController privController; + private final SQLStdHiveAccessControllerWrapper privController; public static final Log LOG = LogFactory.getLog(SQLStdHiveAuthorizationValidator.class); public SQLStdHiveAuthorizationValidator(HiveMetastoreClientFactory metastoreClientFactory, HiveConf conf, HiveAuthenticationProvider authenticator, - SQLStdHiveAccessController privController) { + SQLStdHiveAccessControllerWrapper privilegeManager) { this.metastoreClientFactory = metastoreClientFactory; this.conf = conf; this.authenticator = authenticator; - this.privController = privController; + this.privController = privilegeManager; } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactory.java index 9db3d74..de5dacc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAuthorizerFactory.java @@ -32,8 +32,8 @@ @Override public HiveAuthorizer createHiveAuthorizer(HiveMetastoreClientFactory metastoreClientFactory, HiveConf conf, HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) throws HiveAuthzPluginException { - SQLStdHiveAccessController privilegeManager = - new SQLStdHiveAccessController(metastoreClientFactory, conf, authenticator, ctx); + SQLStdHiveAccessControllerWrapper privilegeManager = + new SQLStdHiveAccessControllerWrapper(metastoreClientFactory, conf, authenticator, ctx); return new HiveAuthorizerImpl( privilegeManager, new SQLStdHiveAuthorizationValidator(metastoreClientFactory, conf, authenticator, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index 80d7933..4d9a3e6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -45,6 +45,8 @@ import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.ql.MapRedStats; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.spark.session.SparkSession; @@ -88,6 +90,8 @@ private static final String HDFS_SESSION_PATH_KEY = "_hive.hdfs.session.path"; private static final String TMP_TABLE_SPACE_KEY = "_hive.tmp_table_space"; private final Map> tempTables = new HashMap>(); + private final Map> tempTableColStats = + new HashMap>(); protected ClassLoader parentLoader; @@ -1159,6 +1163,10 @@ public void applyAuthorizationPolicy() throws HiveException { return tempTables; } + public Map> getTempTableColStats() { + return tempTableColStats; + } + /** * @return ip address for user running the query */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index 
7cb7c5e..f664a7b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -1166,6 +1166,10 @@ public static String getFullyQualifiedColumnName(String dbName, String tabName, return getFullyQualifiedName(dbName, tabName, partName, colName); } + public static String getFullyQualifiedTableName(String dbName, String tabName) { + return getFullyQualifiedName(dbName, tabName); + } + private static String getFullyQualifiedName(String... names) { List nonNullAndEmptyNames = Lists.newArrayList(); for (String name : names) { diff --git a/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto b/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto index d52d0b6..31c49f1 100644 --- a/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto +++ b/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto @@ -53,6 +53,12 @@ message DateStatistics { optional sint32 maximum = 2; } +message TimestampStatistics { + // min,max values saved as milliseconds since epoch + optional sint64 minimum = 1; + optional sint64 maximum = 2; +} + message BinaryStatistics { // sum will store the total binary blob length in a stripe optional sint64 sum = 1; @@ -67,6 +73,7 @@ message ColumnStatistics { optional DecimalStatistics decimalStatistics = 6; optional DateStatistics dateStatistics = 7; optional BinaryStatistics binaryStatistics = 8; + optional TimestampStatistics timestampStatistics = 9; } message RowIndexEntry { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestFileDump.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestFileDump.java index 46f53e8..498a6c1 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestFileDump.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestFileDump.java @@ -92,7 +92,7 @@ public void testDump() throws Exception { } conf.set(HiveConf.ConfVars.HIVE_ORC_ENCODING_STRATEGY.varname, "COMPRESSION"); Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector, - 100000, CompressionKind.ZLIB, 10000, 10000); + 100000, CompressionKind.ZLIB, 10000, 1000); Random r1 = new Random(1); String[] words = new String[]{"It", "was", "the", "best", "of", "times,", "it", "was", "the", "worst", "of", "times,", "it", "was", "the", "age", @@ -116,7 +116,7 @@ public void testDump() throws Exception { // replace stdout and run command System.setOut(new PrintStream(myOut)); - FileDump.main(new String[]{testFilePath.toString()}); + FileDump.main(new String[]{testFilePath.toString(), "--rowindex=1,2,3"}); System.out.flush(); System.setOut(origOut); @@ -138,7 +138,7 @@ public void testDictionaryThreshold() throws Exception { conf.set(HiveConf.ConfVars.HIVE_ORC_ENCODING_STRATEGY.varname, "COMPRESSION"); conf.setFloat(HiveConf.ConfVars.HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD.varname, 0.49f); Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector, - 100000, CompressionKind.ZLIB, 10000, 10000); + 100000, CompressionKind.ZLIB, 10000, 1000); Random r1 = new Random(1); String[] words = new String[]{"It", "was", "the", "best", "of", "times,", "it", "was", "the", "worst", "of", "times,", "it", "was", "the", "age", @@ -171,7 +171,7 @@ public void testDictionaryThreshold() throws Exception { // replace stdout and run command System.setOut(new PrintStream(myOut)); - FileDump.main(new String[]{testFilePath.toString()}); + FileDump.main(new String[]{testFilePath.toString(), "--rowindex=1,2,3"}); System.out.flush(); System.setOut(origOut); diff --git 
a/ql/src/test/queries/clientnegative/authorization_create_tbl.q b/ql/src/test/queries/clientnegative/authorization_create_tbl.q new file mode 100644 index 0000000..f0f398f --- /dev/null +++ b/ql/src/test/queries/clientnegative/authorization_create_tbl.q @@ -0,0 +1,11 @@ +set hive.test.authz.sstd.hs2.mode=true; +set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; +set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator; + +set hive.security.authorization.enabled=true; +set user.name=user33; +create database db23221; +use db23221; + +set user.name=user44; +create table twew221(a string); diff --git a/ql/src/test/queries/clientnegative/authorization_create_view.q b/ql/src/test/queries/clientnegative/authorization_create_view.q new file mode 100644 index 0000000..6438cdd --- /dev/null +++ b/ql/src/test/queries/clientnegative/authorization_create_view.q @@ -0,0 +1,13 @@ +set hive.test.authz.sstd.hs2.mode=true; +set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; +set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator; +set hive.security.authorization.enabled=true; + +set user.name=user3; +create database db1; +use db1; +create table tab1(i int); + +set user.name=user4; +-- create view should fail as view is being created in db that it does not own +create view db1.view1(i) as select * from tab1; diff --git a/ql/src/test/queries/clientnegative/authorization_ctas2.q b/ql/src/test/queries/clientnegative/authorization_ctas2.q new file mode 100644 index 0000000..0bf634c --- /dev/null +++ b/ql/src/test/queries/clientnegative/authorization_ctas2.q @@ -0,0 +1,14 @@ +set hive.test.authz.sstd.hs2.mode=true; +set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; +set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator; +set hive.security.authorization.enabled=true; + +set user.name=user_dbowner; +-- check ctas without db ownership +create database ctas_auth; + +set user.name=user_unauth; +create table t1(i int); +use ctas_auth; +show tables; +create table t2 as select * from default.t1; diff --git a/ql/src/test/queries/clientnegative/authorization_grant_group.q b/ql/src/test/queries/clientnegative/authorization_grant_group.q new file mode 100644 index 0000000..0325508 --- /dev/null +++ b/ql/src/test/queries/clientnegative/authorization_grant_group.q @@ -0,0 +1,11 @@ +set hive.test.authz.sstd.hs2.mode=true; +set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; +set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator; + +set user.name=user1; +-- current user has been set (comment line before the set cmd is resulting in parse error!!) 
+ +CREATE TABLE table_gg(i int); + +-- grant insert on group should fail +GRANT INSERT ON table_gg TO group g1; diff --git a/ql/src/test/queries/clientnegative/authorization_public_create.q b/ql/src/test/queries/clientnegative/authorization_public_create.q index 002389f..8298ce9 100644 --- a/ql/src/test/queries/clientnegative/authorization_public_create.q +++ b/ql/src/test/queries/clientnegative/authorization_public_create.q @@ -1 +1 @@ -create role PUBLIC; +create role public; diff --git a/ql/src/test/queries/clientnegative/authorization_public_drop.q b/ql/src/test/queries/clientnegative/authorization_public_drop.q index 69c5a8d..7e89f6e 100644 --- a/ql/src/test/queries/clientnegative/authorization_public_drop.q +++ b/ql/src/test/queries/clientnegative/authorization_public_drop.q @@ -1 +1 @@ -drop role PUBLIC; +drop role public; diff --git a/ql/src/test/queries/clientnegative/authorization_role_case.q b/ql/src/test/queries/clientnegative/authorization_role_case.q new file mode 100644 index 0000000..339239a --- /dev/null +++ b/ql/src/test/queries/clientnegative/authorization_role_case.q @@ -0,0 +1,10 @@ +create role mixCaseRole1; +create role mixCaseRole2; + +show roles; + + +create table t1(i int); +grant SELECT on table t1 to role mixCaseRole1; +-- grant with wrong case should fail with legacy auth +grant UPDATE on table t1 to role mixcaserole2; diff --git a/ql/src/test/queries/clientnegative/authorize_create_tbl.q b/ql/src/test/queries/clientnegative/authorize_create_tbl.q deleted file mode 100644 index f0f398f..0000000 --- a/ql/src/test/queries/clientnegative/authorize_create_tbl.q +++ /dev/null @@ -1,11 +0,0 @@ -set hive.test.authz.sstd.hs2.mode=true; -set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; -set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator; - -set hive.security.authorization.enabled=true; -set user.name=user33; -create database db23221; -use db23221; - -set user.name=user44; -create table twew221(a string); diff --git a/ql/src/test/queries/clientnegative/authorize_grant_public.q b/ql/src/test/queries/clientnegative/authorize_grant_public.q index bfd3165..e024b50 100644 --- a/ql/src/test/queries/clientnegative/authorize_grant_public.q +++ b/ql/src/test/queries/clientnegative/authorize_grant_public.q @@ -1 +1 @@ -grant role PUBLIC to user hive_test_user; +grant role public to user hive_test_user; diff --git a/ql/src/test/queries/clientnegative/authorize_revoke_public.q b/ql/src/test/queries/clientnegative/authorize_revoke_public.q index 2b29822..dadd424 100644 --- a/ql/src/test/queries/clientnegative/authorize_revoke_public.q +++ b/ql/src/test/queries/clientnegative/authorize_revoke_public.q @@ -1 +1 @@ -revoke role PUBLIC from user hive_test_user; +revoke role public from user hive_test_user; diff --git a/ql/src/test/queries/clientnegative/orc_merge1.q b/ql/src/test/queries/clientnegative/orc_merge1.q deleted file mode 100644 index b2d42cd..0000000 --- a/ql/src/test/queries/clientnegative/orc_merge1.q +++ /dev/null @@ -1,17 +0,0 @@ -DROP TABLE orcfile_merge; -set hive.exec.dynamic.partition=true; -set hive.exec.dynamic.partition.mode=nonstrict; - -set hive.exec.orc.default.buffer.size=1000; -CREATE TABLE orcfile_merge (key INT, value STRING) - PARTITIONED BY (ds STRING, part STRING) STORED AS ORC; - -INSERT OVERWRITE TABLE orcfile_merge PARTITION(ds,part) SELECT * FROM srcpart; - -set hive.exec.orc.default.buffer.size=2000; 
-INSERT INTO TABLE orcfile_merge PARTITION(ds,part) SELECT * FROM srcpart; - --- will fail because of different compression buffer size -ALTER TABLE orcfile_merge PARTITION(ds='2008-04-08',part=11) CONCATENATE; - -DROP TABLE orcfile_merge; diff --git a/ql/src/test/queries/clientnegative/orc_merge2.q b/ql/src/test/queries/clientnegative/orc_merge2.q deleted file mode 100644 index 2f62ee7..0000000 --- a/ql/src/test/queries/clientnegative/orc_merge2.q +++ /dev/null @@ -1,17 +0,0 @@ -DROP TABLE orcfile_merge; -set hive.exec.dynamic.partition=true; -set hive.exec.dynamic.partition.mode=nonstrict; - -set hive.exec.orc.default.compress=ZLIB; -CREATE TABLE orcfile_merge (key INT, value STRING) - PARTITIONED BY (ds STRING, part STRING) STORED AS ORC; - -INSERT OVERWRITE TABLE orcfile_merge PARTITION(ds,part) SELECT * FROM srcpart; - -set hive.exec.orc.default.compress=NONE; -INSERT INTO TABLE orcfile_merge PARTITION(ds,part) SELECT * FROM srcpart; - --- will fail because of different compression codec -ALTER TABLE orcfile_merge PARTITION(ds='2008-04-08',part=11) CONCATENATE; - -DROP TABLE orcfile_merge; diff --git a/ql/src/test/queries/clientnegative/orc_merge3.q b/ql/src/test/queries/clientnegative/orc_merge3.q deleted file mode 100644 index 5158e2e..0000000 --- a/ql/src/test/queries/clientnegative/orc_merge3.q +++ /dev/null @@ -1,17 +0,0 @@ -DROP TABLE orcfile_merge; -set hive.exec.dynamic.partition=true; -set hive.exec.dynamic.partition.mode=nonstrict; - -set hive.exec.orc.default.row.index.stride=1000; -CREATE TABLE orcfile_merge (key INT, value STRING) - PARTITIONED BY (ds STRING, part STRING) STORED AS ORC; - -INSERT OVERWRITE TABLE orcfile_merge PARTITION(ds,part) SELECT * FROM srcpart; - -set hive.exec.orc.default.row.index.stride=2000; -INSERT INTO TABLE orcfile_merge PARTITION(ds,part) SELECT * FROM srcpart; - --- will fail because of different row index stride -ALTER TABLE orcfile_merge PARTITION(ds='2008-04-08',part=11) CONCATENATE; - -DROP TABLE orcfile_merge; diff --git a/ql/src/test/queries/clientnegative/orc_merge4.q b/ql/src/test/queries/clientnegative/orc_merge4.q deleted file mode 100644 index ad48572..0000000 --- a/ql/src/test/queries/clientnegative/orc_merge4.q +++ /dev/null @@ -1,17 +0,0 @@ -DROP TABLE orcfile_merge; -set hive.exec.dynamic.partition=true; -set hive.exec.dynamic.partition.mode=nonstrict; - -CREATE TABLE orcfile_merge (key INT, value STRING) - PARTITIONED BY (ds STRING, part STRING) STORED AS ORC; - -INSERT OVERWRITE TABLE orcfile_merge PARTITION(ds,part) SELECT * FROM srcpart; - -ALTER TABLE orcfile_merge ADD COLUMNS (newkey int); - -INSERT INTO TABLE orcfile_merge PARTITION(ds,part) SELECT key,value,key,ds,hr FROM srcpart; - --- will fail because of different column count -ALTER TABLE orcfile_merge PARTITION(ds='2008-04-08',part=11) CONCATENATE; - -DROP TABLE orcfile_merge; diff --git a/ql/src/test/queries/clientnegative/orc_merge5.q b/ql/src/test/queries/clientnegative/orc_merge5.q deleted file mode 100644 index e94a8cc..0000000 --- a/ql/src/test/queries/clientnegative/orc_merge5.q +++ /dev/null @@ -1,17 +0,0 @@ -DROP TABLE orcfile_merge; -set hive.exec.dynamic.partition=true; -set hive.exec.dynamic.partition.mode=nonstrict; - -set hive.exec.orc.write.format=0.11; -CREATE TABLE orcfile_merge (key INT, value STRING) - PARTITIONED BY (ds STRING, part STRING) STORED AS ORC; - -INSERT OVERWRITE TABLE orcfile_merge PARTITION(ds,part) SELECT * FROM srcpart; - -set hive.exec.orc.write.format=0.12; -INSERT INTO TABLE orcfile_merge PARTITION(ds,part) SELECT * FROM 
srcpart; - --- will fail because of different write format -ALTER TABLE orcfile_merge PARTITION(ds='2008-04-08',part=11) CONCATENATE; - -DROP TABLE orcfile_merge; diff --git a/ql/src/test/queries/clientnegative/temp_table_column_stats.q b/ql/src/test/queries/clientnegative/temp_table_column_stats.q deleted file mode 100644 index 9b7aa4a..0000000 --- a/ql/src/test/queries/clientnegative/temp_table_column_stats.q +++ /dev/null @@ -1,5 +0,0 @@ -create temporary table tmp1 (c1 string); --- table-level stats should work -analyze table tmp1 compute statistics; --- column stats should fail -analyze table tmp1 compute statistics for columns; diff --git a/ql/src/test/queries/clientpositive/add_part_exist.q b/ql/src/test/queries/clientpositive/add_part_exist.q index d176661..b8ace72 100644 --- a/ql/src/test/queries/clientpositive/add_part_exist.q +++ b/ql/src/test/queries/clientpositive/add_part_exist.q @@ -18,20 +18,21 @@ SHOW TABLES; -- Test ALTER TABLE ADD PARTITION in non-default Database CREATE DATABASE add_part_test_db; -USE add_part_test_db; -SHOW TABLES; -CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING); -SHOW PARTITIONS add_part_test; +CREATE TABLE add_part_test_db.add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING); +SHOW PARTITIONS add_part_test_db.add_part_test; -ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01'); -SHOW PARTITIONS add_part_test; +ALTER TABLE add_part_test_db.add_part_test ADD PARTITION (ds='2010-01-01'); +SHOW PARTITIONS add_part_test_db.add_part_test; -ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01'); -SHOW PARTITIONS add_part_test; +ALTER TABLE add_part_test_db.add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01'); +SHOW PARTITIONS add_part_test_db.add_part_test; -ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02'); -SHOW PARTITIONS add_part_test; +ALTER TABLE add_part_test_db.add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02'); +SHOW PARTITIONS add_part_test_db.add_part_test; -ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03'); -SHOW PARTITIONS add_part_test; +ALTER TABLE add_part_test_db.add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03'); +SHOW PARTITIONS add_part_test_db.add_part_test; + +DROP TABLE add_part_test_db.add_part_test; +DROP DATABASE add_part_test_db; diff --git a/ql/src/test/queries/clientpositive/alter1.q b/ql/src/test/queries/clientpositive/alter1.q index 312a017..2fac195 100644 --- a/ql/src/test/queries/clientpositive/alter1.q +++ b/ql/src/test/queries/clientpositive/alter1.q @@ -32,40 +32,38 @@ SHOW TABLES; -- With non-default Database CREATE DATABASE alter1_db; -USE alter1_db; -SHOW TABLES; +SHOW TABLES alter1_db; -CREATE TABLE alter1(a INT, b INT); -DESCRIBE EXTENDED alter1; +CREATE TABLE alter1_db.alter1(a INT, b INT); +DESCRIBE EXTENDED alter1_db.alter1; -ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='3'); -DESCRIBE EXTENDED alter1; +ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('a'='1', 'c'='3'); +DESCRIBE EXTENDED alter1_db.alter1; -ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3'); -DESCRIBE EXTENDED alter1; +ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3'); +DESCRIBE EXTENDED alter1_db.alter1; -ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE'); -DESCRIBE EXTENDED alter1; +ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE'); 
+DESCRIBE EXTENDED alter1_db.alter1; -ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE'); -DESCRIBE EXTENDED alter1; +ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE'); +DESCRIBE EXTENDED alter1_db.alter1; -ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='9'); -DESCRIBE EXTENDED alter1; +ALTER TABLE alter1_db.alter1 SET SERDEPROPERTIES('s1'='9'); +DESCRIBE EXTENDED alter1_db.alter1; -ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20'); -DESCRIBE EXTENDED alter1; +ALTER TABLE alter1_db.alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20'); +DESCRIBE EXTENDED alter1_db.alter1; add jar ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar; -ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9'); -DESCRIBE EXTENDED alter1; +ALTER TABLE alter1_db.alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9'); +DESCRIBE EXTENDED alter1_db.alter1; -ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe'; -DESCRIBE EXTENDED alter1; +ALTER TABLE alter1_db.alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe'; +DESCRIBE EXTENDED alter1_db.alter1; -ALTER TABLE alter1 REPLACE COLUMNS (a int, b int, c string); -DESCRIBE alter1; +ALTER TABLE alter1_db.alter1 REPLACE COLUMNS (a int, b int, c string); +DESCRIBE alter1_db.alter1; -DROP TABLE alter1; -USE default; +DROP TABLE alter1_db.alter1; DROP DATABASE alter1_db; diff --git a/ql/src/test/queries/clientpositive/alter_char1.q b/ql/src/test/queries/clientpositive/alter_char1.q index d391138..b604656 100644 --- a/ql/src/test/queries/clientpositive/alter_char1.q +++ b/ql/src/test/queries/clientpositive/alter_char1.q @@ -1,34 +1,35 @@ -- SORT_QUERY_RESULTS -drop table alter_char_1; +create database ac; -create table alter_char_1 (key string, value string); -insert overwrite table alter_char_1 +create table ac.alter_char_1 (key string, value string); +insert overwrite table ac.alter_char_1 select key, value from src order by key limit 5; -select * from alter_char_1; +select * from ac.alter_char_1; -- change column to char -alter table alter_char_1 change column value value char(20); +alter table ac.alter_char_1 change column value value char(20); -- contents should still look the same -select * from alter_char_1; +select * from ac.alter_char_1; -- change column to smaller char -alter table alter_char_1 change column value value char(3); +alter table ac.alter_char_1 change column value value char(3); -- value column should be truncated now -select * from alter_char_1; +select * from ac.alter_char_1; -- change back to bigger char -alter table alter_char_1 change column value value char(20); +alter table ac.alter_char_1 change column value value char(20); -- column values should be full size again -select * from alter_char_1; +select * from ac.alter_char_1; -- add char column -alter table alter_char_1 add columns (key2 int, value2 char(10)); -select * from alter_char_1; +alter table ac.alter_char_1 add columns (key2 int, value2 char(10)); +select * from ac.alter_char_1; -insert overwrite table alter_char_1 +insert overwrite table ac.alter_char_1 select key, value, key, value from src order by key limit 5; -select * from alter_char_1; +select * from ac.alter_char_1; -drop table alter_char_1; +drop table ac.alter_char_1; +drop database ac; diff --git a/ql/src/test/queries/clientpositive/alter_index.q 
b/ql/src/test/queries/clientpositive/alter_index.q index 2aa13da..3a3d13c 100644 --- a/ql/src/test/queries/clientpositive/alter_index.q +++ b/ql/src/test/queries/clientpositive/alter_index.q @@ -1,11 +1,11 @@ drop index src_index_8 on src; -create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2"); +create index src_index_8 on table default.src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2"); desc extended default__src_src_index_8__; -alter index src_index_8 on src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3"); +alter index src_index_8 on default.src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3"); desc extended default__src_src_index_8__; -drop index src_index_8 on src; +drop index src_index_8 on default.src; show tables; diff --git a/ql/src/test/queries/clientpositive/alter_partition_coltype.q b/ql/src/test/queries/clientpositive/alter_partition_coltype.q index 115eaf9..8c9945c 100644 --- a/ql/src/test/queries/clientpositive/alter_partition_coltype.q +++ b/ql/src/test/queries/clientpositive/alter_partition_coltype.q @@ -51,18 +51,23 @@ desc alter_coltype partition (dt='100', ts=3.0); drop table alter_coltype; -create table alterdynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string); +create database pt; + +create table pt.alterdynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string); set hive.exec.dynamic.partition.mode=nonstrict; -insert into table alterdynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150 limit 5; +insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150 limit 5; + +insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select '1', '2', '1' from src where key=150 limit 5; +insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select NULL, '1', '1' from src where key=150 limit 5; -insert into table alterdynamic_part_table partition(partcol1, partcol2) select '1', '2', '1' from src where key=150 limit 5; -insert into table alterdynamic_part_table partition(partcol1, partcol2) select NULL, '1', '1' from src where key=150 limit 5; +alter table pt.alterdynamic_part_table partition column (partcol1 int); -alter table alterdynamic_part_table partition column (partcol1 int); +explain extended select intcol from pt.alterdynamic_part_table where partcol1='1' and partcol2='1'; -explain extended select intcol from alterdynamic_part_table where partcol1='1' and partcol2='1'; +explain extended select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__'); +select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__'); -explain extended select intcol from alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__'); -select intcol from alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__'); \ No newline at end of file +drop table pt.alterdynamic_part_table; +drop database pt; diff --git a/ql/src/test/queries/clientpositive/alter_skewed_table.q b/ql/src/test/queries/clientpositive/alter_skewed_table.q index 216bbb5..cdc4567 100644 --- 
a/ql/src/test/queries/clientpositive/alter_skewed_table.q +++ b/ql/src/test/queries/clientpositive/alter_skewed_table.q @@ -1,6 +1,6 @@ set hive.mapred.supports.subdirectories=true; -create table original (key STRING, value STRING); +create table original (key STRING, value STRING); describe formatted original; @@ -10,23 +10,27 @@ describe formatted original; drop table original; -create table original2 (key STRING, value STRING) ; +create database skew_test; -describe formatted original2; +create table skew_test.original2 (key STRING, value STRING) ; -alter table original2 SKEWED BY (key, value) ON ((1,1),(5,6)); +describe formatted skew_test.original2; -describe formatted original2; +alter table skew_test.original2 SKEWED BY (key, value) ON ((1,1),(5,6)); -drop table original2; +describe formatted skew_test.original2; -create table original3 (key STRING, value STRING) SKEWED BY (key, value) ON ((1,1),(5,6)); +drop table skew_test.original2; -describe formatted original3; +create table skew_test.original3 (key STRING, value STRING) SKEWED BY (key, value) ON ((1,1),(5,6)); -alter table original3 not skewed; +describe formatted skew_test.original3; -describe formatted original3; +alter table skew_test.original3 not skewed; -drop table original3; +describe formatted skew_test.original3; + +drop table skew_test.original3; + +drop database skew_test; diff --git a/ql/src/test/queries/clientpositive/alter_varchar1.q b/ql/src/test/queries/clientpositive/alter_varchar1.q index 6f644a0..8ed3d20 100644 --- a/ql/src/test/queries/clientpositive/alter_varchar1.q +++ b/ql/src/test/queries/clientpositive/alter_varchar1.q @@ -1,34 +1,35 @@ -- SORT_QUERY_RESULTS -drop table alter_varchar_1; +create database avc; -create table alter_varchar_1 (key string, value string); -insert overwrite table alter_varchar_1 +create table avc.alter_varchar_1 (key string, value string); +insert overwrite table avc.alter_varchar_1 select key, value from src order by key limit 5; -select * from alter_varchar_1; +select * from avc.alter_varchar_1; -- change column to varchar -alter table alter_varchar_1 change column value value varchar(20); +alter table avc.alter_varchar_1 change column value value varchar(20); -- contents should still look the same -select * from alter_varchar_1; +select * from avc.alter_varchar_1; -- change column to smaller varchar -alter table alter_varchar_1 change column value value varchar(3); +alter table avc.alter_varchar_1 change column value value varchar(3); -- value column should be truncated now -select * from alter_varchar_1; +select * from avc.alter_varchar_1; -- change back to bigger varchar -alter table alter_varchar_1 change column value value varchar(20); +alter table avc.alter_varchar_1 change column value value varchar(20); -- column values should be full size again -select * from alter_varchar_1; +select * from avc.alter_varchar_1; -- add varchar column -alter table alter_varchar_1 add columns (key2 int, value2 varchar(10)); -select * from alter_varchar_1; +alter table avc.alter_varchar_1 add columns (key2 int, value2 varchar(10)); +select * from avc.alter_varchar_1; -insert overwrite table alter_varchar_1 +insert overwrite table avc.alter_varchar_1 select key, value, key, value from src order by key limit 5; -select * from alter_varchar_1; +select * from avc.alter_varchar_1; -drop table alter_varchar_1; +drop table avc.alter_varchar_1; +drop database avc; diff --git a/ql/src/test/queries/clientpositive/alter_view_as_select.q b/ql/src/test/queries/clientpositive/alter_view_as_select.q 
index dcab3ca..d2519a8 100644 --- a/ql/src/test/queries/clientpositive/alter_view_as_select.q +++ b/ql/src/test/queries/clientpositive/alter_view_as_select.q @@ -1,13 +1,16 @@ -DROP VIEW testView; -CREATE VIEW testView as SELECT * FROM srcpart; -DESCRIBE FORMATTED testView; +CREATE DATABASE tv; +CREATE VIEW tv.testView as SELECT * FROM srcpart; +DESCRIBE FORMATTED tv.testView; -ALTER VIEW testView AS SELECT value FROM src WHERE key=86; -DESCRIBE FORMATTED testView; +ALTER VIEW tv.testView AS SELECT value FROM src WHERE key=86; +DESCRIBE FORMATTED tv.testView; -ALTER VIEW testView AS +ALTER VIEW tv.testView AS SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key, value LIMIT 10; -DESCRIBE FORMATTED testView; +DESCRIBE FORMATTED tv.testView; + +DROP VIEW tv.testView; +DROP DATABASE tv; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/alter_view_rename.q b/ql/src/test/queries/clientpositive/alter_view_rename.q index 68cf9d6..f91fbb8 100644 --- a/ql/src/test/queries/clientpositive/alter_view_rename.q +++ b/ql/src/test/queries/clientpositive/alter_view_rename.q @@ -1,10 +1,16 @@ +CREATE DATABASE tv1; +CREATE DATABASE tv2; + CREATE TABLE invites (foo INT, bar STRING) PARTITIONED BY (ds STRING); -CREATE VIEW view1 as SELECT * FROM invites; -DESCRIBE EXTENDED view1; +CREATE VIEW tv1.view1 as SELECT * FROM invites; +DESCRIBE EXTENDED tv1.view1; -ALTER VIEW view1 RENAME TO view2; -DESCRIBE EXTENDED view2; -SELECT * FROM view2; +ALTER VIEW tv1.view1 RENAME TO tv2.view2; +DESCRIBE EXTENDED tv2.view2; +SELECT * FROM tv2.view2; DROP TABLE invites; -DROP VIEW view2; \ No newline at end of file +DROP VIEW tv2.view2; + +DROP DATABASE tv1; +DROP DATABASE tv2; diff --git a/ql/src/test/queries/clientpositive/archive_multi.q b/ql/src/test/queries/clientpositive/archive_multi.q index 2c1a6d8..1004aca 100644 --- a/ql/src/test/queries/clientpositive/archive_multi.q +++ b/ql/src/test/queries/clientpositive/archive_multi.q @@ -1,42 +1,41 @@ set hive.archive.enabled = true; set hive.enforce.bucketing = true; -drop table tstsrc; -drop table tstsrcpart; +create database ac_test; -create table tstsrc like src; -insert overwrite table tstsrc select key, value from src; +create table ac_test.tstsrc like default.src; +insert overwrite table ac_test.tstsrc select key, value from default.src; -create table tstsrcpart like srcpart; +create table ac_test.tstsrcpart like default.srcpart; -insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11') -select key, value from srcpart where ds='2008-04-08' and hr='11'; +insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-08', hr='11') +select key, value from default.srcpart where ds='2008-04-08' and hr='11'; -insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='12') -select key, value from srcpart where ds='2008-04-08' and hr='12'; +insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-08', hr='12') +select key, value from default.srcpart where ds='2008-04-08' and hr='12'; -insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='11') -select key, value from srcpart where ds='2008-04-09' and hr='11'; +insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-09', hr='11') +select key, value from default.srcpart where ds='2008-04-09' and hr='11'; -insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='12') -select key, value from srcpart where ds='2008-04-09' and hr='12'; +insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-09', hr='12') +select key, value from 
default.srcpart where ds='2008-04-09' and hr='12'; -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19) SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2; +FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2; -ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08'); +ALTER TABLE ac_test.tstsrcpart ARCHIVE PARTITION (ds='2008-04-08'); SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2; +FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2; -SELECT key, count(1) FROM tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key; +SELECT key, count(1) FROM ac_test.tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key; -SELECT * FROM tstsrcpart a JOIN tstsrc b ON a.key=b.key +SELECT * FROM ac_test.tstsrcpart a JOIN ac_test.tstsrc b ON a.key=b.key WHERE a.ds='2008-04-08' AND a.hr='12' AND a.key='0'; -ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08'); +ALTER TABLE ac_test.tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08'); SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2; +FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2; diff --git a/ql/src/test/queries/clientpositive/authorization_1.q b/ql/src/test/queries/clientpositive/authorization_1.q index 25c9918..d5fd2ec 100644 --- a/ql/src/test/queries/clientpositive/authorization_1.q +++ b/ql/src/test/queries/clientpositive/authorization_1.q @@ -57,33 +57,33 @@ show grant group hive_test_group1 on table src_autho_test; show grant group hive_test_group1 on table src_autho_test(key); --role -create role src_role; -grant role src_role to user hive_test_user; +create role sRc_roLE; +grant role sRc_roLE to user hive_test_user; show role grant user hive_test_user; --column grant to role -grant select(key) on table src_autho_test to role src_role; +grant select(key) on table src_autho_test to role sRc_roLE; -show grant role src_role on table src_autho_test; -show grant role src_role on table src_autho_test(key); +show grant role sRc_roLE on table src_autho_test; +show grant role sRc_roLE on table src_autho_test(key); select key from src_autho_test order by key limit 20; -revoke select(key) on table src_autho_test from role src_role; +revoke select(key) on table src_autho_test from role sRc_roLE; --table grant to role -grant select on table src_autho_test to role src_role; +grant select on table src_autho_test to role sRc_roLE; select key from src_autho_test order by key limit 20; -show grant role src_role on table src_autho_test; -show grant role src_role on table src_autho_test(key); -revoke select on table src_autho_test from role src_role; +show grant role sRc_roLE on table src_autho_test; +show grant role sRc_roLE on table src_autho_test(key); +revoke select on table src_autho_test from role sRc_roLE; -- drop role -drop role src_role; +drop role sRc_roLE; set hive.security.authorization.enabled=false; drop table src_autho_test; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/authorization_5.q b/ql/src/test/queries/clientpositive/authorization_5.q index 8869edc..fec27b4 100644 --- a/ql/src/test/queries/clientpositive/authorization_5.q +++ b/ql/src/test/queries/clientpositive/authorization_5.q @@ -8,13 +8,13 @@ GRANT select ON DATABASE test_db TO USER 
hive_test_user; SHOW GRANT USER hive_test_user ON DATABASE test_db; -CREATE ROLE db_test_role; -GRANT ROLE db_test_role TO USER hive_test_user; +CREATE ROLE db_TEST_Role; +GRANT ROLE db_TEST_Role TO USER hive_test_user; SHOW ROLE GRANT USER hive_test_user; -GRANT drop ON DATABASE test_db TO ROLE db_test_role; -GRANT select ON DATABASE test_db TO ROLE db_test_role; +GRANT drop ON DATABASE test_db TO ROLE db_TEST_Role; +GRANT select ON DATABASE test_db TO ROLE db_TEST_Role; -SHOW GRANT ROLE db_test_role ON DATABASE test_db; +SHOW GRANT ROLE db_TEST_Role ON DATABASE test_db; DROP DATABASE IF EXISTS test_db; diff --git a/ql/src/test/queries/clientpositive/authorization_grant_public_role.q b/ql/src/test/queries/clientpositive/authorization_grant_public_role.q index fe177ac..8f5d762 100644 --- a/ql/src/test/queries/clientpositive/authorization_grant_public_role.q +++ b/ql/src/test/queries/clientpositive/authorization_grant_public_role.q @@ -9,10 +9,10 @@ CREATE TABLE t_gpr1(i int); -- all privileges should have been set for user -GRANT ALL ON t_gpr1 TO ROLE public; +GRANT ALL ON t_gpr1 TO ROLE pubLic; SHOW GRANT USER user1 ON TABLE t_gpr1; -SHOW GRANT ROLE public ON TABLE t_gpr1; +SHOW GRANT ROLE pubLic ON TABLE t_gpr1; set user.name=user2; SHOW CURRENT ROLES; diff --git a/ql/src/test/queries/clientpositive/authorization_role_grant2.q b/ql/src/test/queries/clientpositive/authorization_role_grant2.q index 95fa4e6..59359a7 100644 --- a/ql/src/test/queries/clientpositive/authorization_role_grant2.q +++ b/ql/src/test/queries/clientpositive/authorization_role_grant2.q @@ -31,7 +31,7 @@ set user.name=user2; grant src_role_wadmin to role sRc_role2; set user.name=hive_admin_user; -set role ADMIN; +set role ADMIn; grant src_role2 to user user3; set user.name=user3; diff --git a/ql/src/test/queries/clientpositive/create_or_replace_view.q b/ql/src/test/queries/clientpositive/create_or_replace_view.q index a8f59b7..0148224 100644 --- a/ql/src/test/queries/clientpositive/create_or_replace_view.q +++ b/ql/src/test/queries/clientpositive/create_or_replace_view.q @@ -1,32 +1,39 @@ -drop view v; -create view v as select * from srcpart; -describe formatted v; +create database vt; + +create view vt.v as select * from srcpart; +describe formatted vt.v; -- modifying definition of unpartitioned view -create or replace view v partitioned on (ds, hr) as select * from srcpart; -alter view v add partition (ds='2008-04-08',hr='11'); -alter view v add partition (ds='2008-04-08',hr='12'); -select * from v where value='val_409' and ds='2008-04-08' and hr='11'; -describe formatted v; -show partitions v; +create or replace view vt.v partitioned on (ds, hr) as select * from srcpart; +alter view vt.v add partition (ds='2008-04-08',hr='11'); +alter view vt.v add partition (ds='2008-04-08',hr='12'); +select * from vt.v where value='val_409' and ds='2008-04-08' and hr='11'; +describe formatted vt.v; +show partitions vt.v; + +alter view vt.v drop partition (ds='2008-04-08',hr='11'); +alter view vt.v drop partition (ds='2008-04-08',hr='12'); +show partitions vt.v; -- altering partitioned view 1 -create or replace view v partitioned on (ds, hr) as select value, ds, hr from srcpart; -select * from v where value='val_409' and ds='2008-04-08' and hr='11'; -describe formatted v; -show partitions v; +create or replace view vt.v partitioned on (ds, hr) as select value, ds, hr from srcpart; +select * from vt.v where value='val_409' and ds='2008-04-08' and hr='11'; +describe formatted vt.v; +show partitions vt.v; -- altering partitioned view 
2 -create or replace view v partitioned on (ds, hr) as select key, value, ds, hr from srcpart; -select * from v where value='val_409' and ds='2008-04-08' and hr='11'; -describe formatted v; -show partitions v; -drop view v; +create or replace view vt.v partitioned on (ds, hr) as select key, value, ds, hr from srcpart; +select * from vt.v where value='val_409' and ds='2008-04-08' and hr='11'; +describe formatted vt.v; +show partitions vt.v; +drop view vt.v; -- updating to fix view with invalid definition create table srcpart_temp like srcpart; -create view v partitioned on (ds, hr) as select * from srcpart_temp; -drop table srcpart_temp; -- v is now invalid -create or replace view v partitioned on (ds, hr) as select * from srcpart; -describe formatted v; -drop view v; \ No newline at end of file +create view vt.v partitioned on (ds, hr) as select * from srcpart_temp; +drop table srcpart_temp; -- vt.v is now invalid +create or replace view vt.v partitioned on (ds, hr) as select * from srcpart; +describe formatted vt.v; +drop view vt.v; + +drop database vt; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/drop_multi_partitions.q b/ql/src/test/queries/clientpositive/drop_multi_partitions.q index 14e2356..7ee7ae7 100644 --- a/ql/src/test/queries/clientpositive/drop_multi_partitions.q +++ b/ql/src/test/queries/clientpositive/drop_multi_partitions.q @@ -1,17 +1,23 @@ -create table mp (a string) partitioned by (b string, c string); +create database dmp; -alter table mp add partition (b='1', c='1'); -alter table mp add partition (b='1', c='2'); -alter table mp add partition (b='2', c='2'); +create table dmp.mp (a string) partitioned by (b string, c string); -show partitions mp; +alter table dmp.mp add partition (b='1', c='1'); +alter table dmp.mp add partition (b='1', c='2'); +alter table dmp.mp add partition (b='2', c='2'); -explain extended alter table mp drop partition (b='1'); -alter table mp drop partition (b='1'); +show partitions dmp.mp; -show partitions mp; +explain extended alter table dmp.mp drop partition (b='1'); +alter table dmp.mp drop partition (b='1'); + +show partitions dmp.mp; set hive.exec.drop.ignorenonexistent=false; -alter table mp drop if exists partition (b='3'); +alter table dmp.mp drop if exists partition (b='3'); + +show partitions dmp.mp; + +drop table dmp.mp; -show partitions mp; +drop database dmp; diff --git a/ql/src/test/queries/clientpositive/exchange_partition.q b/ql/src/test/queries/clientpositive/exchange_partition.q index 4be6e3f..96a8c90 100644 --- a/ql/src/test/queries/clientpositive/exchange_partition.q +++ b/ql/src/test/queries/clientpositive/exchange_partition.q @@ -1,12 +1,15 @@ -CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING); -CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING); -SHOW PARTITIONS exchange_part_test1; -SHOW PARTITIONS exchange_part_test2; +create database ex1; +create database ex2; -ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05'); -SHOW PARTITIONS exchange_part_test1; -SHOW PARTITIONS exchange_part_test2; +CREATE TABLE ex1.exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING); +CREATE TABLE ex2.exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING); +SHOW PARTITIONS ex1.exchange_part_test1; +SHOW PARTITIONS ex2.exchange_part_test2; -ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2; -SHOW PARTITIONS exchange_part_test1; -SHOW PARTITIONS exchange_part_test2; +ALTER TABLE ex2.exchange_part_test2 
ADD PARTITION (ds='2013-04-05'); +SHOW PARTITIONS ex1.exchange_part_test1; +SHOW PARTITIONS ex2.exchange_part_test2; + +ALTER TABLE ex1.exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE ex2.exchange_part_test2; +SHOW PARTITIONS ex1.exchange_part_test1; +SHOW PARTITIONS ex2.exchange_part_test2; diff --git a/ql/src/test/queries/clientpositive/index_auto_empty.q b/ql/src/test/queries/clientpositive/index_auto_empty.q index 41f4a40..12c6681 100644 --- a/ql/src/test/queries/clientpositive/index_auto_empty.q +++ b/ql/src/test/queries/clientpositive/index_auto_empty.q @@ -1,22 +1,25 @@ -- Test to ensure that an empty index result is propagated correctly +CREATE DATABASE it; -- Create temp, and populate it with some values in src. -CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE; +CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE; set hive.stats.dbclass=fs; --- Build an index on temp. -CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD; -ALTER INDEX temp_index ON temp REBUILD; +-- Build an index on it.temp. +CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD; +ALTER INDEX temp_index ON it.temp REBUILD; SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; SET hive.optimize.index.filter=true; SET hive.optimize.index.filter.compact.minsize=0; -- query should not return any values -SELECT * FROM default__temp_temp_index__ WHERE key = 86; -EXPLAIN SELECT * FROM temp WHERE key = 86; -SELECT * FROM temp WHERE key = 86; +SELECT * FROM it.it__temp_temp_index__ WHERE key = 86; +EXPLAIN SELECT * FROM it.temp WHERE key = 86; +SELECT * FROM it.temp WHERE key = 86; SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; SET hive.optimize.index.filter=false; -DROP table temp; +DROP table it.temp; + +DROP DATABASE it; diff --git a/ql/src/test/queries/clientpositive/input46.q b/ql/src/test/queries/clientpositive/input46.q index 06291d0..ab863dc 100644 --- a/ql/src/test/queries/clientpositive/input46.q +++ b/ql/src/test/queries/clientpositive/input46.q @@ -1,6 +1,6 @@ create database if not exists table_in_database_creation; create table table_in_database_creation.test1 as select * from src limit 1; -create table `table_in_database_creation.test2` as select * from src limit 1; +create table `table_in_database_creation`.`test2` as select * from src limit 1; create table table_in_database_creation.test3 (a string); -create table `table_in_database_creation.test4` (a string); +create table `table_in_database_creation`.`test4` (a string); drop database table_in_database_creation cascade; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q new file mode 100644 index 0000000..9b846fe --- /dev/null +++ b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q @@ -0,0 +1,35 @@ +-- SORT_QUERY_RESULTS + +create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; +create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; + +load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5; + +SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; +set hive.merge.orcfile.stripe.level=false; +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +-- 3 mappers +explain insert overwrite table orc_merge5b select 
userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +set hive.exec.orc.write.format=0.12; +insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +set hive.exec.orc.write.format=0.11; +insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; + +-- 5 files total +analyze table orc_merge5b compute statistics noscan; +desc formatted orc_merge5b; +select * from orc_merge5b; + +set hive.merge.orcfile.stripe.level=true; +alter table orc_merge5b concatenate; + +-- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind +analyze table orc_merge5b compute statistics noscan; +desc formatted orc_merge5b; +select * from orc_merge5b; + diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat2.q b/ql/src/test/queries/clientpositive/orc_merge_incompat2.q new file mode 100644 index 0000000..8aa48f2 --- /dev/null +++ b/ql/src/test/queries/clientpositive/orc_merge_incompat2.q @@ -0,0 +1,51 @@ +-- SORT_QUERY_RESULTS + +-- orc merge file tests for dynamic partition case + +create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; +create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc; + +load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5; + +SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; +SET mapred.min.split.size=1000; +SET mapred.max.split.size=50000; +SET hive.optimize.index.filter=true; +set hive.merge.orcfile.stripe.level=false; +set hive.merge.tezfiles=false; +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; +set hive.compute.splits.in.am=true; +set tez.am.grouping.min-size=1000; +set tez.am.grouping.max-size=50000; +set hive.exec.dynamic.partition=true; +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.optimize.sort.dynamic.partition=false; + +explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5; +set hive.exec.orc.default.row.index.stride=1000; +insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid; +insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid; +set hive.exec.orc.default.row.index.stride=2000; +insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid; +insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid; + +analyze table orc_merge5a partition(st=80.0) compute statistics noscan; +analyze table orc_merge5a partition(st=0.8) compute statistics noscan; +desc formatted orc_merge5a partition(st=80.0); +desc formatted orc_merge5a partition(st=0.8); +show partitions orc_merge5a; +select * from orc_merge5a where userid<=13; + +set hive.merge.orcfile.stripe.level=true; +explain alter table orc_merge5a partition(st=80.0) 
concatenate; +alter table orc_merge5a partition(st=80.0) concatenate; +alter table orc_merge5a partition(st=0.8) concatenate; + +analyze table orc_merge5a partition(st=80.0) compute statistics noscan; +analyze table orc_merge5a partition(st=0.8) compute statistics noscan; +desc formatted orc_merge5a partition(st=80.0); +desc formatted orc_merge5a partition(st=0.8); +show partitions orc_merge5a; +select * from orc_merge5a where userid<=13; + diff --git a/ql/src/test/queries/clientpositive/orc_ppd_timestamp.q b/ql/src/test/queries/clientpositive/orc_ppd_timestamp.q new file mode 100644 index 0000000..dafd6cf --- /dev/null +++ b/ql/src/test/queries/clientpositive/orc_ppd_timestamp.q @@ -0,0 +1,97 @@ +SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; +SET mapred.min.split.size=1000; +SET mapred.max.split.size=5000; + +create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), ts timestamp) stored as orc tblproperties("orc.stripe.size"="16777216"); + +insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2) uniontbl; + +-- timestamp data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) +select sum(hash(*)) from newtypesorc where cast(ts as string)='2011-01-01 01:01:01'; + +set hive.optimize.index.filter=true; +select sum(hash(*)) from newtypesorc where cast(ts as string)='2011-01-01 01:01:01'; + +set hive.optimize.index.filter=false; +select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as timestamp); + +set hive.optimize.index.filter=true; +select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as timestamp); + +set hive.optimize.index.filter=false; +select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as varchar(20)); + +set hive.optimize.index.filter=true; +select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as varchar(20)); + +set hive.optimize.index.filter=false; +select sum(hash(*)) from newtypesorc where ts!=cast('2011-01-01 01:01:01' as timestamp); + +set hive.optimize.index.filter=true; +select sum(hash(*)) from newtypesorc where ts!=cast('2011-01-01 01:01:01' as timestamp); + +set hive.optimize.index.filter=false; +select sum(hash(*)) from newtypesorc where ts' '' in select clause diff --git a/ql/src/test/results/clientnegative/create_or_replace_view7.q.out b/ql/src/test/results/clientnegative/create_or_replace_view7.q.out index 2282480..ccc1f66 100644 --- a/ql/src/test/results/clientnegative/create_or_replace_view7.q.out +++ b/ql/src/test/results/clientnegative/create_or_replace_view7.q.out @@ -9,28 +9,37 @@ POSTHOOK: type: DROPVIEW PREHOOK: query: create view v1 partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart +PREHOOK: Output: database:default +PREHOOK: Output: default@v1 POSTHOOK: query: create view v1 partitioned on (ds, hr) as select * from srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart +POSTHOOK: Output: database:default POSTHOOK: Output: default@v1 PREHOOK: query: create view v2 partitioned on (ds, hr) as select * from v1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart PREHOOK: Input: default@v1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v2 POSTHOOK: query: create view v2 partitioned on (ds, hr) 
as select * from v1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@v1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 PREHOOK: query: create view v3 partitioned on (ds, hr) as select * from v2 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart PREHOOK: Input: default@v1 PREHOOK: Input: default@v2 +PREHOOK: Output: database:default +PREHOOK: Output: default@v3 POSTHOOK: query: create view v3 partitioned on (ds, hr) as select * from v2 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@v1 POSTHOOK: Input: default@v2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@v3 FAILED: SemanticException Recursive view default.v1 detected (cycle: default.v1 -> default.v3 -> default.v2 -> default.v1). diff --git a/ql/src/test/results/clientnegative/create_or_replace_view8.q.out b/ql/src/test/results/clientnegative/create_or_replace_view8.q.out index ea0310b..4a65a3a 100644 --- a/ql/src/test/results/clientnegative/create_or_replace_view8.q.out +++ b/ql/src/test/results/clientnegative/create_or_replace_view8.q.out @@ -9,8 +9,11 @@ POSTHOOK: type: DROPVIEW PREHOOK: query: create view v1 partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart +PREHOOK: Output: database:default +PREHOOK: Output: default@v1 POSTHOOK: query: create view v1 partitioned on (ds, hr) as select * from srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart +POSTHOOK: Output: database:default POSTHOOK: Output: default@v1 FAILED: SemanticException Recursive view default.v1 detected (cycle: default.v1 -> default.v1). diff --git a/ql/src/test/results/clientnegative/create_view_failure1.q.out b/ql/src/test/results/clientnegative/create_view_failure1.q.out index 5609590..4be2886 100644 --- a/ql/src/test/results/clientnegative/create_view_failure1.q.out +++ b/ql/src/test/results/clientnegative/create_view_failure1.q.out @@ -15,4 +15,6 @@ POSTHOOK: Output: default@xxx12 PREHOOK: query: CREATE VIEW xxx12 AS SELECT key FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@xxx12 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
AlreadyExistsException(message:Table xxx12 already exists) diff --git a/ql/src/test/results/clientnegative/create_view_failure2.q.out b/ql/src/test/results/clientnegative/create_view_failure2.q.out index cfe318a..aadceeb 100644 --- a/ql/src/test/results/clientnegative/create_view_failure2.q.out +++ b/ql/src/test/results/clientnegative/create_view_failure2.q.out @@ -6,10 +6,13 @@ PREHOOK: query: -- views and tables share the same namespace CREATE VIEW xxx4 AS SELECT key FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@xxx4 POSTHOOK: query: -- views and tables share the same namespace CREATE VIEW xxx4 AS SELECT key FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@xxx4 PREHOOK: query: CREATE TABLE xxx4(key int) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientnegative/create_view_failure4.q.out b/ql/src/test/results/clientnegative/create_view_failure4.q.out index 339eb04..8e47735 100644 --- a/ql/src/test/results/clientnegative/create_view_failure4.q.out +++ b/ql/src/test/results/clientnegative/create_view_failure4.q.out @@ -7,4 +7,6 @@ CREATE VIEW xxx5(x,x) AS SELECT key,value FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@xxx5 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Duplicate column name x in the table definition. diff --git a/ql/src/test/results/clientnegative/desc_failure3.q.out b/ql/src/test/results/clientnegative/desc_failure3.q.out index ce77731..443857b 100644 --- a/ql/src/test/results/clientnegative/desc_failure3.q.out +++ b/ql/src/test/results/clientnegative/desc_failure3.q.out @@ -7,10 +7,9 @@ POSTHOOK: Output: database:db1 PREHOOK: query: CREATE TABLE db1.t1(key1 INT, value1 STRING) PARTITIONED BY (ds STRING, part STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 -PREHOOK: Output: db1@db1.t1 +PREHOOK: Output: db1@t1 POSTHOOK: query: CREATE TABLE db1.t1(key1 INT, value1 STRING) PARTITIONED BY (ds STRING, part STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@db1.t1 POSTHOOK: Output: db1@t1 FAILED: SemanticException [Error 10004]: Invalid table alias or column reference db1.t1.key1 diff --git a/ql/src/test/results/clientnegative/drop_table_failure2.q.out b/ql/src/test/results/clientnegative/drop_table_failure2.q.out index adad9f4..956ed8b 100644 --- a/ql/src/test/results/clientnegative/drop_table_failure2.q.out +++ b/ql/src/test/results/clientnegative/drop_table_failure2.q.out @@ -1,9 +1,12 @@ PREHOOK: query: CREATE VIEW xxx6 AS SELECT key FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@xxx6 POSTHOOK: query: CREATE VIEW xxx6 AS SELECT key FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@xxx6 PREHOOK: query: -- Can't use DROP TABLE on a view DROP TABLE xxx6 diff --git a/ql/src/test/results/clientnegative/insert_view_failure.q.out b/ql/src/test/results/clientnegative/insert_view_failure.q.out index 05d0b75..7bc52cb 100644 --- a/ql/src/test/results/clientnegative/insert_view_failure.q.out +++ b/ql/src/test/results/clientnegative/insert_view_failure.q.out @@ -5,8 +5,11 @@ POSTHOOK: type: DROPVIEW PREHOOK: query: CREATE VIEW xxx2 AS SELECT * FROM 
src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@xxx2 POSTHOOK: query: CREATE VIEW xxx2 AS SELECT * FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@xxx2 FAILED: SemanticException [Error 10090]: A view cannot be used as target table for LOAD or INSERT diff --git a/ql/src/test/results/clientnegative/invalidate_view1.q.out b/ql/src/test/results/clientnegative/invalidate_view1.q.out index 59caaaf..7eff2b2 100644 --- a/ql/src/test/results/clientnegative/invalidate_view1.q.out +++ b/ql/src/test/results/clientnegative/invalidate_view1.q.out @@ -23,18 +23,24 @@ POSTHOOK: Output: default@xxx10 PREHOOK: query: CREATE VIEW xxx9 AS SELECT * FROM xxx10 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@xxx10 +PREHOOK: Output: database:default +PREHOOK: Output: default@xxx9 POSTHOOK: query: CREATE VIEW xxx9 AS SELECT * FROM xxx10 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@xxx10 +POSTHOOK: Output: database:default POSTHOOK: Output: default@xxx9 PREHOOK: query: CREATE VIEW xxx8 AS SELECT * FROM xxx9 xxx PREHOOK: type: CREATEVIEW PREHOOK: Input: default@xxx10 PREHOOK: Input: default@xxx9 +PREHOOK: Output: database:default +PREHOOK: Output: default@xxx8 POSTHOOK: query: CREATE VIEW xxx8 AS SELECT * FROM xxx9 xxx POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@xxx10 POSTHOOK: Input: default@xxx9 +POSTHOOK: Output: database:default POSTHOOK: Output: default@xxx8 PREHOOK: query: ALTER TABLE xxx10 REPLACE COLUMNS (key int) PREHOOK: type: ALTERTABLE_REPLACECOLS diff --git a/ql/src/test/results/clientnegative/load_view_failure.q.out b/ql/src/test/results/clientnegative/load_view_failure.q.out index d21c9bf..ba1e507 100644 --- a/ql/src/test/results/clientnegative/load_view_failure.q.out +++ b/ql/src/test/results/clientnegative/load_view_failure.q.out @@ -5,8 +5,11 @@ POSTHOOK: type: DROPVIEW PREHOOK: query: CREATE VIEW xxx11 AS SELECT * FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@xxx11 POSTHOOK: query: CREATE VIEW xxx11 AS SELECT * FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@xxx11 FAILED: SemanticException [Error 10090]: A view cannot be used as target table for LOAD or INSERT diff --git a/ql/src/test/results/clientnegative/recursive_view.q.out b/ql/src/test/results/clientnegative/recursive_view.q.out index 9dc22d3..f51453c 100644 --- a/ql/src/test/results/clientnegative/recursive_view.q.out +++ b/ql/src/test/results/clientnegative/recursive_view.q.out @@ -33,29 +33,38 @@ POSTHOOK: Output: default@t PREHOOK: query: create view r0 as select * from t PREHOOK: type: CREATEVIEW PREHOOK: Input: default@t +PREHOOK: Output: database:default +PREHOOK: Output: default@r0 POSTHOOK: query: create view r0 as select * from t POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@t +POSTHOOK: Output: database:default POSTHOOK: Output: default@r0 PREHOOK: query: create view r1 as select * from r0 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@r0 PREHOOK: Input: default@t +PREHOOK: Output: database:default +PREHOOK: Output: default@r1 POSTHOOK: query: create view r1 as select * from r0 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@r0 POSTHOOK: Input: default@t +POSTHOOK: Output: database:default POSTHOOK: Output: default@r1 PREHOOK: query: create view r2 as select * from r1 PREHOOK: type: CREATEVIEW 
PREHOOK: Input: default@r0 PREHOOK: Input: default@r1 PREHOOK: Input: default@t +PREHOOK: Output: database:default +PREHOOK: Output: default@r2 POSTHOOK: query: create view r2 as select * from r1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@r0 POSTHOOK: Input: default@r1 POSTHOOK: Input: default@t +POSTHOOK: Output: database:default POSTHOOK: Output: default@r2 PREHOOK: query: create view r3 as select * from r2 PREHOOK: type: CREATEVIEW @@ -63,12 +72,15 @@ PREHOOK: Input: default@r0 PREHOOK: Input: default@r1 PREHOOK: Input: default@r2 PREHOOK: Input: default@t +PREHOOK: Output: database:default +PREHOOK: Output: default@r3 POSTHOOK: query: create view r3 as select * from r2 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@r0 POSTHOOK: Input: default@r1 POSTHOOK: Input: default@r2 POSTHOOK: Input: default@t +POSTHOOK: Output: database:default POSTHOOK: Output: default@r3 PREHOOK: query: drop view r0 PREHOOK: type: DROPVIEW diff --git a/ql/src/test/results/clientnegative/temp_table_column_stats.q.out b/ql/src/test/results/clientnegative/temp_table_column_stats.q.out deleted file mode 100644 index 486597a..0000000 --- a/ql/src/test/results/clientnegative/temp_table_column_stats.q.out +++ /dev/null @@ -1,19 +0,0 @@ -PREHOOK: query: create temporary table tmp1 (c1 string) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tmp1 -POSTHOOK: query: create temporary table tmp1 (c1 string) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tmp1 -PREHOOK: query: -- table-level stats should work -analyze table tmp1 compute statistics -PREHOOK: type: QUERY -PREHOOK: Input: default@tmp1 -PREHOOK: Output: default@tmp1 -POSTHOOK: query: -- table-level stats should work -analyze table tmp1 compute statistics -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tmp1 -POSTHOOK: Output: default@tmp1 -FAILED: SemanticException tmp1 is a temporary table. Column statistics are not supported on temporary tables. 
diff --git a/ql/src/test/results/clientnegative/unset_view_property.q.out b/ql/src/test/results/clientnegative/unset_view_property.q.out index 46860b3..4c94e25 100644 --- a/ql/src/test/results/clientnegative/unset_view_property.q.out +++ b/ql/src/test/results/clientnegative/unset_view_property.q.out @@ -1,9 +1,12 @@ PREHOOK: query: CREATE VIEW testView AS SELECT value FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@testView POSTHOOK: query: CREATE VIEW testView AS SELECT value FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@testView PREHOOK: query: ALTER VIEW testView SET TBLPROPERTIES ('propA'='100', 'propB'='200') PREHOOK: type: ALTERVIEW_PROPERTIES diff --git a/ql/src/test/results/clientpositive/add_part_exist.q.out b/ql/src/test/results/clientpositive/add_part_exist.q.out index 4c22d6a..107cfdb 100644 --- a/ql/src/test/results/clientpositive/add_part_exist.q.out +++ b/ql/src/test/results/clientpositive/add_part_exist.q.out @@ -99,85 +99,91 @@ POSTHOOK: query: -- Test ALTER TABLE ADD PARTITION in non-default Database CREATE DATABASE add_part_test_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:add_part_test_db -PREHOOK: query: USE add_part_test_db -PREHOOK: type: SWITCHDATABASE -PREHOOK: Input: database:add_part_test_db -POSTHOOK: query: USE add_part_test_db -POSTHOOK: type: SWITCHDATABASE -POSTHOOK: Input: database:add_part_test_db -PREHOOK: query: SHOW TABLES -PREHOOK: type: SHOWTABLES -POSTHOOK: query: SHOW TABLES -POSTHOOK: type: SHOWTABLES -PREHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE add_part_test_db.add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: add_part_test_db@add_part_test PREHOOK: Output: database:add_part_test_db -POSTHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE add_part_test_db.add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: add_part_test_db@add_part_test POSTHOOK: Output: database:add_part_test_db -PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: query: SHOW PARTITIONS add_part_test_db.add_part_test PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: add_part_test_db@add_part_test -POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: query: SHOW PARTITIONS add_part_test_db.add_part_test POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: add_part_test_db@add_part_test -PREHOOK: query: ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01') +PREHOOK: query: ALTER TABLE add_part_test_db.add_part_test ADD PARTITION (ds='2010-01-01') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: add_part_test_db@add_part_test -POSTHOOK: query: ALTER TABLE add_part_test ADD PARTITION (ds='2010-01-01') +POSTHOOK: query: ALTER TABLE add_part_test_db.add_part_test ADD PARTITION (ds='2010-01-01') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: add_part_test_db@add_part_test POSTHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-01 -PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: query: SHOW PARTITIONS add_part_test_db.add_part_test PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: add_part_test_db@add_part_test -POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: query: SHOW PARTITIONS 
add_part_test_db.add_part_test POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: add_part_test_db@add_part_test ds=2010-01-01 -PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') +PREHOOK: query: ALTER TABLE add_part_test_db.add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: add_part_test_db@add_part_test -POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') +POSTHOOK: query: ALTER TABLE add_part_test_db.add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: add_part_test_db@add_part_test -PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: query: SHOW PARTITIONS add_part_test_db.add_part_test PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: add_part_test_db@add_part_test -POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: query: SHOW PARTITIONS add_part_test_db.add_part_test POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: add_part_test_db@add_part_test ds=2010-01-01 -PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02') +PREHOOK: query: ALTER TABLE add_part_test_db.add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: add_part_test_db@add_part_test -POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02') +POSTHOOK: query: ALTER TABLE add_part_test_db.add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-02') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: add_part_test_db@add_part_test POSTHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-02 -PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: query: SHOW PARTITIONS add_part_test_db.add_part_test PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: add_part_test_db@add_part_test -POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: query: SHOW PARTITIONS add_part_test_db.add_part_test POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: add_part_test_db@add_part_test ds=2010-01-01 ds=2010-01-02 -PREHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03') +PREHOOK: query: ALTER TABLE add_part_test_db.add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: add_part_test_db@add_part_test -POSTHOOK: query: ALTER TABLE add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03') +POSTHOOK: query: ALTER TABLE add_part_test_db.add_part_test ADD IF NOT EXISTS PARTITION (ds='2010-01-01') PARTITION (ds='2010-01-02') PARTITION (ds='2010-01-03') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: add_part_test_db@add_part_test POSTHOOK: Output: add_part_test_db@add_part_test@ds=2010-01-03 -PREHOOK: query: SHOW PARTITIONS add_part_test +PREHOOK: query: SHOW PARTITIONS add_part_test_db.add_part_test PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: add_part_test_db@add_part_test -POSTHOOK: query: SHOW PARTITIONS add_part_test +POSTHOOK: query: SHOW PARTITIONS add_part_test_db.add_part_test POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: add_part_test_db@add_part_test ds=2010-01-01 ds=2010-01-02 ds=2010-01-03 +PREHOOK: query: DROP TABLE add_part_test_db.add_part_test +PREHOOK: type: DROPTABLE +PREHOOK: Input: add_part_test_db@add_part_test +PREHOOK: Output: 
add_part_test_db@add_part_test +POSTHOOK: query: DROP TABLE add_part_test_db.add_part_test +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: add_part_test_db@add_part_test +POSTHOOK: Output: add_part_test_db@add_part_test +PREHOOK: query: DROP DATABASE add_part_test_db +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:add_part_test_db +PREHOOK: Output: database:add_part_test_db +POSTHOOK: query: DROP DATABASE add_part_test_db +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:add_part_test_db +POSTHOOK: Output: database:add_part_test_db diff --git a/ql/src/test/results/clientpositive/allcolref_in_udf.q.out b/ql/src/test/results/clientpositive/allcolref_in_udf.q.out index 3e918be..dd1e9e3 100644 --- a/ql/src/test/results/clientpositive/allcolref_in_udf.q.out +++ b/ql/src/test/results/clientpositive/allcolref_in_udf.q.out @@ -164,10 +164,13 @@ PREHOOK: query: -- HIVE-4181 TOK_FUNCTIONSTAR for UDTF create table allcolref as select array(key, value) from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@allcolref POSTHOOK: query: -- HIVE-4181 TOK_FUNCTIONSTAR for UDTF create table allcolref as select array(key, value) from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@allcolref PREHOOK: query: explain select explode(*) as x from allcolref limit 10 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/alter1.q.out b/ql/src/test/results/clientpositive/alter1.q.out index 1cfaf75..7c78410 100644 --- a/ql/src/test/results/clientpositive/alter1.q.out +++ b/ql/src/test/results/clientpositive/alter1.q.out @@ -210,209 +210,197 @@ POSTHOOK: query: -- With non-default Database CREATE DATABASE alter1_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:alter1_db -PREHOOK: query: USE alter1_db -PREHOOK: type: SWITCHDATABASE -PREHOOK: Input: database:alter1_db -POSTHOOK: query: USE alter1_db -POSTHOOK: type: SWITCHDATABASE -POSTHOOK: Input: database:alter1_db -PREHOOK: query: SHOW TABLES +PREHOOK: query: SHOW TABLES alter1_db PREHOOK: type: SHOWTABLES -POSTHOOK: query: SHOW TABLES +POSTHOOK: query: SHOW TABLES alter1_db POSTHOOK: type: SHOWTABLES -PREHOOK: query: CREATE TABLE alter1(a INT, b INT) +PREHOOK: query: CREATE TABLE alter1_db.alter1(a INT, b INT) PREHOOK: type: CREATETABLE PREHOOK: Output: alter1_db@alter1 PREHOOK: Output: database:alter1_db -POSTHOOK: query: CREATE TABLE alter1(a INT, b INT) +POSTHOOK: query: CREATE TABLE alter1_db.alter1(a INT, b INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: alter1_db@alter1 POSTHOOK: Output: database:alter1_db -PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 PREHOOK: type: DESCTABLE PREHOOK: Input: alter1_db@alter1 -POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: alter1_db@alter1 a int b int #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='3') +PREHOOK: query: ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('a'='1', 'c'='3') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: alter1_db@alter1 PREHOOK: Output: alter1_db@alter1 -POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='3') +POSTHOOK: query: ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('a'='1', 'c'='3') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: alter1_db@alter1 POSTHOOK: Output: 
alter1_db@alter1 -PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 PREHOOK: type: DESCTABLE PREHOOK: Input: alter1_db@alter1 -POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: alter1_db@alter1 a int b int #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3') +PREHOOK: query: ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: alter1_db@alter1 PREHOOK: Output: alter1_db@alter1 -POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3') +POSTHOOK: query: ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('a'='1', 'c'='4', 'd'='3') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: alter1_db@alter1 POSTHOOK: Output: alter1_db@alter1 -PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 PREHOOK: type: DESCTABLE PREHOOK: Input: alter1_db@alter1 -POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: alter1_db@alter1 a int b int #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE') +PREHOOK: query: ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: alter1_db@alter1 PREHOOK: Output: alter1_db@alter1 -POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE') +POSTHOOK: query: ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('EXTERNAL'='TRUE') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: alter1_db@alter1 POSTHOOK: Output: alter1_db@alter1 -PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 PREHOOK: type: DESCTABLE PREHOOK: Input: alter1_db@alter1 -POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: alter1_db@alter1 a int b int #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE') +PREHOOK: query: ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: alter1_db@alter1 PREHOOK: Output: alter1_db@alter1 -POSTHOOK: query: ALTER TABLE alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE') +POSTHOOK: query: ALTER TABLE alter1_db.alter1 SET TBLPROPERTIES ('EXTERNAL'='FALSE') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: alter1_db@alter1 POSTHOOK: Output: alter1_db@alter1 -PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 PREHOOK: type: DESCTABLE PREHOOK: Input: alter1_db@alter1 -POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: alter1_db@alter1 a int b int #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='9') +PREHOOK: query: ALTER TABLE alter1_db.alter1 SET SERDEPROPERTIES('s1'='9') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: alter1_db@alter1 PREHOOK: Output: alter1_db@alter1 -POSTHOOK: query: ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='9') +POSTHOOK: query: ALTER TABLE alter1_db.alter1 SET SERDEPROPERTIES('s1'='9') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES POSTHOOK: Input: alter1_db@alter1 POSTHOOK: Output: 
alter1_db@alter1 -PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 PREHOOK: type: DESCTABLE PREHOOK: Input: alter1_db@alter1 -POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: alter1_db@alter1 a int b int #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20') +PREHOOK: query: ALTER TABLE alter1_db.alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: alter1_db@alter1 PREHOOK: Output: alter1_db@alter1 -POSTHOOK: query: ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20') +POSTHOOK: query: ALTER TABLE alter1_db.alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES POSTHOOK: Input: alter1_db@alter1 POSTHOOK: Output: alter1_db@alter1 -PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 PREHOOK: type: DESCTABLE PREHOOK: Input: alter1_db@alter1 -POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: alter1_db@alter1 a int b int #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9') +PREHOOK: query: ALTER TABLE alter1_db.alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9') PREHOOK: type: ALTERTABLE_SERIALIZER PREHOOK: Input: alter1_db@alter1 PREHOOK: Output: alter1_db@alter1 -POSTHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9') +POSTHOOK: query: ALTER TABLE alter1_db.alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9') POSTHOOK: type: ALTERTABLE_SERIALIZER POSTHOOK: Input: alter1_db@alter1 POSTHOOK: Output: alter1_db@alter1 -PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 PREHOOK: type: DESCTABLE PREHOOK: Input: alter1_db@alter1 -POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: alter1_db@alter1 a string b string #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe' +PREHOOK: query: ALTER TABLE alter1_db.alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe' PREHOOK: type: ALTERTABLE_SERIALIZER PREHOOK: Input: alter1_db@alter1 PREHOOK: Output: alter1_db@alter1 -POSTHOOK: query: ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe' +POSTHOOK: query: ALTER TABLE alter1_db.alter1 SET SERDE 'org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe' POSTHOOK: type: ALTERTABLE_SERIALIZER POSTHOOK: Input: alter1_db@alter1 POSTHOOK: Output: alter1_db@alter1 -PREHOOK: query: DESCRIBE EXTENDED alter1 +PREHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 PREHOOK: type: DESCTABLE PREHOOK: Input: alter1_db@alter1 -POSTHOOK: query: DESCRIBE EXTENDED alter1 +POSTHOOK: query: DESCRIBE EXTENDED alter1_db.alter1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: alter1_db@alter1 a string b string #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE alter1 REPLACE COLUMNS (a int, b int, c string) +PREHOOK: query: ALTER TABLE alter1_db.alter1 REPLACE COLUMNS (a int, b int, c string) PREHOOK: 
type: ALTERTABLE_REPLACECOLS PREHOOK: Input: alter1_db@alter1 PREHOOK: Output: alter1_db@alter1 -POSTHOOK: query: ALTER TABLE alter1 REPLACE COLUMNS (a int, b int, c string) +POSTHOOK: query: ALTER TABLE alter1_db.alter1 REPLACE COLUMNS (a int, b int, c string) POSTHOOK: type: ALTERTABLE_REPLACECOLS POSTHOOK: Input: alter1_db@alter1 POSTHOOK: Output: alter1_db@alter1 -PREHOOK: query: DESCRIBE alter1 +PREHOOK: query: DESCRIBE alter1_db.alter1 PREHOOK: type: DESCTABLE PREHOOK: Input: alter1_db@alter1 -POSTHOOK: query: DESCRIBE alter1 +POSTHOOK: query: DESCRIBE alter1_db.alter1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: alter1_db@alter1 a int b int c string -PREHOOK: query: DROP TABLE alter1 +PREHOOK: query: DROP TABLE alter1_db.alter1 PREHOOK: type: DROPTABLE PREHOOK: Input: alter1_db@alter1 PREHOOK: Output: alter1_db@alter1 -POSTHOOK: query: DROP TABLE alter1 +POSTHOOK: query: DROP TABLE alter1_db.alter1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: alter1_db@alter1 POSTHOOK: Output: alter1_db@alter1 -PREHOOK: query: USE default -PREHOOK: type: SWITCHDATABASE -PREHOOK: Input: database:default -POSTHOOK: query: USE default -POSTHOOK: type: SWITCHDATABASE -POSTHOOK: Input: database:default PREHOOK: query: DROP DATABASE alter1_db PREHOOK: type: DROPDATABASE PREHOOK: Input: database:alter1_db diff --git a/ql/src/test/results/clientpositive/alter_char1.q.out b/ql/src/test/results/clientpositive/alter_char1.q.out index 017da60..b1a88df 100644 --- a/ql/src/test/results/clientpositive/alter_char1.q.out +++ b/ql/src/test/results/clientpositive/alter_char1.q.out @@ -1,38 +1,40 @@ PREHOOK: query: -- SORT_QUERY_RESULTS -drop table alter_char_1 -PREHOOK: type: DROPTABLE +create database ac +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:ac POSTHOOK: query: -- SORT_QUERY_RESULTS -drop table alter_char_1 -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table alter_char_1 (key string, value string) +create database ac +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:ac +PREHOOK: query: create table ac.alter_char_1 (key string, value string) PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@alter_char_1 -POSTHOOK: query: create table alter_char_1 (key string, value string) +PREHOOK: Output: ac@alter_char_1 +PREHOOK: Output: database:ac +POSTHOOK: query: create table ac.alter_char_1 (key string, value string) POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@alter_char_1 -PREHOOK: query: insert overwrite table alter_char_1 +POSTHOOK: Output: ac@alter_char_1 +POSTHOOK: Output: database:ac +PREHOOK: query: insert overwrite table ac.alter_char_1 select key, value from src order by key limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@alter_char_1 -POSTHOOK: query: insert overwrite table alter_char_1 +PREHOOK: Output: ac@alter_char_1 +POSTHOOK: query: insert overwrite table ac.alter_char_1 select key, value from src order by key limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@alter_char_1 +POSTHOOK: Output: ac@alter_char_1 POSTHOOK: Lineage: alter_char_1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: alter_char_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from alter_char_1 +PREHOOK: query: select * from ac.alter_char_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_char_1 +PREHOOK: Input: ac@alter_char_1 #### A masked pattern was 
here #### -POSTHOOK: query: select * from alter_char_1 +POSTHOOK: query: select * from ac.alter_char_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_char_1 +POSTHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -40,24 +42,24 @@ POSTHOOK: Input: default@alter_char_1 10 val_10 100 val_100 PREHOOK: query: -- change column to char -alter table alter_char_1 change column value value char(20) +alter table ac.alter_char_1 change column value value char(20) PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@alter_char_1 -PREHOOK: Output: default@alter_char_1 +PREHOOK: Input: ac@alter_char_1 +PREHOOK: Output: ac@alter_char_1 POSTHOOK: query: -- change column to char -alter table alter_char_1 change column value value char(20) +alter table ac.alter_char_1 change column value value char(20) POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@alter_char_1 -POSTHOOK: Output: default@alter_char_1 +POSTHOOK: Input: ac@alter_char_1 +POSTHOOK: Output: ac@alter_char_1 PREHOOK: query: -- contents should still look the same -select * from alter_char_1 +select * from ac.alter_char_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_char_1 +PREHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### POSTHOOK: query: -- contents should still look the same -select * from alter_char_1 +select * from ac.alter_char_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_char_1 +POSTHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -65,24 +67,24 @@ POSTHOOK: Input: default@alter_char_1 10 val_10 100 val_100 PREHOOK: query: -- change column to smaller char -alter table alter_char_1 change column value value char(3) +alter table ac.alter_char_1 change column value value char(3) PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@alter_char_1 -PREHOOK: Output: default@alter_char_1 +PREHOOK: Input: ac@alter_char_1 +PREHOOK: Output: ac@alter_char_1 POSTHOOK: query: -- change column to smaller char -alter table alter_char_1 change column value value char(3) +alter table ac.alter_char_1 change column value value char(3) POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@alter_char_1 -POSTHOOK: Output: default@alter_char_1 +POSTHOOK: Input: ac@alter_char_1 +POSTHOOK: Output: ac@alter_char_1 PREHOOK: query: -- value column should be truncated now -select * from alter_char_1 +select * from ac.alter_char_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_char_1 +PREHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### POSTHOOK: query: -- value column should be truncated now -select * from alter_char_1 +select * from ac.alter_char_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_char_1 +POSTHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### 0 val 0 val @@ -90,24 +92,24 @@ POSTHOOK: Input: default@alter_char_1 10 val 100 val PREHOOK: query: -- change back to bigger char -alter table alter_char_1 change column value value char(20) +alter table ac.alter_char_1 change column value value char(20) PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@alter_char_1 -PREHOOK: Output: default@alter_char_1 +PREHOOK: Input: ac@alter_char_1 +PREHOOK: Output: ac@alter_char_1 POSTHOOK: query: -- change back to bigger char -alter table alter_char_1 change column value value char(20) +alter table ac.alter_char_1 change column value value char(20) POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@alter_char_1 -POSTHOOK: Output: default@alter_char_1 +POSTHOOK: Input: 
ac@alter_char_1 +POSTHOOK: Output: ac@alter_char_1 PREHOOK: query: -- column values should be full size again -select * from alter_char_1 +select * from ac.alter_char_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_char_1 +PREHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### POSTHOOK: query: -- column values should be full size again -select * from alter_char_1 +select * from ac.alter_char_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_char_1 +POSTHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -115,60 +117,68 @@ POSTHOOK: Input: default@alter_char_1 10 val_10 100 val_100 PREHOOK: query: -- add char column -alter table alter_char_1 add columns (key2 int, value2 char(10)) +alter table ac.alter_char_1 add columns (key2 int, value2 char(10)) PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@alter_char_1 -PREHOOK: Output: default@alter_char_1 +PREHOOK: Input: ac@alter_char_1 +PREHOOK: Output: ac@alter_char_1 POSTHOOK: query: -- add char column -alter table alter_char_1 add columns (key2 int, value2 char(10)) +alter table ac.alter_char_1 add columns (key2 int, value2 char(10)) POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@alter_char_1 -POSTHOOK: Output: default@alter_char_1 -PREHOOK: query: select * from alter_char_1 +POSTHOOK: Input: ac@alter_char_1 +POSTHOOK: Output: ac@alter_char_1 +PREHOOK: query: select * from ac.alter_char_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_char_1 +PREHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### -POSTHOOK: query: select * from alter_char_1 +POSTHOOK: query: select * from ac.alter_char_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_char_1 +POSTHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### 0 val_0 NULL NULL 0 val_0 NULL NULL 0 val_0 NULL NULL 10 val_10 NULL NULL 100 val_100 NULL NULL -PREHOOK: query: insert overwrite table alter_char_1 +PREHOOK: query: insert overwrite table ac.alter_char_1 select key, value, key, value from src order by key limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@alter_char_1 -POSTHOOK: query: insert overwrite table alter_char_1 +PREHOOK: Output: ac@alter_char_1 +POSTHOOK: query: insert overwrite table ac.alter_char_1 select key, value, key, value from src order by key limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@alter_char_1 +POSTHOOK: Output: ac@alter_char_1 POSTHOOK: Lineage: alter_char_1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: alter_char_1.key2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: alter_char_1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: alter_char_1.value2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from alter_char_1 +PREHOOK: query: select * from ac.alter_char_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_char_1 +PREHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### -POSTHOOK: query: select * from alter_char_1 +POSTHOOK: query: select * from ac.alter_char_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_char_1 +POSTHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### 0 val_0 0 val_0 0 val_0 0 val_0 0 val_0 0 val_0 10 val_10 10 val_10 100 val_100 100 val_100 -PREHOOK: query: drop table alter_char_1 +PREHOOK: query: drop table ac.alter_char_1 
PREHOOK: type: DROPTABLE -PREHOOK: Input: default@alter_char_1 -PREHOOK: Output: default@alter_char_1 -POSTHOOK: query: drop table alter_char_1 +PREHOOK: Input: ac@alter_char_1 +PREHOOK: Output: ac@alter_char_1 +POSTHOOK: query: drop table ac.alter_char_1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@alter_char_1 -POSTHOOK: Output: default@alter_char_1 +POSTHOOK: Input: ac@alter_char_1 +POSTHOOK: Output: ac@alter_char_1 +PREHOOK: query: drop database ac +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:ac +PREHOOK: Output: database:ac +POSTHOOK: query: drop database ac +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:ac +POSTHOOK: Output: database:ac diff --git a/ql/src/test/results/clientpositive/alter_index.q.out b/ql/src/test/results/clientpositive/alter_index.q.out index 2093e2f..c69127a 100644 --- a/ql/src/test/results/clientpositive/alter_index.q.out +++ b/ql/src/test/results/clientpositive/alter_index.q.out @@ -4,10 +4,10 @@ PREHOOK: Input: default@src POSTHOOK: query: drop index src_index_8 on src POSTHOOK: type: DROPINDEX POSTHOOK: Input: default@src -PREHOOK: query: create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2") +PREHOOK: query: create index src_index_8 on table default.src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2") PREHOOK: type: CREATEINDEX PREHOOK: Input: default@src -POSTHOOK: query: create index src_index_8 on table src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2") +POSTHOOK: query: create index src_index_8 on table default.src(key) as 'compact' WITH DEFERRED REBUILD IDXPROPERTIES ("prop1"="val1", "prop2"="val2") POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@src POSTHOOK: Output: default@default__src_src_index_8__ @@ -22,9 +22,9 @@ _bucketname string _offsets array #### A masked pattern was here #### -PREHOOK: query: alter index src_index_8 on src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3") +PREHOOK: query: alter index src_index_8 on default.src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3") PREHOOK: type: ALTERINDEX_PROPS -POSTHOOK: query: alter index src_index_8 on src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3") +POSTHOOK: query: alter index src_index_8 on default.src set IDXPROPERTIES ("prop1"="val1_new", "prop3"="val3") POSTHOOK: type: ALTERINDEX_PROPS PREHOOK: query: desc extended default__src_src_index_8__ PREHOOK: type: DESCTABLE @@ -37,10 +37,10 @@ _bucketname string _offsets array #### A masked pattern was here #### -PREHOOK: query: drop index src_index_8 on src +PREHOOK: query: drop index src_index_8 on default.src PREHOOK: type: DROPINDEX PREHOOK: Input: default@src -POSTHOOK: query: drop index src_index_8 on src +POSTHOOK: query: drop index src_index_8 on default.src POSTHOOK: type: DROPINDEX POSTHOOK: Input: default@src PREHOOK: query: show tables diff --git a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out index 25eb48c..4f26fec 100644 --- a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out +++ b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out @@ -939,51 +939,57 @@ POSTHOOK: query: drop table alter_coltype POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@alter_coltype POSTHOOK: Output: default@alter_coltype -PREHOOK: query: create table alterdynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string) 
+PREHOOK: query: create database pt +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:pt +POSTHOOK: query: create database pt +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:pt +PREHOOK: query: create table pt.alterdynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string) PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@alterdynamic_part_table -POSTHOOK: query: create table alterdynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string) +PREHOOK: Output: database:pt +PREHOOK: Output: pt@alterdynamic_part_table +POSTHOOK: query: create table pt.alterdynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string) POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@alterdynamic_part_table -PREHOOK: query: insert into table alterdynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150 limit 5 +POSTHOOK: Output: database:pt +POSTHOOK: Output: pt@alterdynamic_part_table +PREHOOK: query: insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150 limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@alterdynamic_part_table -POSTHOOK: query: insert into table alterdynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150 limit 5 +PREHOOK: Output: pt@alterdynamic_part_table +POSTHOOK: query: insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150 limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@alterdynamic_part_table@partcol1=1/partcol2=1 +POSTHOOK: Output: pt@alterdynamic_part_table@partcol1=1/partcol2=1 POSTHOOK: Lineage: alterdynamic_part_table PARTITION(partcol1=1,partcol2=1).intcol SIMPLE [] -PREHOOK: query: insert into table alterdynamic_part_table partition(partcol1, partcol2) select '1', '2', '1' from src where key=150 limit 5 +PREHOOK: query: insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select '1', '2', '1' from src where key=150 limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@alterdynamic_part_table -POSTHOOK: query: insert into table alterdynamic_part_table partition(partcol1, partcol2) select '1', '2', '1' from src where key=150 limit 5 +PREHOOK: Output: pt@alterdynamic_part_table +POSTHOOK: query: insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select '1', '2', '1' from src where key=150 limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@alterdynamic_part_table@partcol1=2/partcol2=1 +POSTHOOK: Output: pt@alterdynamic_part_table@partcol1=2/partcol2=1 POSTHOOK: Lineage: alterdynamic_part_table PARTITION(partcol1=2,partcol2=1).intcol SIMPLE [] -PREHOOK: query: insert into table alterdynamic_part_table partition(partcol1, partcol2) select NULL, '1', '1' from src where key=150 limit 5 +PREHOOK: query: insert into table pt.alterdynamic_part_table partition(partcol1, partcol2) select NULL, '1', '1' from src where key=150 limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@alterdynamic_part_table -POSTHOOK: query: insert into table alterdynamic_part_table partition(partcol1, partcol2) select NULL, '1', '1' from src where key=150 limit 5 +PREHOOK: Output: pt@alterdynamic_part_table +POSTHOOK: query: insert into table 
pt.alterdynamic_part_table partition(partcol1, partcol2) select NULL, '1', '1' from src where key=150 limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@alterdynamic_part_table@partcol1=1/partcol2=1 +POSTHOOK: Output: pt@alterdynamic_part_table@partcol1=1/partcol2=1 POSTHOOK: Lineage: alterdynamic_part_table PARTITION(partcol1=1,partcol2=1).intcol SIMPLE [] -PREHOOK: query: alter table alterdynamic_part_table partition column (partcol1 int) +PREHOOK: query: alter table pt.alterdynamic_part_table partition column (partcol1 int) PREHOOK: type: ALTERTABLE_PARTCOLTYPE -PREHOOK: Input: default@alterdynamic_part_table -POSTHOOK: query: alter table alterdynamic_part_table partition column (partcol1 int) +PREHOOK: Input: pt@alterdynamic_part_table +POSTHOOK: query: alter table pt.alterdynamic_part_table partition column (partcol1 int) POSTHOOK: type: ALTERTABLE_PARTCOLTYPE -POSTHOOK: Input: default@alterdynamic_part_table -POSTHOOK: Output: default@alterdynamic_part_table -PREHOOK: query: explain extended select intcol from alterdynamic_part_table where partcol1='1' and partcol2='1' +POSTHOOK: Input: pt@alterdynamic_part_table +POSTHOOK: Output: pt@alterdynamic_part_table +PREHOOK: query: explain extended select intcol from pt.alterdynamic_part_table where partcol1='1' and partcol2='1' PREHOOK: type: QUERY -POSTHOOK: query: explain extended select intcol from alterdynamic_part_table where partcol1='1' and partcol2='1' +POSTHOOK: query: explain extended select intcol from pt.alterdynamic_part_table where partcol1='1' and partcol2='1' POSTHOOK: type: QUERY ABSTRACT SYNTAX TREE: @@ -991,6 +997,7 @@ TOK_QUERY TOK_FROM TOK_TABREF TOK_TABNAME + pt alterdynamic_part_table TOK_INSERT TOK_DESTINATION @@ -1067,7 +1074,7 @@ STAGE PLANS: columns.comments columns.types string #### A masked pattern was here #### - name default.alterdynamic_part_table + name pt.alterdynamic_part_table numFiles 2 numRows 1 partition_columns partcol1/partcol2 @@ -1088,7 +1095,7 @@ STAGE PLANS: columns.comments columns.types string #### A masked pattern was here #### - name default.alterdynamic_part_table + name pt.alterdynamic_part_table partition_columns partcol1/partcol2 partition_columns.types int:string serialization.ddl struct alterdynamic_part_table { string intcol} @@ -1096,10 +1103,10 @@ STAGE PLANS: serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.alterdynamic_part_table - name: default.alterdynamic_part_table + name: pt.alterdynamic_part_table + name: pt.alterdynamic_part_table Truncated Path -> Alias: - /alterdynamic_part_table/partcol1=1/partcol2=1 [alterdynamic_part_table] + /pt.db/alterdynamic_part_table/partcol1=1/partcol2=1 [alterdynamic_part_table] Stage: Stage-0 Fetch Operator @@ -1107,9 +1114,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: explain extended select intcol from alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__') +PREHOOK: query: explain extended select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__') PREHOOK: type: QUERY -POSTHOOK: query: explain extended select intcol from alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__') +POSTHOOK: query: explain extended select intcol from pt.alterdynamic_part_table 
where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__') POSTHOOK: type: QUERY ABSTRACT SYNTAX TREE: @@ -1117,6 +1124,7 @@ TOK_QUERY TOK_FROM TOK_TABREF TOK_TABNAME + pt alterdynamic_part_table TOK_INSERT TOK_DESTINATION @@ -1203,7 +1211,7 @@ STAGE PLANS: columns.comments columns.types string #### A masked pattern was here #### - name default.alterdynamic_part_table + name pt.alterdynamic_part_table numFiles 1 numRows 1 partition_columns partcol1/partcol2 @@ -1224,7 +1232,7 @@ STAGE PLANS: columns.comments columns.types string #### A masked pattern was here #### - name default.alterdynamic_part_table + name pt.alterdynamic_part_table partition_columns partcol1/partcol2 partition_columns.types int:string serialization.ddl struct alterdynamic_part_table { string intcol} @@ -1232,10 +1240,10 @@ STAGE PLANS: serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.alterdynamic_part_table - name: default.alterdynamic_part_table + name: pt.alterdynamic_part_table + name: pt.alterdynamic_part_table Truncated Path -> Alias: - /alterdynamic_part_table/partcol1=2/partcol2=1 [alterdynamic_part_table] + /pt.db/alterdynamic_part_table/partcol1=2/partcol2=1 [alterdynamic_part_table] Stage: Stage-0 Fetch Operator @@ -1243,14 +1251,30 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select intcol from alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__') +PREHOOK: query: select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__') PREHOOK: type: QUERY -PREHOOK: Input: default@alterdynamic_part_table -PREHOOK: Input: default@alterdynamic_part_table@partcol1=2/partcol2=1 +PREHOOK: Input: pt@alterdynamic_part_table +PREHOOK: Input: pt@alterdynamic_part_table@partcol1=2/partcol2=1 #### A masked pattern was here #### -POSTHOOK: query: select intcol from alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__') +POSTHOOK: query: select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__') POSTHOOK: type: QUERY -POSTHOOK: Input: default@alterdynamic_part_table -POSTHOOK: Input: default@alterdynamic_part_table@partcol1=2/partcol2=1 +POSTHOOK: Input: pt@alterdynamic_part_table +POSTHOOK: Input: pt@alterdynamic_part_table@partcol1=2/partcol2=1 #### A masked pattern was here #### 1 +PREHOOK: query: drop table pt.alterdynamic_part_table +PREHOOK: type: DROPTABLE +PREHOOK: Input: pt@alterdynamic_part_table +PREHOOK: Output: pt@alterdynamic_part_table +POSTHOOK: query: drop table pt.alterdynamic_part_table +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: pt@alterdynamic_part_table +POSTHOOK: Output: pt@alterdynamic_part_table +PREHOOK: query: drop database pt +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:pt +PREHOOK: Output: database:pt +POSTHOOK: query: drop database pt +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:pt +POSTHOOK: Output: database:pt diff --git a/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out b/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out index 5e7c3e2..d35bf40 100644 --- a/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out +++ 
b/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out @@ -3,11 +3,14 @@ PREHOOK: query: -- SORT_BEFORE_DIFF create table src_auth_tmp as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_auth_tmp POSTHOOK: query: -- SORT_BEFORE_DIFF create table src_auth_tmp as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_auth_tmp PREHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/alter_rename_table.q.out b/ql/src/test/results/clientpositive/alter_rename_table.q.out index 970b43b..732d8a2 100644 --- a/ql/src/test/results/clientpositive/alter_rename_table.q.out +++ b/ql/src/test/results/clientpositive/alter_rename_table.q.out @@ -13,11 +13,10 @@ POSTHOOK: Output: database:target PREHOOK: query: create table source.src like default.src PREHOOK: type: CREATETABLE PREHOOK: Output: database:source -PREHOOK: Output: source@source.src +PREHOOK: Output: source@src POSTHOOK: query: create table source.src like default.src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:source -POSTHOOK: Output: source@source.src POSTHOOK: Output: source@src PREHOOK: query: load data local inpath '../../data/files/kv1.txt' overwrite into table source.src PREHOOK: type: LOAD @@ -30,11 +29,10 @@ POSTHOOK: Output: source@src PREHOOK: query: create table source.srcpart like default.srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:source -PREHOOK: Output: source@source.srcpart +PREHOOK: Output: source@srcpart POSTHOOK: query: create table source.srcpart like default.srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:source -POSTHOOK: Output: source@source.srcpart POSTHOOK: Output: source@srcpart PREHOOK: query: load data local inpath '../../data/files/kv1.txt' overwrite into table source.srcpart partition (ds='2008-04-08', hr='11') PREHOOK: type: LOAD @@ -215,20 +213,18 @@ POSTHOOK: Input: target@srcpart@ds=2008-04-09/hr=12 PREHOOK: query: create table source.src like default.src PREHOOK: type: CREATETABLE PREHOOK: Output: database:source -PREHOOK: Output: source@source.src +PREHOOK: Output: source@src POSTHOOK: query: create table source.src like default.src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:source -POSTHOOK: Output: source@source.src POSTHOOK: Output: source@src PREHOOK: query: create table source.src1 like default.src PREHOOK: type: CREATETABLE PREHOOK: Output: database:source -PREHOOK: Output: source@source.src1 +PREHOOK: Output: source@src1 POSTHOOK: query: create table source.src1 like default.src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:source -POSTHOOK: Output: source@source.src1 POSTHOOK: Output: source@src1 PREHOOK: query: load data local inpath '../../data/files/kv1.txt' overwrite into table source.src PREHOOK: type: LOAD diff --git a/ql/src/test/results/clientpositive/alter_skewed_table.q.out b/ql/src/test/results/clientpositive/alter_skewed_table.q.out index e6bfc5a..0e0c5b0 100644 --- a/ql/src/test/results/clientpositive/alter_skewed_table.q.out +++ b/ql/src/test/results/clientpositive/alter_skewed_table.q.out @@ -92,27 +92,33 @@ POSTHOOK: query: drop table original POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@original POSTHOOK: Output: default@original -PREHOOK: query: create table original2 (key STRING, 
value STRING) +PREHOOK: query: create database skew_test +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:skew_test +POSTHOOK: query: create database skew_test +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:skew_test +PREHOOK: query: create table skew_test.original2 (key STRING, value STRING) PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@original2 -POSTHOOK: query: create table original2 (key STRING, value STRING) +PREHOOK: Output: database:skew_test +PREHOOK: Output: skew_test@original2 +POSTHOOK: query: create table skew_test.original2 (key STRING, value STRING) POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@original2 -PREHOOK: query: describe formatted original2 +POSTHOOK: Output: database:skew_test +POSTHOOK: Output: skew_test@original2 +PREHOOK: query: describe formatted skew_test.original2 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@original2 -POSTHOOK: query: describe formatted original2 +PREHOOK: Input: skew_test@original2 +POSTHOOK: query: describe formatted skew_test.original2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@original2 +POSTHOOK: Input: skew_test@original2 # col_name data_type comment key string value string # Detailed Table Information -Database: default +Database: skew_test #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -131,27 +137,27 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table original2 SKEWED BY (key, value) ON ((1,1),(5,6)) +PREHOOK: query: alter table skew_test.original2 SKEWED BY (key, value) ON ((1,1),(5,6)) PREHOOK: type: ALTERTABLE_SKEWED -PREHOOK: Input: default@original2 -PREHOOK: Output: default@original2 -POSTHOOK: query: alter table original2 SKEWED BY (key, value) ON ((1,1),(5,6)) +PREHOOK: Input: skew_test@original2 +PREHOOK: Output: skew_test@original2 +POSTHOOK: query: alter table skew_test.original2 SKEWED BY (key, value) ON ((1,1),(5,6)) POSTHOOK: type: ALTERTABLE_SKEWED -POSTHOOK: Input: default@original2 -POSTHOOK: Output: default@original2 -PREHOOK: query: describe formatted original2 +POSTHOOK: Input: skew_test@original2 +POSTHOOK: Output: skew_test@original2 +PREHOOK: query: describe formatted skew_test.original2 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@original2 -POSTHOOK: query: describe formatted original2 +PREHOOK: Input: skew_test@original2 +POSTHOOK: query: describe formatted skew_test.original2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@original2 +POSTHOOK: Input: skew_test@original2 # col_name data_type comment key string value string # Detailed Table Information -Database: default +Database: skew_test #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -178,35 +184,35 @@ Skewed Columns: [key, value] Skewed Values: [[1, 1], [5, 6]] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table original2 +PREHOOK: query: drop table skew_test.original2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@original2 -PREHOOK: Output: default@original2 -POSTHOOK: query: drop table original2 +PREHOOK: Input: skew_test@original2 +PREHOOK: Output: skew_test@original2 +POSTHOOK: query: drop table skew_test.original2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@original2 -POSTHOOK: Output: default@original2 -PREHOOK: query: create table original3 (key STRING, value STRING) SKEWED BY (key, value) ON ((1,1),(5,6)) +POSTHOOK: Input: skew_test@original2 +POSTHOOK: Output: 
skew_test@original2 +PREHOOK: query: create table skew_test.original3 (key STRING, value STRING) SKEWED BY (key, value) ON ((1,1),(5,6)) PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@original3 -POSTHOOK: query: create table original3 (key STRING, value STRING) SKEWED BY (key, value) ON ((1,1),(5,6)) +PREHOOK: Output: database:skew_test +PREHOOK: Output: skew_test@original3 +POSTHOOK: query: create table skew_test.original3 (key STRING, value STRING) SKEWED BY (key, value) ON ((1,1),(5,6)) POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@original3 -PREHOOK: query: describe formatted original3 +POSTHOOK: Output: database:skew_test +POSTHOOK: Output: skew_test@original3 +PREHOOK: query: describe formatted skew_test.original3 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@original3 -POSTHOOK: query: describe formatted original3 +PREHOOK: Input: skew_test@original3 +POSTHOOK: query: describe formatted skew_test.original3 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@original3 +POSTHOOK: Input: skew_test@original3 # col_name data_type comment key string value string # Detailed Table Information -Database: default +Database: skew_test #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -227,27 +233,27 @@ Skewed Columns: [key, value] Skewed Values: [[1, 1], [5, 6]] Storage Desc Params: serialization.format 1 -PREHOOK: query: alter table original3 not skewed +PREHOOK: query: alter table skew_test.original3 not skewed PREHOOK: type: ALTERTABLE_SKEWED -PREHOOK: Input: default@original3 -PREHOOK: Output: default@original3 -POSTHOOK: query: alter table original3 not skewed +PREHOOK: Input: skew_test@original3 +PREHOOK: Output: skew_test@original3 +POSTHOOK: query: alter table skew_test.original3 not skewed POSTHOOK: type: ALTERTABLE_SKEWED -POSTHOOK: Input: default@original3 -POSTHOOK: Output: default@original3 -PREHOOK: query: describe formatted original3 +POSTHOOK: Input: skew_test@original3 +POSTHOOK: Output: skew_test@original3 +PREHOOK: query: describe formatted skew_test.original3 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@original3 -POSTHOOK: query: describe formatted original3 +PREHOOK: Input: skew_test@original3 +POSTHOOK: query: describe formatted skew_test.original3 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@original3 +POSTHOOK: Input: skew_test@original3 # col_name data_type comment key string value string # Detailed Table Information -Database: default +Database: skew_test #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -272,11 +278,19 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: drop table original3 +PREHOOK: query: drop table skew_test.original3 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@original3 -PREHOOK: Output: default@original3 -POSTHOOK: query: drop table original3 +PREHOOK: Input: skew_test@original3 +PREHOOK: Output: skew_test@original3 +POSTHOOK: query: drop table skew_test.original3 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@original3 -POSTHOOK: Output: default@original3 +POSTHOOK: Input: skew_test@original3 +POSTHOOK: Output: skew_test@original3 +PREHOOK: query: drop database skew_test +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:skew_test +PREHOOK: Output: database:skew_test +POSTHOOK: query: drop database skew_test +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:skew_test +POSTHOOK: Output: database:skew_test diff --git 
a/ql/src/test/results/clientpositive/alter_table_update_status.q.out b/ql/src/test/results/clientpositive/alter_table_update_status.q.out index 14940af..3613598 100644 --- a/ql/src/test/results/clientpositive/alter_table_update_status.q.out +++ b/ql/src/test/results/clientpositive/alter_table_update_status.q.out @@ -1,9 +1,12 @@ PREHOOK: query: create table src_stat as select * from src1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src1 +PREHOOK: Output: database:default +PREHOOK: Output: default@src_stat POSTHOOK: query: create table src_stat as select * from src1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_stat PREHOOK: query: create table src_stat_int ( key double, diff --git a/ql/src/test/results/clientpositive/alter_varchar1.q.out b/ql/src/test/results/clientpositive/alter_varchar1.q.out index e74a7ed..d1928ef 100644 --- a/ql/src/test/results/clientpositive/alter_varchar1.q.out +++ b/ql/src/test/results/clientpositive/alter_varchar1.q.out @@ -1,38 +1,40 @@ PREHOOK: query: -- SORT_QUERY_RESULTS -drop table alter_varchar_1 -PREHOOK: type: DROPTABLE +create database avc +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:avc POSTHOOK: query: -- SORT_QUERY_RESULTS -drop table alter_varchar_1 -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table alter_varchar_1 (key string, value string) +create database avc +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:avc +PREHOOK: query: create table avc.alter_varchar_1 (key string, value string) PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@alter_varchar_1 -POSTHOOK: query: create table alter_varchar_1 (key string, value string) +PREHOOK: Output: avc@alter_varchar_1 +PREHOOK: Output: database:avc +POSTHOOK: query: create table avc.alter_varchar_1 (key string, value string) POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@alter_varchar_1 -PREHOOK: query: insert overwrite table alter_varchar_1 +POSTHOOK: Output: avc@alter_varchar_1 +POSTHOOK: Output: database:avc +PREHOOK: query: insert overwrite table avc.alter_varchar_1 select key, value from src order by key limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@alter_varchar_1 -POSTHOOK: query: insert overwrite table alter_varchar_1 +PREHOOK: Output: avc@alter_varchar_1 +POSTHOOK: query: insert overwrite table avc.alter_varchar_1 select key, value from src order by key limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@alter_varchar_1 +POSTHOOK: Output: avc@alter_varchar_1 POSTHOOK: Lineage: alter_varchar_1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: alter_varchar_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from alter_varchar_1 +PREHOOK: query: select * from avc.alter_varchar_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_varchar_1 +PREHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: select * from alter_varchar_1 +POSTHOOK: query: select * from avc.alter_varchar_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_varchar_1 +POSTHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -40,24 +42,24 @@ POSTHOOK: Input: default@alter_varchar_1 10 val_10 100 val_100 PREHOOK: query: -- change column to varchar -alter table alter_varchar_1 
change column value value varchar(20) +alter table avc.alter_varchar_1 change column value value varchar(20) PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@alter_varchar_1 -PREHOOK: Output: default@alter_varchar_1 +PREHOOK: Input: avc@alter_varchar_1 +PREHOOK: Output: avc@alter_varchar_1 POSTHOOK: query: -- change column to varchar -alter table alter_varchar_1 change column value value varchar(20) +alter table avc.alter_varchar_1 change column value value varchar(20) POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@alter_varchar_1 -POSTHOOK: Output: default@alter_varchar_1 +POSTHOOK: Input: avc@alter_varchar_1 +POSTHOOK: Output: avc@alter_varchar_1 PREHOOK: query: -- contents should still look the same -select * from alter_varchar_1 +select * from avc.alter_varchar_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_varchar_1 +PREHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### POSTHOOK: query: -- contents should still look the same -select * from alter_varchar_1 +select * from avc.alter_varchar_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_varchar_1 +POSTHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -65,24 +67,24 @@ POSTHOOK: Input: default@alter_varchar_1 10 val_10 100 val_100 PREHOOK: query: -- change column to smaller varchar -alter table alter_varchar_1 change column value value varchar(3) +alter table avc.alter_varchar_1 change column value value varchar(3) PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@alter_varchar_1 -PREHOOK: Output: default@alter_varchar_1 +PREHOOK: Input: avc@alter_varchar_1 +PREHOOK: Output: avc@alter_varchar_1 POSTHOOK: query: -- change column to smaller varchar -alter table alter_varchar_1 change column value value varchar(3) +alter table avc.alter_varchar_1 change column value value varchar(3) POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@alter_varchar_1 -POSTHOOK: Output: default@alter_varchar_1 +POSTHOOK: Input: avc@alter_varchar_1 +POSTHOOK: Output: avc@alter_varchar_1 PREHOOK: query: -- value column should be truncated now -select * from alter_varchar_1 +select * from avc.alter_varchar_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_varchar_1 +PREHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### POSTHOOK: query: -- value column should be truncated now -select * from alter_varchar_1 +select * from avc.alter_varchar_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_varchar_1 +POSTHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### 0 val 0 val @@ -90,24 +92,24 @@ POSTHOOK: Input: default@alter_varchar_1 10 val 100 val PREHOOK: query: -- change back to bigger varchar -alter table alter_varchar_1 change column value value varchar(20) +alter table avc.alter_varchar_1 change column value value varchar(20) PREHOOK: type: ALTERTABLE_RENAMECOL -PREHOOK: Input: default@alter_varchar_1 -PREHOOK: Output: default@alter_varchar_1 +PREHOOK: Input: avc@alter_varchar_1 +PREHOOK: Output: avc@alter_varchar_1 POSTHOOK: query: -- change back to bigger varchar -alter table alter_varchar_1 change column value value varchar(20) +alter table avc.alter_varchar_1 change column value value varchar(20) POSTHOOK: type: ALTERTABLE_RENAMECOL -POSTHOOK: Input: default@alter_varchar_1 -POSTHOOK: Output: default@alter_varchar_1 +POSTHOOK: Input: avc@alter_varchar_1 +POSTHOOK: Output: avc@alter_varchar_1 PREHOOK: query: -- column values should be full size again -select * from alter_varchar_1 +select * from 
avc.alter_varchar_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_varchar_1 +PREHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### POSTHOOK: query: -- column values should be full size again -select * from alter_varchar_1 +select * from avc.alter_varchar_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_varchar_1 +POSTHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -115,60 +117,68 @@ POSTHOOK: Input: default@alter_varchar_1 10 val_10 100 val_100 PREHOOK: query: -- add varchar column -alter table alter_varchar_1 add columns (key2 int, value2 varchar(10)) +alter table avc.alter_varchar_1 add columns (key2 int, value2 varchar(10)) PREHOOK: type: ALTERTABLE_ADDCOLS -PREHOOK: Input: default@alter_varchar_1 -PREHOOK: Output: default@alter_varchar_1 +PREHOOK: Input: avc@alter_varchar_1 +PREHOOK: Output: avc@alter_varchar_1 POSTHOOK: query: -- add varchar column -alter table alter_varchar_1 add columns (key2 int, value2 varchar(10)) +alter table avc.alter_varchar_1 add columns (key2 int, value2 varchar(10)) POSTHOOK: type: ALTERTABLE_ADDCOLS -POSTHOOK: Input: default@alter_varchar_1 -POSTHOOK: Output: default@alter_varchar_1 -PREHOOK: query: select * from alter_varchar_1 +POSTHOOK: Input: avc@alter_varchar_1 +POSTHOOK: Output: avc@alter_varchar_1 +PREHOOK: query: select * from avc.alter_varchar_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_varchar_1 +PREHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: select * from alter_varchar_1 +POSTHOOK: query: select * from avc.alter_varchar_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_varchar_1 +POSTHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### 0 val_0 NULL NULL 0 val_0 NULL NULL 0 val_0 NULL NULL 10 val_10 NULL NULL 100 val_100 NULL NULL -PREHOOK: query: insert overwrite table alter_varchar_1 +PREHOOK: query: insert overwrite table avc.alter_varchar_1 select key, value, key, value from src order by key limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@alter_varchar_1 -POSTHOOK: query: insert overwrite table alter_varchar_1 +PREHOOK: Output: avc@alter_varchar_1 +POSTHOOK: query: insert overwrite table avc.alter_varchar_1 select key, value, key, value from src order by key limit 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@alter_varchar_1 +POSTHOOK: Output: avc@alter_varchar_1 POSTHOOK: Lineage: alter_varchar_1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: alter_varchar_1.key2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: alter_varchar_1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: alter_varchar_1.value2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: select * from alter_varchar_1 +PREHOOK: query: select * from avc.alter_varchar_1 PREHOOK: type: QUERY -PREHOOK: Input: default@alter_varchar_1 +PREHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: select * from alter_varchar_1 +POSTHOOK: query: select * from avc.alter_varchar_1 POSTHOOK: type: QUERY -POSTHOOK: Input: default@alter_varchar_1 +POSTHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### 0 val_0 0 val_0 0 val_0 0 val_0 0 val_0 0 val_0 10 val_10 10 val_10 100 val_100 100 val_100 -PREHOOK: query: drop table alter_varchar_1 +PREHOOK: query: 
drop table avc.alter_varchar_1 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@alter_varchar_1 -PREHOOK: Output: default@alter_varchar_1 -POSTHOOK: query: drop table alter_varchar_1 +PREHOOK: Input: avc@alter_varchar_1 +PREHOOK: Output: avc@alter_varchar_1 +POSTHOOK: query: drop table avc.alter_varchar_1 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@alter_varchar_1 -POSTHOOK: Output: default@alter_varchar_1 +POSTHOOK: Input: avc@alter_varchar_1 +POSTHOOK: Output: avc@alter_varchar_1 +PREHOOK: query: drop database avc +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:avc +PREHOOK: Output: database:avc +POSTHOOK: query: drop database avc +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:avc +POSTHOOK: Output: database:avc diff --git a/ql/src/test/results/clientpositive/alter_view_as_select.q.out b/ql/src/test/results/clientpositive/alter_view_as_select.q.out index 53a6b37..ff2d860 100644 --- a/ql/src/test/results/clientpositive/alter_view_as_select.q.out +++ b/ql/src/test/results/clientpositive/alter_view_as_select.q.out @@ -1,20 +1,25 @@ -PREHOOK: query: DROP VIEW testView -PREHOOK: type: DROPVIEW -POSTHOOK: query: DROP VIEW testView -POSTHOOK: type: DROPVIEW -PREHOOK: query: CREATE VIEW testView as SELECT * FROM srcpart +PREHOOK: query: CREATE DATABASE tv +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:tv +POSTHOOK: query: CREATE DATABASE tv +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:tv +PREHOOK: query: CREATE VIEW tv.testView as SELECT * FROM srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart -POSTHOOK: query: CREATE VIEW testView as SELECT * FROM srcpart +PREHOOK: Output: database:tv +PREHOOK: Output: tv@testView +POSTHOOK: query: CREATE VIEW tv.testView as SELECT * FROM srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@testView -PREHOOK: query: DESCRIBE FORMATTED testView +POSTHOOK: Output: database:tv +POSTHOOK: Output: tv@testView +PREHOOK: query: DESCRIBE FORMATTED tv.testView PREHOOK: type: DESCTABLE -PREHOOK: Input: default@testview -POSTHOOK: query: DESCRIBE FORMATTED testView +PREHOOK: Input: tv@testview +POSTHOOK: query: DESCRIBE FORMATTED tv.testView POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@testview +POSTHOOK: Input: tv@testview # col_name data_type comment key string @@ -23,7 +28,7 @@ ds string hr string # Detailed Table Information -Database: default +Database: tv #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -43,25 +48,29 @@ Sort Columns: [] # View Information View Original Text: SELECT * FROM srcpart View Expanded Text: SELECT `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` FROM `default`.`srcpart` -PREHOOK: query: ALTER VIEW testView AS SELECT value FROM src WHERE key=86 +PREHOOK: query: ALTER VIEW tv.testView AS SELECT value FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src -POSTHOOK: query: ALTER VIEW testView AS SELECT value FROM src WHERE key=86 +PREHOOK: Output: database:tv +PREHOOK: Output: tv@testView +POSTHOOK: query: ALTER VIEW tv.testView AS SELECT value FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src -POSTHOOK: Output: default@testview -PREHOOK: query: DESCRIBE FORMATTED testView +POSTHOOK: Output: database:tv +POSTHOOK: Output: tv@testView +POSTHOOK: Output: tv@testview +PREHOOK: query: DESCRIBE FORMATTED tv.testView PREHOOK: type: DESCTABLE -PREHOOK: Input: default@testview -POSTHOOK: query: DESCRIBE FORMATTED testView +PREHOOK: Input: 
tv@testview +POSTHOOK: query: DESCRIBE FORMATTED tv.testView POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@testview +POSTHOOK: Input: tv@testview # col_name data_type comment value string # Detailed Table Information -Database: default +Database: tv #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -81,34 +90,38 @@ Sort Columns: [] # View Information View Original Text: SELECT value FROM src WHERE key=86 View Expanded Text: SELECT `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86 -PREHOOK: query: ALTER VIEW testView AS +PREHOOK: query: ALTER VIEW tv.testView AS SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key, value LIMIT 10 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src -POSTHOOK: query: ALTER VIEW testView AS +PREHOOK: Output: database:tv +PREHOOK: Output: tv@testView +POSTHOOK: query: ALTER VIEW tv.testView AS SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key, value LIMIT 10 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src -POSTHOOK: Output: default@testview -PREHOOK: query: DESCRIBE FORMATTED testView +POSTHOOK: Output: database:tv +POSTHOOK: Output: tv@testView +POSTHOOK: Output: tv@testview +PREHOOK: query: DESCRIBE FORMATTED tv.testView PREHOOK: type: DESCTABLE -PREHOOK: Input: default@testview -POSTHOOK: query: DESCRIBE FORMATTED testView +PREHOOK: Input: tv@testview +POSTHOOK: query: DESCRIBE FORMATTED tv.testView POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@testview +POSTHOOK: Input: tv@testview # col_name data_type comment key string value string # Detailed Table Information -Database: default +Database: tv #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -134,3 +147,19 @@ View Expanded Text: SELECT `src`.`key`, `src`.`value` FROM `default`.`src` WHERE `src`.`key` > 80 AND `src`.`key` < 100 ORDER BY `src`.`key`, `src`.`value` LIMIT 10 +PREHOOK: query: DROP VIEW tv.testView +PREHOOK: type: DROPVIEW +PREHOOK: Input: tv@testview +PREHOOK: Output: tv@testview +POSTHOOK: query: DROP VIEW tv.testView +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: tv@testview +POSTHOOK: Output: tv@testview +PREHOOK: query: DROP DATABASE tv +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:tv +PREHOOK: Output: database:tv +POSTHOOK: query: DROP DATABASE tv +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:tv +POSTHOOK: Output: database:tv diff --git a/ql/src/test/results/clientpositive/alter_view_rename.q.out b/ql/src/test/results/clientpositive/alter_view_rename.q.out index 0f3dd14..300b96c 100644 --- a/ql/src/test/results/clientpositive/alter_view_rename.q.out +++ b/ql/src/test/results/clientpositive/alter_view_rename.q.out @@ -1,3 +1,15 @@ +PREHOOK: query: CREATE DATABASE tv1 +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:tv1 +POSTHOOK: query: CREATE DATABASE tv1 +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:tv1 +PREHOOK: query: CREATE DATABASE tv2 +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:tv2 +POSTHOOK: query: CREATE DATABASE tv2 +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:tv2 PREHOOK: query: CREATE TABLE invites (foo INT, bar STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -6,53 +18,56 @@ POSTHOOK: query: CREATE TABLE invites (foo INT, bar STRING) PARTITIONED BY (ds S POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@invites -PREHOOK: query: CREATE VIEW view1 as SELECT * FROM invites +PREHOOK: query: CREATE VIEW tv1.view1 as SELECT * FROM invites 
PREHOOK: type: CREATEVIEW PREHOOK: Input: default@invites -POSTHOOK: query: CREATE VIEW view1 as SELECT * FROM invites +PREHOOK: Output: database:tv1 +PREHOOK: Output: tv1@view1 +POSTHOOK: query: CREATE VIEW tv1.view1 as SELECT * FROM invites POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@invites -POSTHOOK: Output: default@view1 -PREHOOK: query: DESCRIBE EXTENDED view1 +POSTHOOK: Output: database:tv1 +POSTHOOK: Output: tv1@view1 +PREHOOK: query: DESCRIBE EXTENDED tv1.view1 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@view1 -POSTHOOK: query: DESCRIBE EXTENDED view1 +PREHOOK: Input: tv1@view1 +POSTHOOK: query: DESCRIBE EXTENDED tv1.view1 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@view1 +POSTHOOK: Input: tv1@view1 foo int bar string ds string #### A masked pattern was here #### -PREHOOK: query: ALTER VIEW view1 RENAME TO view2 +PREHOOK: query: ALTER VIEW tv1.view1 RENAME TO tv2.view2 PREHOOK: type: ALTERVIEW_RENAME -PREHOOK: Input: default@view1 -PREHOOK: Output: default@view1 -POSTHOOK: query: ALTER VIEW view1 RENAME TO view2 +PREHOOK: Input: tv1@view1 +PREHOOK: Output: tv1@view1 +POSTHOOK: query: ALTER VIEW tv1.view1 RENAME TO tv2.view2 POSTHOOK: type: ALTERVIEW_RENAME -POSTHOOK: Input: default@view1 -POSTHOOK: Output: default@view1 -POSTHOOK: Output: default@view2 -PREHOOK: query: DESCRIBE EXTENDED view2 +POSTHOOK: Input: tv1@view1 +POSTHOOK: Output: tv1@view1 +POSTHOOK: Output: tv2@view2 +PREHOOK: query: DESCRIBE EXTENDED tv2.view2 PREHOOK: type: DESCTABLE -PREHOOK: Input: default@view2 -POSTHOOK: query: DESCRIBE EXTENDED view2 +PREHOOK: Input: tv2@view2 +POSTHOOK: query: DESCRIBE EXTENDED tv2.view2 POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@view2 +POSTHOOK: Input: tv2@view2 foo int bar string ds string #### A masked pattern was here #### -PREHOOK: query: SELECT * FROM view2 +PREHOOK: query: SELECT * FROM tv2.view2 PREHOOK: type: QUERY PREHOOK: Input: default@invites -PREHOOK: Input: default@view2 +PREHOOK: Input: tv2@view2 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM view2 +POSTHOOK: query: SELECT * FROM tv2.view2 POSTHOOK: type: QUERY POSTHOOK: Input: default@invites -POSTHOOK: Input: default@view2 +POSTHOOK: Input: tv2@view2 #### A masked pattern was here #### PREHOOK: query: DROP TABLE invites PREHOOK: type: DROPTABLE @@ -62,11 +77,27 @@ POSTHOOK: query: DROP TABLE invites POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@invites POSTHOOK: Output: default@invites -PREHOOK: query: DROP VIEW view2 +PREHOOK: query: DROP VIEW tv2.view2 PREHOOK: type: DROPVIEW -PREHOOK: Input: default@view2 -PREHOOK: Output: default@view2 -POSTHOOK: query: DROP VIEW view2 +PREHOOK: Input: tv2@view2 +PREHOOK: Output: tv2@view2 +POSTHOOK: query: DROP VIEW tv2.view2 POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@view2 -POSTHOOK: Output: default@view2 +POSTHOOK: Input: tv2@view2 +POSTHOOK: Output: tv2@view2 +PREHOOK: query: DROP DATABASE tv1 +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:tv1 +PREHOOK: Output: database:tv1 +POSTHOOK: query: DROP DATABASE tv1 +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:tv1 +POSTHOOK: Output: database:tv1 +PREHOOK: query: DROP DATABASE tv2 +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:tv2 +PREHOOK: Output: database:tv2 +POSTHOOK: query: DROP DATABASE tv2 +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:tv2 +POSTHOOK: Output: database:tv2 diff --git a/ql/src/test/results/clientpositive/archive_multi.q.out b/ql/src/test/results/clientpositive/archive_multi.q.out index 7e84def..0ad29d1 
100644 --- a/ql/src/test/results/clientpositive/archive_multi.q.out +++ b/ql/src/test/results/clientpositive/archive_multi.q.out @@ -1,161 +1,159 @@ -PREHOOK: query: drop table tstsrc -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tstsrc -POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table tstsrcpart -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tstsrcpart -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table tstsrc like src +PREHOOK: query: create database ac_test +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:ac_test +POSTHOOK: query: create database ac_test +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:ac_test +PREHOOK: query: create table ac_test.tstsrc like default.src PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tstsrc -POSTHOOK: query: create table tstsrc like src +PREHOOK: Output: ac_test@tstsrc +PREHOOK: Output: database:ac_test +POSTHOOK: query: create table ac_test.tstsrc like default.src POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstsrc -PREHOOK: query: insert overwrite table tstsrc select key, value from src +POSTHOOK: Output: ac_test@tstsrc +POSTHOOK: Output: database:ac_test +PREHOOK: query: insert overwrite table ac_test.tstsrc select key, value from default.src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tstsrc -POSTHOOK: query: insert overwrite table tstsrc select key, value from src +PREHOOK: Output: ac_test@tstsrc +POSTHOOK: query: insert overwrite table ac_test.tstsrc select key, value from default.src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tstsrc +POSTHOOK: Output: ac_test@tstsrc POSTHOOK: Lineage: tstsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tstsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create table tstsrcpart like srcpart +PREHOOK: query: create table ac_test.tstsrcpart like default.srcpart PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: create table tstsrcpart like srcpart +PREHOOK: Output: ac_test@tstsrcpart +PREHOOK: Output: database:ac_test +POSTHOOK: query: create table ac_test.tstsrcpart like default.srcpart POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstsrcpart -PREHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11') -select key, value from srcpart where ds='2008-04-08' and hr='11' +POSTHOOK: Output: ac_test@tstsrcpart +POSTHOOK: Output: database:ac_test +PREHOOK: query: insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-08', hr='11') +select key, value from default.srcpart where ds='2008-04-08' and hr='11' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11') -select key, value from srcpart where ds='2008-04-08' and hr='11' +PREHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=11 +POSTHOOK: query: insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-08', hr='11') +select key, value from default.srcpart where ds='2008-04-08' and hr='11' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: 
default@tstsrcpart@ds=2008-04-08/hr=11 +POSTHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=11 POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='12') -select key, value from srcpart where ds='2008-04-08' and hr='12' +PREHOOK: query: insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-08', hr='12') +select key, value from default.srcpart where ds='2008-04-08' and hr='12' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -POSTHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='12') -select key, value from srcpart where ds='2008-04-08' and hr='12' +PREHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: query: insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-08', hr='12') +select key, value from default.srcpart where ds='2008-04-08' and hr='12' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=12 POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='11') -select key, value from srcpart where ds='2008-04-09' and hr='11' +PREHOOK: query: insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-09', hr='11') +select key, value from default.srcpart where ds='2008-04-09' and hr='11' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-09/hr=11 -POSTHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='11') -select key, value from srcpart where ds='2008-04-09' and hr='11' +PREHOOK: Output: ac_test@tstsrcpart@ds=2008-04-09/hr=11 +POSTHOOK: query: insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-09', hr='11') +select key, value from default.srcpart where ds='2008-04-09' and hr='11' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-09/hr=11 +POSTHOOK: Output: ac_test@tstsrcpart@ds=2008-04-09/hr=11 POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='12') -select key, value from srcpart where ds='2008-04-09' and hr='12' +PREHOOK: query: insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-09', hr='12') +select key, value from default.srcpart where ds='2008-04-09' and hr='12' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: 
Input: default@srcpart@ds=2008-04-09/hr=12 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-09/hr=12 -POSTHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-09', hr='12') -select key, value from srcpart where ds='2008-04-09' and hr='12' +PREHOOK: Output: ac_test@tstsrcpart@ds=2008-04-09/hr=12 +POSTHOOK: query: insert overwrite table ac_test.tstsrcpart partition (ds='2008-04-09', hr='12') +select key, value from default.srcpart where ds='2008-04-09' and hr='12' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: ac_test@tstsrcpart@ds=2008-04-09/hr=12 POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19) SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2 +FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2 PREHOOK: type: QUERY -PREHOOK: Input: default@tstsrcpart -PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: ac_test@tstsrcpart +PREHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19) SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2 +FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: ac_test@tstsrcpart +POSTHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 48479881068 -PREHOOK: query: ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08') +PREHOOK: query: ALTER TABLE ac_test.tstsrcpart ARCHIVE PARTITION (ds='2008-04-08') PREHOOK: type: ALTERTABLE_ARCHIVE -PREHOOK: Input: default@tstsrcpart -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -POSTHOOK: query: ALTER TABLE tstsrcpart ARCHIVE PARTITION (ds='2008-04-08') +PREHOOK: Input: ac_test@tstsrcpart +PREHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=11 +PREHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: query: ALTER TABLE ac_test.tstsrcpart ARCHIVE PARTITION (ds='2008-04-08') POSTHOOK: type: ALTERTABLE_ARCHIVE -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: ac_test@tstsrcpart +POSTHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=11 +POSTHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=12 PREHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2 +FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2 PREHOOK: type: QUERY 
-PREHOOK: Input: default@tstsrcpart -PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: ac_test@tstsrcpart +PREHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### POSTHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2 +FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: ac_test@tstsrcpart +POSTHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 48479881068 -PREHOOK: query: SELECT key, count(1) FROM tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key +PREHOOK: query: SELECT key, count(1) FROM ac_test.tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key PREHOOK: type: QUERY -PREHOOK: Input: default@tstsrcpart -PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: ac_test@tstsrcpart +PREHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: SELECT key, count(1) FROM tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key +POSTHOOK: query: SELECT key, count(1) FROM ac_test.tstsrcpart WHERE ds='2008-04-08' AND hr='12' AND key='0' GROUP BY key POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: ac_test@tstsrcpart +POSTHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 0 3 -PREHOOK: query: SELECT * FROM tstsrcpart a JOIN tstsrc b ON a.key=b.key +PREHOOK: query: SELECT * FROM ac_test.tstsrcpart a JOIN ac_test.tstsrc b ON a.key=b.key WHERE a.ds='2008-04-08' AND a.hr='12' AND a.key='0' PREHOOK: type: QUERY -PREHOOK: Input: default@tstsrc -PREHOOK: Input: default@tstsrcpart -PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: ac_test@tstsrc +PREHOOK: Input: ac_test@tstsrcpart +PREHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM tstsrcpart a JOIN tstsrc b ON a.key=b.key +POSTHOOK: query: SELECT * FROM ac_test.tstsrcpart a JOIN ac_test.tstsrc b ON a.key=b.key WHERE a.ds='2008-04-08' AND a.hr='12' AND a.key='0' POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstsrc -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: ac_test@tstsrc +POSTHOOK: Input: ac_test@tstsrcpart +POSTHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 0 val_0 2008-04-08 12 0 val_0 0 val_0 2008-04-08 12 0 val_0 @@ -166,28 +164,28 @@ POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 0 val_0 2008-04-08 12 0 val_0 0 val_0 2008-04-08 12 0 val_0 0 val_0 2008-04-08 12 0 val_0 -PREHOOK: query: ALTER TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08') +PREHOOK: query: ALTER TABLE ac_test.tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08') PREHOOK: type: ALTERTABLE_UNARCHIVE -PREHOOK: Input: default@tstsrcpart -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -POSTHOOK: query: ALTER 
TABLE tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08') +PREHOOK: Input: ac_test@tstsrcpart +PREHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=11 +PREHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: query: ALTER TABLE ac_test.tstsrcpart UNARCHIVE PARTITION (ds='2008-04-08') POSTHOOK: type: ALTERTABLE_UNARCHIVE -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: ac_test@tstsrcpart +POSTHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=11 +POSTHOOK: Output: ac_test@tstsrcpart@ds=2008-04-08/hr=12 PREHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2 +FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2 PREHOOK: type: QUERY -PREHOOK: Input: default@tstsrcpart -PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: ac_test@tstsrcpart +PREHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### POSTHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col -FROM (SELECT * FROM tstsrcpart WHERE ds='2008-04-08') subq1) subq2 +FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2 POSTHOOK: type: QUERY -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: ac_test@tstsrcpart +POSTHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 48479881068 diff --git a/ql/src/test/results/clientpositive/authorization_1.q.out b/ql/src/test/results/clientpositive/authorization_1.q.out index dac0820..f9f1b34 100644 --- a/ql/src/test/results/clientpositive/authorization_1.q.out +++ b/ql/src/test/results/clientpositive/authorization_1.q.out @@ -3,11 +3,14 @@ PREHOOK: query: -- SORT_BEFORE_DIFF create table src_autho_test as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_autho_test POSTHOOK: query: -- SORT_BEFORE_DIFF create table src_autho_test as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_autho_test PREHOOK: query: --table grant to user @@ -254,40 +257,40 @@ PREHOOK: type: SHOW_GRANT POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) POSTHOOK: type: SHOW_GRANT PREHOOK: query: --role -create role src_role +create role sRc_roLE PREHOOK: type: CREATEROLE POSTHOOK: query: --role -create role src_role +create role sRc_roLE POSTHOOK: type: CREATEROLE -PREHOOK: query: grant role src_role to user hive_test_user +PREHOOK: query: grant role sRc_roLE to user hive_test_user PREHOOK: type: GRANT_ROLE -POSTHOOK: query: grant role src_role to user hive_test_user +POSTHOOK: query: grant role sRc_roLE to user hive_test_user POSTHOOK: type: GRANT_ROLE PREHOOK: query: show role grant user hive_test_user PREHOOK: type: SHOW_ROLE_GRANT POSTHOOK: query: show role grant user hive_test_user POSTHOOK: type: SHOW_ROLE_GRANT public false -1 -src_role false -1 hive_test_user +sRc_roLE false -1 hive_test_user PREHOOK: query: --column grant to 
role -grant select(key) on table src_autho_test to role src_role +grant select(key) on table src_autho_test to role sRc_roLE PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test POSTHOOK: query: --column grant to role -grant select(key) on table src_autho_test to role src_role +grant select(key) on table src_autho_test to role sRc_roLE POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test -PREHOOK: query: show grant role src_role on table src_autho_test +PREHOOK: query: show grant role sRc_roLE on table src_autho_test PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant role src_role on table src_autho_test +POSTHOOK: query: show grant role sRc_roLE on table src_autho_test POSTHOOK: type: SHOW_GRANT -PREHOOK: query: show grant role src_role on table src_autho_test(key) +PREHOOK: query: show grant role sRc_roLE on table src_autho_test(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant role src_role on table src_autho_test(key) +POSTHOOK: query: show grant role sRc_roLE on table src_autho_test(key) POSTHOOK: type: SHOW_GRANT -default src_autho_test [key] src_role ROLE SELECT false -1 hive_test_user +default src_autho_test [key] sRc_roLE ROLE SELECT false -1 hive_test_user PREHOOK: query: select key from src_autho_test order by key limit 20 PREHOOK: type: QUERY PREHOOK: Input: default@src_autho_test @@ -316,20 +319,20 @@ POSTHOOK: Input: default@src_autho_test 118 118 119 -PREHOOK: query: revoke select(key) on table src_autho_test from role src_role +PREHOOK: query: revoke select(key) on table src_autho_test from role sRc_roLE PREHOOK: type: REVOKE_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: revoke select(key) on table src_autho_test from role src_role +POSTHOOK: query: revoke select(key) on table src_autho_test from role sRc_roLE POSTHOOK: type: REVOKE_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: --table grant to role -grant select on table src_autho_test to role src_role +grant select on table src_autho_test to role sRc_roLE PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test POSTHOOK: query: --table grant to role -grant select on table src_autho_test to role src_role +grant select on table src_autho_test to role sRc_roLE POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: select key from src_autho_test order by key limit 20 @@ -360,26 +363,26 @@ POSTHOOK: Input: default@src_autho_test 118 118 119 -PREHOOK: query: show grant role src_role on table src_autho_test +PREHOOK: query: show grant role sRc_roLE on table src_autho_test PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant role src_role on table src_autho_test +POSTHOOK: query: show grant role sRc_roLE on table src_autho_test POSTHOOK: type: SHOW_GRANT -default src_autho_test src_role ROLE SELECT false -1 hive_test_user -PREHOOK: query: show grant role src_role on table src_autho_test(key) +default src_autho_test sRc_roLE ROLE SELECT false -1 hive_test_user +PREHOOK: query: show grant role sRc_roLE on table src_autho_test(key) PREHOOK: type: SHOW_GRANT -POSTHOOK: query: show grant role src_role on table src_autho_test(key) +POSTHOOK: query: show grant role sRc_roLE on table src_autho_test(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: revoke select on table src_autho_test from role src_role +PREHOOK: query: revoke select on table src_autho_test from role sRc_roLE PREHOOK: type: REVOKE_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: revoke select on table 
src_autho_test from role src_role +POSTHOOK: query: revoke select on table src_autho_test from role sRc_roLE POSTHOOK: type: REVOKE_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: -- drop role -drop role src_role +drop role sRc_roLE PREHOOK: type: DROPROLE POSTHOOK: query: -- drop role -drop role src_role +drop role sRc_roLE POSTHOOK: type: DROPROLE PREHOOK: query: drop table src_autho_test PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/authorization_2.q.out b/ql/src/test/results/clientpositive/authorization_2.q.out index 0da0ec2..e92763c 100644 --- a/ql/src/test/results/clientpositive/authorization_2.q.out +++ b/ql/src/test/results/clientpositive/authorization_2.q.out @@ -13,9 +13,12 @@ POSTHOOK: Output: default@authorization_part PREHOOK: query: create table src_auth_tmp as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_auth_tmp POSTHOOK: query: create table src_auth_tmp as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_auth_tmp PREHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE") PREHOOK: type: ALTERTABLE_PROPERTIES diff --git a/ql/src/test/results/clientpositive/authorization_3.q.out b/ql/src/test/results/clientpositive/authorization_3.q.out index 8de1daf..2b53233 100644 --- a/ql/src/test/results/clientpositive/authorization_3.q.out +++ b/ql/src/test/results/clientpositive/authorization_3.q.out @@ -3,11 +3,14 @@ PREHOOK: query: -- SORT_BEFORE_DIFF create table src_autho_test as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_autho_test POSTHOOK: query: -- SORT_BEFORE_DIFF create table src_autho_test as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_autho_test PREHOOK: query: grant drop on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE diff --git a/ql/src/test/results/clientpositive/authorization_4.q.out b/ql/src/test/results/clientpositive/authorization_4.q.out index 881c128..67a30fd 100644 --- a/ql/src/test/results/clientpositive/authorization_4.q.out +++ b/ql/src/test/results/clientpositive/authorization_4.q.out @@ -3,11 +3,14 @@ PREHOOK: query: -- SORT_BEFORE_DIFF create table src_autho_test as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_autho_test POSTHOOK: query: -- SORT_BEFORE_DIFF create table src_autho_test as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_autho_test PREHOOK: query: grant All on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE diff --git a/ql/src/test/results/clientpositive/authorization_5.q.out b/ql/src/test/results/clientpositive/authorization_5.q.out index 6e5187e..7917dba 100644 --- a/ql/src/test/results/clientpositive/authorization_5.q.out +++ b/ql/src/test/results/clientpositive/authorization_5.q.out @@ -28,34 +28,34 @@ POSTHOOK: query: SHOW GRANT USER hive_test_user ON DATABASE test_db POSTHOOK: type: SHOW_GRANT test_db hive_test_user USER DROP false -1 hive_test_user test_db hive_test_user USER SELECT 
false -1 hive_test_user -PREHOOK: query: CREATE ROLE db_test_role +PREHOOK: query: CREATE ROLE db_TEST_Role PREHOOK: type: CREATEROLE -POSTHOOK: query: CREATE ROLE db_test_role +POSTHOOK: query: CREATE ROLE db_TEST_Role POSTHOOK: type: CREATEROLE -PREHOOK: query: GRANT ROLE db_test_role TO USER hive_test_user +PREHOOK: query: GRANT ROLE db_TEST_Role TO USER hive_test_user PREHOOK: type: GRANT_ROLE -POSTHOOK: query: GRANT ROLE db_test_role TO USER hive_test_user +POSTHOOK: query: GRANT ROLE db_TEST_Role TO USER hive_test_user POSTHOOK: type: GRANT_ROLE PREHOOK: query: SHOW ROLE GRANT USER hive_test_user PREHOOK: type: SHOW_ROLE_GRANT POSTHOOK: query: SHOW ROLE GRANT USER hive_test_user POSTHOOK: type: SHOW_ROLE_GRANT -db_test_role false -1 hive_test_user +db_TEST_Role false -1 hive_test_user public false -1 -PREHOOK: query: GRANT drop ON DATABASE test_db TO ROLE db_test_role +PREHOOK: query: GRANT drop ON DATABASE test_db TO ROLE db_TEST_Role PREHOOK: type: GRANT_PRIVILEGE -POSTHOOK: query: GRANT drop ON DATABASE test_db TO ROLE db_test_role +POSTHOOK: query: GRANT drop ON DATABASE test_db TO ROLE db_TEST_Role POSTHOOK: type: GRANT_PRIVILEGE -PREHOOK: query: GRANT select ON DATABASE test_db TO ROLE db_test_role +PREHOOK: query: GRANT select ON DATABASE test_db TO ROLE db_TEST_Role PREHOOK: type: GRANT_PRIVILEGE -POSTHOOK: query: GRANT select ON DATABASE test_db TO ROLE db_test_role +POSTHOOK: query: GRANT select ON DATABASE test_db TO ROLE db_TEST_Role POSTHOOK: type: GRANT_PRIVILEGE -PREHOOK: query: SHOW GRANT ROLE db_test_role ON DATABASE test_db +PREHOOK: query: SHOW GRANT ROLE db_TEST_Role ON DATABASE test_db PREHOOK: type: SHOW_GRANT -POSTHOOK: query: SHOW GRANT ROLE db_test_role ON DATABASE test_db +POSTHOOK: query: SHOW GRANT ROLE db_TEST_Role ON DATABASE test_db POSTHOOK: type: SHOW_GRANT -test_db db_test_role ROLE DROP false -1 hive_test_user -test_db db_test_role ROLE SELECT false -1 hive_test_user +test_db db_TEST_Role ROLE DROP false -1 hive_test_user +test_db db_TEST_Role ROLE SELECT false -1 hive_test_user PREHOOK: query: DROP DATABASE IF EXISTS test_db PREHOOK: type: DROPDATABASE PREHOOK: Input: database:test_db diff --git a/ql/src/test/results/clientpositive/authorization_6.q.out b/ql/src/test/results/clientpositive/authorization_6.q.out index 02c4109..0341094 100644 --- a/ql/src/test/results/clientpositive/authorization_6.q.out +++ b/ql/src/test/results/clientpositive/authorization_6.q.out @@ -3,11 +3,14 @@ PREHOOK: query: -- SORT_BEFORE_DIFF create table src_auth_tmp as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_auth_tmp POSTHOOK: query: -- SORT_BEFORE_DIFF create table src_auth_tmp as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_auth_tmp PREHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/authorization_8.q.out b/ql/src/test/results/clientpositive/authorization_8.q.out index 47791fd..9fed5dc 100644 --- a/ql/src/test/results/clientpositive/authorization_8.q.out +++ b/ql/src/test/results/clientpositive/authorization_8.q.out @@ -13,9 +13,12 @@ POSTHOOK: Output: default@tbl_j5jbymsx8e PREHOOK: query: CREATE VIEW view_j5jbymsx8e_1 as SELECT * FROM tbl_j5jbymsx8e PREHOOK: type: CREATEVIEW PREHOOK: Input: default@tbl_j5jbymsx8e +PREHOOK: Output: 
database:default +PREHOOK: Output: default@view_j5jbymsx8e_1 POSTHOOK: query: CREATE VIEW view_j5jbymsx8e_1 as SELECT * FROM tbl_j5jbymsx8e POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@tbl_j5jbymsx8e +POSTHOOK: Output: database:default POSTHOOK: Output: default@view_j5jbymsx8e_1 PREHOOK: query: DESCRIBE view_j5jbymsx8e_1 PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/authorization_cli_createtab.q.out b/ql/src/test/results/clientpositive/authorization_cli_createtab.q.out index 79a0a95..a75d64b 100644 --- a/ql/src/test/results/clientpositive/authorization_cli_createtab.q.out +++ b/ql/src/test/results/clientpositive/authorization_cli_createtab.q.out @@ -23,9 +23,12 @@ default t_cli hive_test_user USER UPDATE true -1 hive_test_user PREHOOK: query: create view v_cli (i) as select i from t_cli PREHOOK: type: CREATEVIEW PREHOOK: Input: default@t_cli +PREHOOK: Output: database:default +PREHOOK: Output: default@v_cli POSTHOOK: query: create view v_cli (i) as select i from t_cli POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@t_cli +POSTHOOK: Output: database:default POSTHOOK: Output: default@v_cli PREHOOK: query: show grant user hive_test_user on v_cli PREHOOK: type: SHOW_GRANT diff --git a/ql/src/test/results/clientpositive/authorization_create_temp_table.q.out b/ql/src/test/results/clientpositive/authorization_create_temp_table.q.out index b171d6c..c87862d 100644 --- a/ql/src/test/results/clientpositive/authorization_create_temp_table.q.out +++ b/ql/src/test/results/clientpositive/authorization_create_temp_table.q.out @@ -1,9 +1,12 @@ PREHOOK: query: create table authorization_create_temp_table_1 as select * from src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@authorization_create_temp_table_1 POSTHOOK: query: create table authorization_create_temp_table_1 as select * from src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@authorization_create_temp_table_1 PREHOOK: query: grant select on authorization_create_temp_table_1 to user user1 PREHOOK: type: GRANT_PRIVILEGE diff --git a/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out b/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out index 17b6c8a..2b7b3ad 100644 --- a/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out +++ b/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out @@ -12,12 +12,12 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@t_gpr1 PREHOOK: query: -- all privileges should have been set for user -GRANT ALL ON t_gpr1 TO ROLE public +GRANT ALL ON t_gpr1 TO ROLE pubLic PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@t_gpr1 POSTHOOK: query: -- all privileges should have been set for user -GRANT ALL ON t_gpr1 TO ROLE public +GRANT ALL ON t_gpr1 TO ROLE pubLic POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@t_gpr1 PREHOOK: query: SHOW GRANT USER user1 ON TABLE t_gpr1 @@ -28,9 +28,9 @@ default t_gpr1 user1 USER DELETE true -1 user1 default t_gpr1 user1 USER INSERT true -1 user1 default t_gpr1 user1 USER SELECT true -1 user1 default t_gpr1 user1 USER UPDATE true -1 user1 -PREHOOK: query: SHOW GRANT ROLE public ON TABLE t_gpr1 +PREHOOK: query: SHOW GRANT ROLE pubLic ON TABLE t_gpr1 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: SHOW GRANT ROLE public ON TABLE t_gpr1 +POSTHOOK: query: SHOW GRANT ROLE pubLic ON TABLE 
t_gpr1 POSTHOOK: type: SHOW_GRANT default t_gpr1 public ROLE DELETE false -1 user1 default t_gpr1 public ROLE INSERT false -1 user1 diff --git a/ql/src/test/results/clientpositive/authorization_owner_actions.q.out b/ql/src/test/results/clientpositive/authorization_owner_actions.q.out index ad81888..41d339c 100644 --- a/ql/src/test/results/clientpositive/authorization_owner_actions.q.out +++ b/ql/src/test/results/clientpositive/authorization_owner_actions.q.out @@ -35,9 +35,12 @@ POSTHOOK: Output: default@t1 PREHOOK: query: create view vt1 as select * from t1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@vt1 POSTHOOK: query: create view vt1 as select * from t1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@t1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@vt1 PREHOOK: query: drop view vt1 PREHOOK: type: DROPVIEW diff --git a/ql/src/test/results/clientpositive/authorization_role_grant2.q.out b/ql/src/test/results/clientpositive/authorization_role_grant2.q.out index 56e7667..27d4f33 100644 --- a/ql/src/test/results/clientpositive/authorization_role_grant2.q.out +++ b/ql/src/test/results/clientpositive/authorization_role_grant2.q.out @@ -60,9 +60,9 @@ PREHOOK: type: GRANT_ROLE POSTHOOK: query: -- grant role to another role grant src_role_wadmin to role sRc_role2 POSTHOOK: type: GRANT_ROLE -PREHOOK: query: set role ADMIN +PREHOOK: query: set role ADMIn PREHOOK: type: SHOW_ROLES -POSTHOOK: query: set role ADMIN +POSTHOOK: query: set role ADMIn POSTHOOK: type: SHOW_ROLES PREHOOK: query: grant src_role2 to user user3 PREHOOK: type: GRANT_ROLE diff --git a/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out b/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out index 279554d..39367be 100644 --- a/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out @@ -22,19 +22,25 @@ PREHOOK: query: -- protecting certain columns create view vt1 as select i,k from t1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@vt1 POSTHOOK: query: -- protecting certain columns create view vt1 as select i,k from t1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@t1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@vt1 PREHOOK: query: -- protecting certain rows create view vt2 as select * from t1 where i > 1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@vt2 POSTHOOK: query: -- protecting certain rows create view vt2 as select * from t1 where i > 1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@t1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@vt2 PREHOOK: query: show grant user user1 on all PREHOOK: type: SHOW_GRANT diff --git a/ql/src/test/results/clientpositive/autogen_colalias.q.out b/ql/src/test/results/clientpositive/autogen_colalias.q.out index 925b3b5..c748b27 100644 --- a/ql/src/test/results/clientpositive/autogen_colalias.q.out +++ b/ql/src/test/results/clientpositive/autogen_colalias.q.out @@ -10,12 +10,15 @@ PREHOOK: query: create table dest_grouped_old1 as select 1+1, 2+2 as zz, src.key from src group by src.key PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@dest_grouped_old1 POSTHOOK: query: create table dest_grouped_old1 as select 1+1, 2+2 as zz, src.key, 
test_max(length(src.value)), count(src.value), sin(count(src.value)), count(sin(src.value)), unix_timestamp(), CAST(SUM(IF(value > 10, value, 1)) AS INT), if(src.key > 1, 1, 0) from src group by src.key POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_grouped_old1 PREHOOK: query: describe dest_grouped_old1 PREHOOK: type: DESCTABLE @@ -36,9 +39,12 @@ _c9 int PREHOOK: query: create table dest_grouped_old2 as select distinct src.key from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@dest_grouped_old2 POSTHOOK: query: create table dest_grouped_old2 as select distinct src.key from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_grouped_old2 PREHOOK: query: describe dest_grouped_old2 PREHOOK: type: DESCTABLE @@ -53,12 +59,15 @@ PREHOOK: query: create table dest_grouped_new1 as select 1+1, 2+2 as zz, ((src.k from src group by src.key PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@dest_grouped_new1 POSTHOOK: query: create table dest_grouped_new1 as select 1+1, 2+2 as zz, ((src.key % 2)+2)/2, test_max(length(src.value)), count(src.value), sin(count(src.value)), count(sin(src.value)), unix_timestamp(), CAST(SUM(IF(value > 10, value, 1)) AS INT), if(src.key > 10, (src.key +5) % 2, 0) from src group by src.key POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_grouped_new1 PREHOOK: query: describe dest_grouped_new1 PREHOOK: type: DESCTABLE @@ -79,9 +88,12 @@ if_src_key_10_src_ke_9 double PREHOOK: query: create table dest_grouped_new2 as select distinct src.key from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@dest_grouped_new2 POSTHOOK: query: create table dest_grouped_new2 as select distinct src.key from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_grouped_new2 PREHOOK: query: describe dest_grouped_new2 PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/char_nested_types.q.out b/ql/src/test/results/clientpositive/char_nested_types.q.out index 66b7bb6..07bf3c3 100644 --- a/ql/src/test/results/clientpositive/char_nested_types.q.out +++ b/ql/src/test/results/clientpositive/char_nested_types.q.out @@ -164,11 +164,14 @@ create table char_nested_cta as select * from char_nested_struct PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@char_nested_struct +PREHOOK: Output: database:default +PREHOOK: Output: default@char_nested_cta POSTHOOK: query: -- nested type with create table as create table char_nested_cta as select * from char_nested_struct POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@char_nested_struct +POSTHOOK: Output: database:default POSTHOOK: Output: default@char_nested_cta PREHOOK: query: describe char_nested_cta PREHOOK: type: DESCTABLE @@ -191,11 +194,14 @@ create table char_nested_view as select * from char_nested_struct PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@char_nested_struct +PREHOOK: Output: database:default +PREHOOK: Output: default@char_nested_view POSTHOOK: query: -- nested type with view create table char_nested_view as select 
* from char_nested_struct POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@char_nested_struct +POSTHOOK: Output: database:default POSTHOOK: Output: default@char_nested_view PREHOOK: query: describe char_nested_view PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/colstats_all_nulls.q.out b/ql/src/test/results/clientpositive/colstats_all_nulls.q.out index 1186aa8..a16680b 100644 --- a/ql/src/test/results/clientpositive/colstats_all_nulls.q.out +++ b/ql/src/test/results/clientpositive/colstats_all_nulls.q.out @@ -17,9 +17,12 @@ POSTHOOK: Output: default@src_null PREHOOK: query: create table all_nulls as SELECT a, cast(a as double) as b, cast(a as decimal) as c FROM src_null where a is null limit 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src_null +PREHOOK: Output: database:default +PREHOOK: Output: default@all_nulls POSTHOOK: query: create table all_nulls as SELECT a, cast(a as double) as b, cast(a as decimal) as c FROM src_null where a is null limit 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src_null +POSTHOOK: Output: database:default POSTHOOK: Output: default@all_nulls PREHOOK: query: analyze table all_nulls compute statistics for columns PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/create_big_view.q.out b/ql/src/test/results/clientpositive/create_big_view.q.out index 9efa517..020ac90 100644 --- a/ql/src/test/results/clientpositive/create_big_view.q.out +++ b/ql/src/test/results/clientpositive/create_big_view.q.out @@ -243,6 +243,8 @@ CREATE VIEW big_view AS SELECT FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@big_view POSTHOOK: query: -- Define a view with long SQL text to test metastore and other limits. 
CREATE VIEW big_view AS SELECT @@ -484,6 +486,7 @@ CREATE VIEW big_view AS SELECT FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@big_view PREHOOK: query: SELECT a FROM big_view LIMIT 1 diff --git a/ql/src/test/results/clientpositive/create_default_prop.q.out b/ql/src/test/results/clientpositive/create_default_prop.q.out index c841407..4b267ae 100644 --- a/ql/src/test/results/clientpositive/create_default_prop.q.out +++ b/ql/src/test/results/clientpositive/create_default_prop.q.out @@ -35,9 +35,12 @@ a string PREHOOK: query: CREATE TABLE table_p3 AS SELECT * FROM table_p1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@table_p1 +PREHOOK: Output: database:default +PREHOOK: Output: default@table_p3 POSTHOOK: query: CREATE TABLE table_p3 AS SELECT * FROM table_p1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@table_p1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@table_p3 PREHOOK: query: DESC EXTENDED table_p3 PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/create_like_tbl_props.q.out b/ql/src/test/results/clientpositive/create_like_tbl_props.q.out index 2ac2847..e151897 100644 --- a/ql/src/test/results/clientpositive/create_like_tbl_props.q.out +++ b/ql/src/test/results/clientpositive/create_like_tbl_props.q.out @@ -181,11 +181,14 @@ PREHOOK: query: --Test that CREATE TALBE LIKE on a view can take explicit table CREATE VIEW test_view (key, value) AS SELECT * FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@test_view POSTHOOK: query: --Test that CREATE TALBE LIKE on a view can take explicit table properties CREATE VIEW test_view (key, value) AS SELECT * FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@test_view PREHOOK: query: CREATE TABLE test_table4 LIKE test_view TBLPROPERTIES('key'='value') PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/create_like_view.q.out b/ql/src/test/results/clientpositive/create_like_view.q.out index bcd35d6..0978fcf 100644 --- a/ql/src/test/results/clientpositive/create_like_view.q.out +++ b/ql/src/test/results/clientpositive/create_like_view.q.out @@ -68,9 +68,12 @@ Storage Desc Params: PREHOOK: query: CREATE VIEW view1 AS SELECT * FROM table1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@table1 +PREHOOK: Output: database:default +PREHOOK: Output: default@view1 POSTHOOK: query: CREATE VIEW view1 AS SELECT * FROM table1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@table1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@view1 PREHOOK: query: CREATE TABLE table2 LIKE view1 PREHOOK: type: CREATETABLE @@ -242,10 +245,13 @@ PREHOOK: query: -- check partitions create view view1 partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart +PREHOOK: Output: database:default +PREHOOK: Output: default@view1 POSTHOOK: query: -- check partitions create view view1 partitioned on (ds, hr) as select * from srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart +POSTHOOK: Output: database:default POSTHOOK: Output: default@view1 PREHOOK: query: create table table1 like view1 PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/create_or_replace_view.q.out b/ql/src/test/results/clientpositive/create_or_replace_view.q.out index 52ff417..aab78ba 100644 --- 
a/ql/src/test/results/clientpositive/create_or_replace_view.q.out +++ b/ql/src/test/results/clientpositive/create_or_replace_view.q.out @@ -1,20 +1,25 @@ -PREHOOK: query: drop view v -PREHOOK: type: DROPVIEW -POSTHOOK: query: drop view v -POSTHOOK: type: DROPVIEW -PREHOOK: query: create view v as select * from srcpart +PREHOOK: query: create database vt +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:vt +POSTHOOK: query: create database vt +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:vt +PREHOOK: query: create view vt.v as select * from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart -POSTHOOK: query: create view v as select * from srcpart +PREHOOK: Output: database:vt +PREHOOK: Output: vt@v +POSTHOOK: query: create view vt.v as select * from srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@v -PREHOOK: query: describe formatted v +POSTHOOK: Output: database:vt +POSTHOOK: Output: vt@v +PREHOOK: query: describe formatted vt.v PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: describe formatted v +PREHOOK: Input: vt@v +POSTHOOK: query: describe formatted vt.v POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: vt@v # col_name data_type comment key string @@ -23,7 +28,7 @@ ds string hr string # Detailed Table Information -Database: default +Database: vt #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -44,61 +49,64 @@ Sort Columns: [] View Original Text: select * from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` PREHOOK: query: -- modifying definition of unpartitioned view -create or replace view v partitioned on (ds, hr) as select * from srcpart +create or replace view vt.v partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart +PREHOOK: Output: database:vt +PREHOOK: Output: vt@v POSTHOOK: query: -- modifying definition of unpartitioned view -create or replace view v partitioned on (ds, hr) as select * from srcpart +create or replace view vt.v partitioned on (ds, hr) as select * from srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@v -PREHOOK: query: alter view v add partition (ds='2008-04-08',hr='11') +POSTHOOK: Output: database:vt +POSTHOOK: Output: vt@v +PREHOOK: query: alter view vt.v add partition (ds='2008-04-08',hr='11') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: alter view v add partition (ds='2008-04-08',hr='11') +PREHOOK: Input: vt@v +PREHOOK: Output: vt@v +POSTHOOK: query: alter view vt.v add partition (ds='2008-04-08',hr='11') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -POSTHOOK: Output: default@v@ds=2008-04-08/hr=11 -PREHOOK: query: alter view v add partition (ds='2008-04-08',hr='12') +POSTHOOK: Input: vt@v +POSTHOOK: Output: vt@v +POSTHOOK: Output: vt@v@ds=2008-04-08/hr=11 +PREHOOK: query: alter view vt.v add partition (ds='2008-04-08',hr='12') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: alter view v add partition 
(ds='2008-04-08',hr='12') +PREHOOK: Input: vt@v +PREHOOK: Output: vt@v +POSTHOOK: query: alter view vt.v add partition (ds='2008-04-08',hr='12') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v -POSTHOOK: Output: default@v@ds=2008-04-08/hr=12 -PREHOOK: query: select * from v where value='val_409' and ds='2008-04-08' and hr='11' +POSTHOOK: Input: vt@v +POSTHOOK: Output: vt@v +POSTHOOK: Output: vt@v@ds=2008-04-08/hr=12 +PREHOOK: query: select * from vt.v where value='val_409' and ds='2008-04-08' and hr='11' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@v +PREHOOK: Input: vt@v #### A masked pattern was here #### -POSTHOOK: query: select * from v where value='val_409' and ds='2008-04-08' and hr='11' +POSTHOOK: query: select * from vt.v where value='val_409' and ds='2008-04-08' and hr='11' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@v +POSTHOOK: Input: vt@v #### A masked pattern was here #### 409 val_409 2008-04-08 11 409 val_409 2008-04-08 11 409 val_409 2008-04-08 11 -PREHOOK: query: describe formatted v +PREHOOK: query: describe formatted vt.v PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: describe formatted v +PREHOOK: Input: vt@v +POSTHOOK: query: describe formatted vt.v POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: vt@v # col_name data_type comment key string @@ -111,7 +119,7 @@ ds string hr string # Detailed Table Information -Database: default +Database: vt #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -131,44 +139,69 @@ Sort Columns: [] # View Information View Original Text: select * from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` -PREHOOK: query: show partitions v +PREHOOK: query: show partitions vt.v PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@v -POSTHOOK: query: show partitions v +PREHOOK: Input: vt@v +POSTHOOK: query: show partitions vt.v POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@v +POSTHOOK: Input: vt@v ds=2008-04-08/hr=11 ds=2008-04-08/hr=12 +PREHOOK: query: alter view vt.v drop partition (ds='2008-04-08',hr='11') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: vt@v +PREHOOK: Output: vt@v@ds=2008-04-08/hr=11 +POSTHOOK: query: alter view vt.v drop partition (ds='2008-04-08',hr='11') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: vt@v +POSTHOOK: Output: vt@v@ds=2008-04-08/hr=11 +PREHOOK: query: alter view vt.v drop partition (ds='2008-04-08',hr='12') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: vt@v +PREHOOK: Output: vt@v@ds=2008-04-08/hr=12 +POSTHOOK: query: alter view vt.v drop partition (ds='2008-04-08',hr='12') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: vt@v +POSTHOOK: Output: vt@v@ds=2008-04-08/hr=12 +PREHOOK: query: show partitions vt.v +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: vt@v +POSTHOOK: query: show partitions vt.v +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: vt@v PREHOOK: query: -- altering partitioned view 1 -create or replace view v partitioned on (ds, hr) as select value, ds, hr from srcpart +create or replace view vt.v partitioned on (ds, hr) as select value, ds, hr from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart 
+PREHOOK: Output: database:vt +PREHOOK: Output: vt@v POSTHOOK: query: -- altering partitioned view 1 -create or replace view v partitioned on (ds, hr) as select value, ds, hr from srcpart +create or replace view vt.v partitioned on (ds, hr) as select value, ds, hr from srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@v -PREHOOK: query: select * from v where value='val_409' and ds='2008-04-08' and hr='11' +POSTHOOK: Output: database:vt +POSTHOOK: Output: vt@v +PREHOOK: query: select * from vt.v where value='val_409' and ds='2008-04-08' and hr='11' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@v +PREHOOK: Input: vt@v #### A masked pattern was here #### -POSTHOOK: query: select * from v where value='val_409' and ds='2008-04-08' and hr='11' +POSTHOOK: query: select * from vt.v where value='val_409' and ds='2008-04-08' and hr='11' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@v +POSTHOOK: Input: vt@v #### A masked pattern was here #### val_409 2008-04-08 11 val_409 2008-04-08 11 val_409 2008-04-08 11 -PREHOOK: query: describe formatted v +PREHOOK: query: describe formatted vt.v PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: describe formatted v +PREHOOK: Input: vt@v +POSTHOOK: query: describe formatted vt.v POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: vt@v # col_name data_type comment value string @@ -180,7 +213,7 @@ ds string hr string # Detailed Table Information -Database: default +Database: vt #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -200,44 +233,45 @@ Sort Columns: [] # View Information View Original Text: select value, ds, hr from srcpart View Expanded Text: select `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` -PREHOOK: query: show partitions v +PREHOOK: query: show partitions vt.v PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@v -POSTHOOK: query: show partitions v +PREHOOK: Input: vt@v +POSTHOOK: query: show partitions vt.v POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@v -ds=2008-04-08/hr=11 -ds=2008-04-08/hr=12 +POSTHOOK: Input: vt@v PREHOOK: query: -- altering partitioned view 2 -create or replace view v partitioned on (ds, hr) as select key, value, ds, hr from srcpart +create or replace view vt.v partitioned on (ds, hr) as select key, value, ds, hr from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart +PREHOOK: Output: database:vt +PREHOOK: Output: vt@v POSTHOOK: query: -- altering partitioned view 2 -create or replace view v partitioned on (ds, hr) as select key, value, ds, hr from srcpart +create or replace view vt.v partitioned on (ds, hr) as select key, value, ds, hr from srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@v -PREHOOK: query: select * from v where value='val_409' and ds='2008-04-08' and hr='11' +POSTHOOK: Output: database:vt +POSTHOOK: Output: vt@v +PREHOOK: query: select * from vt.v where value='val_409' and ds='2008-04-08' and hr='11' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -PREHOOK: Input: default@v +PREHOOK: Input: vt@v #### A masked pattern was here #### -POSTHOOK: query: select * from v where value='val_409' and ds='2008-04-08' and hr='11' +POSTHOOK: query: select * from vt.v where value='val_409' 
and ds='2008-04-08' and hr='11' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 -POSTHOOK: Input: default@v +POSTHOOK: Input: vt@v #### A masked pattern was here #### 409 val_409 2008-04-08 11 409 val_409 2008-04-08 11 409 val_409 2008-04-08 11 -PREHOOK: query: describe formatted v +PREHOOK: query: describe formatted vt.v PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: describe formatted v +PREHOOK: Input: vt@v +POSTHOOK: query: describe formatted vt.v POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: vt@v # col_name data_type comment key string @@ -250,7 +284,7 @@ ds string hr string # Detailed Table Information -Database: default +Database: vt #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -270,22 +304,20 @@ Sort Columns: [] # View Information View Original Text: select key, value, ds, hr from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` -PREHOOK: query: show partitions v +PREHOOK: query: show partitions vt.v PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@v -POSTHOOK: query: show partitions v +PREHOOK: Input: vt@v +POSTHOOK: query: show partitions vt.v POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@v -ds=2008-04-08/hr=11 -ds=2008-04-08/hr=12 -PREHOOK: query: drop view v +POSTHOOK: Input: vt@v +PREHOOK: query: drop view vt.v PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: vt@v +PREHOOK: Output: vt@v +POSTHOOK: query: drop view vt.v POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v +POSTHOOK: Input: vt@v +POSTHOOK: Output: vt@v PREHOOK: query: -- updating to fix view with invalid definition create table srcpart_temp like srcpart PREHOOK: type: CREATETABLE @@ -296,13 +328,16 @@ create table srcpart_temp like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_temp -PREHOOK: query: create view v partitioned on (ds, hr) as select * from srcpart_temp +PREHOOK: query: create view vt.v partitioned on (ds, hr) as select * from srcpart_temp PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart_temp -POSTHOOK: query: create view v partitioned on (ds, hr) as select * from srcpart_temp +PREHOOK: Output: database:vt +PREHOOK: Output: vt@v +POSTHOOK: query: create view vt.v partitioned on (ds, hr) as select * from srcpart_temp POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart_temp -POSTHOOK: Output: default@v +POSTHOOK: Output: database:vt +POSTHOOK: Output: vt@v PREHOOK: query: drop table srcpart_temp PREHOOK: type: DROPTABLE PREHOOK: Input: default@srcpart_temp @@ -311,21 +346,24 @@ POSTHOOK: query: drop table srcpart_temp POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@srcpart_temp POSTHOOK: Output: default@srcpart_temp -PREHOOK: query: -- v is now invalid -create or replace view v partitioned on (ds, hr) as select * from srcpart +PREHOOK: query: -- vt.v is now invalid +create or replace view vt.v partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart -POSTHOOK: query: -- v is now invalid -create or replace view v partitioned on (ds, hr) as select * from srcpart +PREHOOK: Output: database:vt +PREHOOK: Output: vt@v +POSTHOOK: query: -- vt.v is now invalid +create or replace view vt.v partitioned on (ds, hr) as select * from srcpart POSTHOOK: type: 
CREATEVIEW POSTHOOK: Input: default@srcpart -POSTHOOK: Output: default@v -PREHOOK: query: describe formatted v +POSTHOOK: Output: database:vt +POSTHOOK: Output: vt@v +PREHOOK: query: describe formatted vt.v PREHOOK: type: DESCTABLE -PREHOOK: Input: default@v -POSTHOOK: query: describe formatted v +PREHOOK: Input: vt@v +POSTHOOK: query: describe formatted vt.v POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@v +POSTHOOK: Input: vt@v # col_name data_type comment key string @@ -338,7 +376,7 @@ ds string hr string # Detailed Table Information -Database: default +Database: vt #### A masked pattern was here #### Protect Mode: None Retention: 0 @@ -358,11 +396,19 @@ Sort Columns: [] # View Information View Original Text: select * from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` -PREHOOK: query: drop view v +PREHOOK: query: drop view vt.v PREHOOK: type: DROPVIEW -PREHOOK: Input: default@v -PREHOOK: Output: default@v -POSTHOOK: query: drop view v +PREHOOK: Input: vt@v +PREHOOK: Output: vt@v +POSTHOOK: query: drop view vt.v POSTHOOK: type: DROPVIEW -POSTHOOK: Input: default@v -POSTHOOK: Output: default@v +POSTHOOK: Input: vt@v +POSTHOOK: Output: vt@v +PREHOOK: query: drop database vt +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:vt +PREHOOK: Output: database:vt +POSTHOOK: query: drop database vt +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:vt +POSTHOOK: Output: database:vt diff --git a/ql/src/test/results/clientpositive/create_union_table.q.out b/ql/src/test/results/clientpositive/create_union_table.q.out index a3f0918..f2aedd1 100644 --- a/ql/src/test/results/clientpositive/create_union_table.q.out +++ b/ql/src/test/results/clientpositive/create_union_table.q.out @@ -15,7 +15,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: abc + name: default.abc PREHOOK: query: create table abc(mydata uniontype,struct>, strct struct) diff --git a/ql/src/test/results/clientpositive/create_view.q.out b/ql/src/test/results/clientpositive/create_view.q.out index 45bae20..9f665d6 100644 --- a/ql/src/test/results/clientpositive/create_view.q.out +++ b/ql/src/test/results/clientpositive/create_view.q.out @@ -86,27 +86,36 @@ POSTHOOK: Input: default@src PREHOOK: query: CREATE VIEW view1 AS SELECT value FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view1 POSTHOOK: query: CREATE VIEW view1 AS SELECT value FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view1 PREHOOK: query: CREATE VIEW view2 AS SELECT * FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view2 POSTHOOK: query: CREATE VIEW view2 AS SELECT * FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view2 PREHOOK: query: CREATE VIEW view3(valoo) TBLPROPERTIES ("fear" = "factor") AS SELECT upper(value) FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view3 POSTHOOK: query: CREATE VIEW view3(valoo) TBLPROPERTIES ("fear" = "factor") AS SELECT upper(value) FROM src WHERE key=86 POSTHOOK: 
type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view3 PREHOOK: query: SELECT * from view1 PREHOOK: type: QUERY @@ -159,8 +168,8 @@ STAGE PLANS: Create View or replace: false columns: valoo string - expanded text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `view0` - name: view0 + expanded text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `default.view0` + name: default.view0 original text: SELECT upper(value) FROM src WHERE key=86 PREHOOK: query: -- make sure EXPLAIN works with a query which references a view @@ -354,7 +363,7 @@ Sort Columns: [] # View Information View Original Text: SELECT upper(value) FROM src WHERE key=86 -View Expanded Text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `view3` +View Expanded Text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `default.view3` PREHOOK: query: ALTER VIEW view3 SET TBLPROPERTIES ("biggest" = "loser") PREHOOK: type: ALTERVIEW_PROPERTIES PREHOOK: Input: default@view3 @@ -404,7 +413,7 @@ Sort Columns: [] # View Information View Original Text: SELECT upper(value) FROM src WHERE key=86 -View Expanded Text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `view3` +View Expanded Text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `default.view3` PREHOOK: query: CREATE TABLE table1 (key int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -470,9 +479,12 @@ POSTHOOK: Input: default@table1 PREHOOK: query: CREATE VIEW view4 AS SELECT * FROM table1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@table1 +PREHOOK: Output: database:default +PREHOOK: Output: default@view4 POSTHOOK: query: CREATE VIEW view4 AS SELECT * FROM table1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@table1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@view4 PREHOOK: query: SELECT * FROM view4 PREHOOK: type: QUERY @@ -540,11 +552,14 @@ FROM view4 v1 join view4 v2 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@table1 PREHOOK: Input: default@view4 +PREHOOK: Output: database:default +PREHOOK: Output: default@view5 POSTHOOK: query: CREATE VIEW view5 AS SELECT v1.key as key1, v2.key as key2 FROM view4 v1 join view4 v2 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@table1 POSTHOOK: Input: default@view4 +POSTHOOK: Output: database:default POSTHOOK: Output: default@view5 Warning: Shuffle Join JOIN[6][tables = [v1, v2]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: SELECT * FROM view5 @@ -574,12 +589,15 @@ CREATE VIEW view6(valoo COMMENT 'I cannot spell') AS SELECT upper(value) as blarg FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view6 POSTHOOK: query: -- verify that column name and comment in DDL portion -- overrides column alias in SELECT CREATE VIEW view6(valoo COMMENT 'I cannot spell') AS SELECT upper(value) as blarg FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view6 PREHOOK: query: DESCRIBE view6 PREHOOK: type: DESCTABLE @@ -596,6 +614,8 @@ ORDER BY key, value LIMIT 10 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: 
database:default +PREHOOK: Output: default@view7 POSTHOOK: query: -- verify that ORDER BY and LIMIT are both supported in view def CREATE VIEW view7 AS SELECT * FROM src @@ -604,6 +624,7 @@ ORDER BY key, value LIMIT 10 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view7 PREHOOK: query: SELECT * FROM view7 PREHOOK: type: QUERY @@ -705,11 +726,14 @@ SELECT test_translate('abc', 'a', 'b') FROM table1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@table1 +PREHOOK: Output: database:default +PREHOOK: Output: default@view8 POSTHOOK: query: CREATE VIEW view8(c) AS SELECT test_translate('abc', 'a', 'b') FROM table1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@table1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@view8 PREHOOK: query: DESCRIBE EXTENDED view8 PREHOOK: type: DESCTABLE @@ -721,7 +745,7 @@ c string #### A masked pattern was here #### FROM table1, viewExpandedText:SELECT `_c0` AS `c` FROM (SELECT `test_translate`('abc', 'a', 'b') -FROM `default`.`table1`) `view8`, tableType:VIRTUAL_VIEW) +FROM `default`.`table1`) `default.view8`, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view8 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view8 @@ -754,7 +778,7 @@ Sort Columns: [] View Original Text: SELECT test_translate('abc', 'a', 'b') FROM table1 View Expanded Text: SELECT `_c0` AS `c` FROM (SELECT `test_translate`('abc', 'a', 'b') -FROM `default`.`table1`) `view8` +FROM `default`.`table1`) `default.view8` PREHOOK: query: SELECT * FROM view8 PREHOOK: type: QUERY PREHOOK: Input: default@table1 @@ -782,12 +806,15 @@ SELECT test_max(length(value)) FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view9 POSTHOOK: query: -- disable map-side aggregation CREATE VIEW view9(m) AS SELECT test_max(length(value)) FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view9 PREHOOK: query: DESCRIBE EXTENDED view9 PREHOOK: type: DESCTABLE @@ -799,7 +826,7 @@ m int #### A masked pattern was here #### FROM src, viewExpandedText:SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) -FROM `default`.`src`) `view9`, tableType:VIRTUAL_VIEW) +FROM `default`.`src`) `default.view9`, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view9 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view9 @@ -832,7 +859,7 @@ Sort Columns: [] View Original Text: SELECT test_max(length(value)) FROM src View Expanded Text: SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) -FROM `default`.`src`) `view9` +FROM `default`.`src`) `default.view9` PREHOOK: query: SELECT * FROM view9 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -858,12 +885,15 @@ SELECT test_max(length(value)) FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view9 POSTHOOK: query: -- enable map-side aggregation CREATE VIEW view9(m) AS SELECT test_max(length(value)) FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view9 PREHOOK: query: DESCRIBE EXTENDED view9 PREHOOK: type: DESCTABLE @@ -875,7 +905,7 @@ m int #### A masked pattern was here #### FROM src, viewExpandedText:SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) -FROM `default`.`src`) `view9`, tableType:VIRTUAL_VIEW) +FROM `default`.`src`) 
`default.view9`, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view9 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view9 @@ -908,7 +938,7 @@ Sort Columns: [] View Original Text: SELECT test_max(length(value)) FROM src View Expanded Text: SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) -FROM `default`.`src`) `view9` +FROM `default`.`src`) `default.view9` PREHOOK: query: SELECT * FROM view9 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -925,11 +955,14 @@ CREATE VIEW view10 AS SELECT slurp.* FROM (SELECT * FROM src WHERE key=86) slurp PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view10 POSTHOOK: query: -- test usage of a subselect within a view CREATE VIEW view10 AS SELECT slurp.* FROM (SELECT * FROM src WHERE key=86) slurp POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view10 PREHOOK: query: DESCRIBE EXTENDED view10 PREHOOK: type: DESCTABLE @@ -999,11 +1032,14 @@ SELECT test_explode(array(1,2,3)) AS (boom) FROM table1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@table1 +PREHOOK: Output: database:default +PREHOOK: Output: default@view11 POSTHOOK: query: CREATE VIEW view11 AS SELECT test_explode(array(1,2,3)) AS (boom) FROM table1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@table1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@view11 PREHOOK: query: DESCRIBE EXTENDED view11 PREHOOK: type: DESCTABLE @@ -1067,11 +1103,14 @@ CREATE VIEW view12 AS SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view12 POSTHOOK: query: -- test usage of LATERAL within a view CREATE VIEW view12 AS SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view12 PREHOOK: query: DESCRIBE EXTENDED view12 PREHOOK: type: DESCTABLE @@ -1151,12 +1190,15 @@ SELECT s.key FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 ON key) s PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcbucket +PREHOOK: Output: database:default +PREHOOK: Output: default@view13 POSTHOOK: query: -- test usage of TABLESAMPLE within a view CREATE VIEW view13 AS SELECT s.key FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 ON key) s POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcbucket +POSTHOOK: Output: database:default POSTHOOK: Output: default@view13 PREHOOK: query: DESCRIBE EXTENDED view13 PREHOOK: type: DESCTABLE @@ -1240,6 +1282,8 @@ JOIN ON (unionsrc1.key = unionsrc2.key) PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view14 POSTHOOK: query: -- test usage of JOIN+UNION+AGG all within same view CREATE VIEW view14 AS SELECT unionsrc1.key as k1, unionsrc1.value as v1, @@ -1254,6 +1298,7 @@ JOIN ON (unionsrc1.key = unionsrc2.key) POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view14 PREHOOK: query: DESCRIBE EXTENDED view14 PREHOOK: type: DESCTABLE @@ -1379,6 +1424,8 @@ FROM src GROUP BY key PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view15 POSTHOOK: query: -- test usage of GROUP BY within view CREATE VIEW view15 AS SELECT key,COUNT(value) AS value_count @@ -1386,6 +1433,7 
@@ FROM src GROUP BY key POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view15 PREHOOK: query: DESCRIBE EXTENDED view15 PREHOOK: type: DESCTABLE @@ -1467,12 +1515,15 @@ SELECT DISTINCT value FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view16 POSTHOOK: query: -- test usage of DISTINCT within view CREATE VIEW view16 AS SELECT DISTINCT value FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view16 PREHOOK: query: DESCRIBE EXTENDED view16 PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/create_view_partitioned.q.out b/ql/src/test/results/clientpositive/create_view_partitioned.q.out index 0216395..ebf9a6b 100644 --- a/ql/src/test/results/clientpositive/create_view_partitioned.q.out +++ b/ql/src/test/results/clientpositive/create_view_partitioned.q.out @@ -20,6 +20,8 @@ FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@vp1 POSTHOOK: query: -- test partitioned view definition -- (underlying table is not actually partitioned) CREATE VIEW vp1 @@ -30,6 +32,7 @@ FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@vp1 PREHOOK: query: DESCRIBE EXTENDED vp1 PREHOOK: type: DESCTABLE @@ -245,6 +248,8 @@ PARTITIONED ON (hr) AS SELECT * FROM srcpart WHERE key < 10 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart +PREHOOK: Output: database:default +PREHOOK: Output: default@vp2 POSTHOOK: query: -- test a partitioned view on top of an underlying partitioned table, -- but with only a suffix of the partitioning columns CREATE VIEW vp2 @@ -252,6 +257,7 @@ PARTITIONED ON (hr) AS SELECT * FROM srcpart WHERE key < 10 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart +POSTHOOK: Output: database:default POSTHOOK: Output: default@vp2 PREHOOK: query: DESCRIBE FORMATTED vp2 PREHOOK: type: DESCTABLE @@ -355,6 +361,8 @@ FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@vp3 POSTHOOK: query: -- test a partitioned view where the PARTITIONED ON clause references -- an imposed column name CREATE VIEW vp3(k,v) @@ -365,6 +373,7 @@ FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@vp3 PREHOOK: query: DESCRIBE FORMATTED vp3 PREHOOK: type: DESCTABLE @@ -405,7 +414,7 @@ FROM src WHERE key=86 View Expanded Text: SELECT `key` AS `k`, `value` AS `v` FROM (SELECT `src`.`key`, `src`.`value` FROM `default`.`src` -WHERE `src`.`key`=86) `vp3` +WHERE `src`.`key`=86) `default.vp3` PREHOOK: query: ALTER VIEW vp3 ADD PARTITION (v='val_86') PREHOOK: type: ALTERTABLE_ADDPARTS diff --git a/ql/src/test/results/clientpositive/create_view_translate.q.out b/ql/src/test/results/clientpositive/create_view_translate.q.out index 94b4dd6..30a407b 100644 --- a/ql/src/test/results/clientpositive/create_view_translate.q.out +++ b/ql/src/test/results/clientpositive/create_view_translate.q.out @@ -9,9 +9,12 @@ POSTHOOK: type: DROPVIEW PREHOOK: query: create view v as select cast(key as string) from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@v POSTHOOK: query: create view v as 
select cast(key as string) from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@v PREHOOK: query: describe formatted v PREHOOK: type: DESCTABLE @@ -49,11 +52,14 @@ PREHOOK: query: create view w as select key, value from ( ) a PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@w POSTHOOK: query: create view w as select key, value from ( select key, value from src ) a POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@w PREHOOK: query: describe formatted w PREHOOK: type: DESCTABLE @@ -135,15 +141,18 @@ STAGE PLANS: or replace: false columns: id int, _c1 string expanded text: SELECT `items`.`id`, `items`.`info`['price'] FROM `default`.`items` - name: priceview + name: default.priceview original text: SELECT items.id, items.info['price'] FROM items PREHOOK: query: CREATE VIEW priceview AS SELECT items.id, items.info['price'] FROM items PREHOOK: type: CREATEVIEW PREHOOK: Input: default@items +PREHOOK: Output: database:default +PREHOOK: Output: default@priceview POSTHOOK: query: CREATE VIEW priceview AS SELECT items.id, items.info['price'] FROM items POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@items +POSTHOOK: Output: database:default POSTHOOK: Output: default@priceview PREHOOK: query: select * from priceview PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/cross_product_check_1.q.out index bc7551c..87e356d 100644 --- a/ql/src/test/results/clientpositive/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/cross_product_check_1.q.out @@ -2,21 +2,27 @@ PREHOOK: query: create table A as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@A POSTHOOK: query: create table A as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@A PREHOOK: query: create table B as select * from src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@B POSTHOOK: query: create table B as select * from src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@B Warning: Shuffle Join JOIN[4][tables = [a, b]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain select * from A join B diff --git a/ql/src/test/results/clientpositive/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cross_product_check_2.q.out index 29e9c7a..32d94a6 100644 --- a/ql/src/test/results/clientpositive/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/cross_product_check_2.q.out @@ -2,21 +2,27 @@ PREHOOK: query: create table A as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@A POSTHOOK: query: create table A as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@A PREHOOK: query: create table B as select * from src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@B 
POSTHOOK: query: create table B as select * from src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@B Warning: Map Join MAPJOIN[7][bigTable=a] in task 'Stage-3:MAPRED' is a cross product PREHOOK: query: explain select * from A join B diff --git a/ql/src/test/results/clientpositive/ctas.q.out b/ql/src/test/results/clientpositive/ctas.q.out index 3c3d22b..079118e 100644 --- a/ql/src/test/results/clientpositive/ctas.q.out +++ b/ql/src/test/results/clientpositive/ctas.q.out @@ -97,7 +97,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: nzhang_CTAS1 + name: default.nzhang_CTAS1 Stage: Stage-3 Stats-Aggr Operator @@ -105,9 +105,12 @@ STAGE PLANS: PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_CTAS1 POSTHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_CTAS1 PREHOOK: query: select * from nzhang_CTAS1 PREHOOK: type: QUERY @@ -242,7 +245,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: nzhang_ctas2 + name: default.nzhang_ctas2 Stage: Stage-3 Stats-Aggr Operator @@ -250,9 +253,12 @@ STAGE PLANS: PREHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_ctas2 POSTHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_ctas2 PREHOOK: query: select * from nzhang_ctas2 PREHOOK: type: QUERY @@ -387,7 +393,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: nzhang_ctas3 + name: default.nzhang_ctas3 Stage: Stage-3 Stats-Aggr Operator @@ -395,9 +401,12 @@ STAGE PLANS: PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_ctas3 POSTHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_ctas3 PREHOOK: query: select * from nzhang_ctas3 PREHOOK: type: QUERY @@ -598,7 +607,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output 
format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: nzhang_ctas4 + name: default.nzhang_ctas4 Stage: Stage-3 Stats-Aggr Operator @@ -606,9 +615,12 @@ STAGE PLANS: PREHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_ctas4 POSTHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_ctas4 PREHOOK: query: select * from nzhang_ctas4 PREHOOK: type: QUERY @@ -897,7 +909,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: nzhang_ctas5 + name: default.nzhang_ctas5 Stage: Stage-3 Stats-Aggr Operator @@ -906,9 +918,12 @@ STAGE PLANS: PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_ctas5 POSTHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_ctas5 PREHOOK: query: create table nzhang_ctas6 (key string, `to` string) PREHOOK: type: CREATETABLE @@ -931,7 +946,10 @@ POSTHOOK: Lineage: nzhang_ctas6.to SIMPLE [(src)src.FieldSchema(name:value, type PREHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@nzhang_ctas6 +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_ctas7 POSTHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@nzhang_ctas6 +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_ctas7 diff --git a/ql/src/test/results/clientpositive/ctas_char.q.out b/ql/src/test/results/clientpositive/ctas_char.q.out index 071034f..e8d5c41 100644 --- a/ql/src/test/results/clientpositive/ctas_char.q.out +++ b/ql/src/test/results/clientpositive/ctas_char.q.out @@ -34,19 +34,25 @@ PREHOOK: query: -- create table as with char column create table ctas_char_2 as select key, value from ctas_char_1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@ctas_char_1 +PREHOOK: Output: database:default +PREHOOK: Output: default@ctas_char_2 POSTHOOK: query: -- create table as with char column create table ctas_char_2 as select key, value from ctas_char_1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@ctas_char_1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas_char_2 PREHOOK: query: -- view with char column create view ctas_char_3 as select key, value from ctas_char_2 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@ctas_char_2 +PREHOOK: Output: database:default +PREHOOK: Output: 
default@ctas_char_3 POSTHOOK: query: -- view with char column create view ctas_char_3 as select key, value from ctas_char_2 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@ctas_char_2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas_char_3 PREHOOK: query: select key, value from ctas_char_1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out index 1da8dcd..bcd42dc 100644 --- a/ql/src/test/results/clientpositive/ctas_colname.q.out +++ b/ql/src/test/results/clientpositive/ctas_colname.q.out @@ -72,7 +72,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: summary + name: default.summary Stage: Stage-2 Stats-Aggr Operator @@ -80,9 +80,12 @@ STAGE PLANS: PREHOOK: query: create table summary as select *, sum(key), count(value) from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@summary POSTHOOK: query: create table summary as select *, sum(key), count(value) from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@summary PREHOOK: query: describe formatted summary PREHOOK: type: DESCTABLE @@ -189,7 +192,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: x4 + name: default.x4 Stage: Stage-2 Stats-Aggr Operator @@ -197,9 +200,12 @@ STAGE PLANS: PREHOOK: query: create table x4 as select *, rank() over(partition by key order by value) as rr from src1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src1 +PREHOOK: Output: database:default +PREHOOK: Output: default@x4 POSTHOOK: query: create table x4 as select *, rank() over(partition by key order by value) as rr from src1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@x4 PREHOOK: query: describe formatted x4 PREHOOK: type: DESCTABLE @@ -354,7 +360,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: x5 + name: default.x5 Stage: Stage-3 Stats-Aggr Operator @@ -362,9 +368,12 @@ STAGE PLANS: PREHOOK: query: create table x5 as select *, lead(key,1) over(partition by key order by value) as lead1 from src limit 20 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@x5 POSTHOOK: query: create table x5 as select *, lead(key,1) over(partition by key order by value) as lead1 from src limit 20 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@x5 PREHOOK: query: describe formatted x5 PREHOOK: type: DESCTABLE @@ -497,7 +506,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: x6 + name: default.x6 Stage: Stage-2 Stats-Aggr Operator @@ -505,9 +514,12 @@ STAGE PLANS: PREHOOK: query: create table x6 as select * from 
(select *, max(key) from src1) a PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src1 +PREHOOK: Output: database:default +PREHOOK: Output: default@x6 POSTHOOK: query: create table x6 as select * from (select *, max(key) from src1) a POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@x6 PREHOOK: query: describe formatted x6 PREHOOK: type: DESCTABLE @@ -619,7 +631,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: x7 + name: default.x7 Stage: Stage-2 Stats-Aggr Operator @@ -627,9 +639,12 @@ STAGE PLANS: PREHOOK: query: create table x7 as select * from (select * from src group by key) a PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@x7 POSTHOOK: query: create table x7 as select * from (select * from src group by key) a POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@x7 PREHOOK: query: describe formatted x7 PREHOOK: type: DESCTABLE @@ -1051,7 +1066,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: x8 + name: default.x8 Stage: Stage-2 Stats-Aggr Operator @@ -1059,9 +1074,12 @@ STAGE PLANS: PREHOOK: query: create table x8 as select * from (select * from src group by key having key < 9) a PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@x8 POSTHOOK: query: create table x8 as select * from (select * from src group by key having key < 9) a POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@x8 PREHOOK: query: describe formatted x8 PREHOOK: type: DESCTABLE @@ -1185,7 +1203,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: x9 + name: default.x9 Stage: Stage-2 Stats-Aggr Operator @@ -1193,9 +1211,12 @@ STAGE PLANS: PREHOOK: query: create table x9 as select * from (select max(value),key from src group by key having key < 9 AND max(value) IS NOT NULL) a PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@x9 POSTHOOK: query: create table x9 as select * from (select max(value),key from src group by key having key < 9 AND max(value) IS NOT NULL) a POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@x9 PREHOOK: query: describe formatted x9 PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/ctas_date.q.out b/ql/src/test/results/clientpositive/ctas_date.q.out index 97bcff9..884e649 100644 --- a/ql/src/test/results/clientpositive/ctas_date.q.out +++ b/ql/src/test/results/clientpositive/ctas_date.q.out @@ -39,26 +39,35 @@ PREHOOK: query: -- create table as with date column create table ctas_date_2 as select key, value, dd, date '1980-12-12' from ctas_date_1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@ctas_date_1 +PREHOOK: Output: 
database:default +PREHOOK: Output: default@ctas_date_2 POSTHOOK: query: -- create table as with date column create table ctas_date_2 as select key, value, dd, date '1980-12-12' from ctas_date_1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@ctas_date_1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas_date_2 PREHOOK: query: -- view with date column create view ctas_date_3 as select * from ctas_date_2 where dd > date '2000-01-01' PREHOOK: type: CREATEVIEW PREHOOK: Input: default@ctas_date_2 +PREHOOK: Output: database:default +PREHOOK: Output: default@ctas_date_3 POSTHOOK: query: -- view with date column create view ctas_date_3 as select * from ctas_date_2 where dd > date '2000-01-01' POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@ctas_date_2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas_date_3 PREHOOK: query: create view ctas_date_4 as select * from ctas_date_2 where dd < date '2000-01-01' PREHOOK: type: CREATEVIEW PREHOOK: Input: default@ctas_date_2 +PREHOOK: Output: database:default +PREHOOK: Output: default@ctas_date_4 POSTHOOK: query: create view ctas_date_4 as select * from ctas_date_2 where dd < date '2000-01-01' POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@ctas_date_2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas_date_4 PREHOOK: query: select key, value, dd, date '1980-12-12' from ctas_date_1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out index c1ee330..286b001 100644 --- a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out +++ b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out @@ -79,7 +79,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: table_db1 + name: db1.table_db1 Stage: Stage-2 Stats-Aggr Operator @@ -117,9 +117,12 @@ STAGE PLANS: PREHOOK: query: CREATE TABLE table_db1 AS SELECT * FROM default.src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@table_db1 POSTHOOK: query: CREATE TABLE table_db1 AS SELECT * FROM default.src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@table_db1 PREHOOK: query: DESCRIBE FORMATTED table_db1 PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/ctas_varchar.q.out b/ql/src/test/results/clientpositive/ctas_varchar.q.out index 6a2116f..9ceb64a 100644 --- a/ql/src/test/results/clientpositive/ctas_varchar.q.out +++ b/ql/src/test/results/clientpositive/ctas_varchar.q.out @@ -34,19 +34,25 @@ PREHOOK: query: -- create table as with varchar column create table ctas_varchar_2 as select key, value from ctas_varchar_1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@ctas_varchar_1 +PREHOOK: Output: database:default +PREHOOK: Output: default@ctas_varchar_2 POSTHOOK: query: -- create table as with varchar column create table ctas_varchar_2 as select key, value from ctas_varchar_1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@ctas_varchar_1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas_varchar_2 PREHOOK: query: -- view with varchar column create view ctas_varchar_3 as select key, value from ctas_varchar_2 PREHOOK: type: CREATEVIEW PREHOOK: 
Input: default@ctas_varchar_2 +PREHOOK: Output: database:default +PREHOOK: Output: default@ctas_varchar_3 POSTHOOK: query: -- view with varchar column create view ctas_varchar_3 as select key, value from ctas_varchar_2 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@ctas_varchar_2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas_varchar_3 PREHOOK: query: select key, value from ctas_varchar_1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/cte_2.q.out b/ql/src/test/results/clientpositive/cte_2.q.out index 7539864..a8bc760 100644 --- a/ql/src/test/results/clientpositive/cte_2.q.out +++ b/ql/src/test/results/clientpositive/cte_2.q.out @@ -84,12 +84,15 @@ with q1 as ( select key from src where key = '4') select * from q1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@s2 POSTHOOK: query: -- ctas create table s2 as with q1 as ( select key from src where key = '4') select * from q1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@s2 PREHOOK: query: select * from s2 PREHOOK: type: QUERY @@ -114,12 +117,15 @@ with q1 as ( select key from src where key = '5') select * from q1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@v1 POSTHOOK: query: -- view test create view v1 as with q1 as ( select key from src where key = '5') select * from q1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@v1 PREHOOK: query: select * from v1 PREHOOK: type: QUERY @@ -148,12 +154,15 @@ with q1 as ( select key from src where key = '5') select * from q1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@v1 POSTHOOK: query: -- view test, name collision create view v1 as with q1 as ( select key from src where key = '5') select * from q1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@v1 PREHOOK: query: with q1 as ( select key from src where key = '4') select * from v1 diff --git a/ql/src/test/results/clientpositive/database.q.out b/ql/src/test/results/clientpositive/database.q.out index 0004119..ad25cd5 100644 --- a/ql/src/test/results/clientpositive/database.q.out +++ b/ql/src/test/results/clientpositive/database.q.out @@ -585,13 +585,12 @@ CREATE TABLE db1.src(key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 -PREHOOK: Output: db1@db1.src +PREHOOK: Output: db1@src POSTHOOK: query: -- CREATE foreign table CREATE TABLE db1.src(key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@db1.src POSTHOOK: Output: db1@src PREHOOK: query: -- LOAD into foreign table LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' @@ -1121,14 +1120,13 @@ PARTITIONED BY (ds STRING, hr STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 -PREHOOK: Output: db1@db1.srcpart +PREHOOK: Output: db1@srcpart POSTHOOK: query: -- CREATE Partitioned foreign table CREATE TABLE db1.srcpart(key STRING, value STRING) PARTITIONED BY (ds STRING, hr STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@db1.srcpart POSTHOOK: Output: db1@srcpart PREHOOK: query: -- LOAD data into Partitioned foreign 
table LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' @@ -1305,22 +1303,28 @@ CREATE TABLE conflict_name AS SELECT value FROM default.src WHERE key = 66 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:db2 +PREHOOK: Output: db2@conflict_name POSTHOOK: query: -- CREATE TABLE AS SELECT from foreign table CREATE TABLE conflict_name AS SELECT value FROM default.src WHERE key = 66 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:db2 POSTHOOK: Output: db2@conflict_name PREHOOK: query: -- CREATE foreign table CREATE TABLE db1.conflict_name AS SELECT value FROM db1.src WHERE key = 8 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: db1@src +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@conflict_name POSTHOOK: query: -- CREATE foreign table CREATE TABLE db1.conflict_name AS SELECT value FROM db1.src WHERE key = 8 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: db1@src +POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@conflict_name PREHOOK: query: -- query tables with the same names in different DBs SELECT * FROM ( @@ -1407,12 +1411,11 @@ PREHOOK: query: -- CREATE TABLE LIKE CREATE TABLE db2.src1 LIKE default.src PREHOOK: type: CREATETABLE PREHOOK: Output: database:db2 -PREHOOK: Output: db2@db2.src1 +PREHOOK: Output: db2@src1 POSTHOOK: query: -- CREATE TABLE LIKE CREATE TABLE db2.src1 LIKE default.src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db2 -POSTHOOK: Output: db2@db2.src1 POSTHOOK: Output: db2@src1 PREHOOK: query: USE db2 PREHOOK: type: SWITCHDATABASE diff --git a/ql/src/test/results/clientpositive/database_drop.q.out b/ql/src/test/results/clientpositive/database_drop.q.out index bc78c94..f483c06 100644 --- a/ql/src/test/results/clientpositive/database_drop.q.out +++ b/ql/src/test/results/clientpositive/database_drop.q.out @@ -50,9 +50,12 @@ POSTHOOK: Output: db5@temp_tbl PREHOOK: query: CREATE VIEW temp_tbl_view AS SELECT * FROM temp_tbl PREHOOK: type: CREATEVIEW PREHOOK: Input: db5@temp_tbl +PREHOOK: Output: database:db5 +PREHOOK: Output: db5@temp_tbl_view POSTHOOK: query: CREATE VIEW temp_tbl_view AS SELECT * FROM temp_tbl POSTHOOK: type: CREATEVIEW POSTHOOK: Input: db5@temp_tbl +POSTHOOK: Output: database:db5 POSTHOOK: Output: db5@temp_tbl_view PREHOOK: query: CREATE INDEX idx1 ON TABLE temp_tbl(id) AS 'COMPACT' with DEFERRED REBUILD PREHOOK: type: CREATEINDEX @@ -95,9 +98,12 @@ POSTHOOK: Output: db5@temp_tbl2 PREHOOK: query: CREATE VIEW temp_tbl2_view AS SELECT * FROM temp_tbl2 PREHOOK: type: CREATEVIEW PREHOOK: Input: db5@temp_tbl2 +PREHOOK: Output: database:db5 +PREHOOK: Output: db5@temp_tbl2_view POSTHOOK: query: CREATE VIEW temp_tbl2_view AS SELECT * FROM temp_tbl2 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: db5@temp_tbl2 +POSTHOOK: Output: database:db5 POSTHOOK: Output: db5@temp_tbl2_view #### A masked pattern was here #### PREHOOK: type: CREATEINDEX diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out index bdf3c34..b147c02 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out @@ -59,9 +59,12 @@ POSTHOOK: Input: default@t1 PREHOOK: query: create table T3 as select * from T1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@T3 POSTHOOK: query: create table T3 as select * from T1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@t1 
+POSTHOOK: Output: database:default POSTHOOK: Output: default@T3 PREHOOK: query: create table T4 (key char(10), val decimal(5,2), b int) partitioned by (ds string) @@ -221,9 +224,12 @@ POSTHOOK: Output: default@t4@ds=tomorrow PREHOOK: query: create view V1 as select key from T1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@V1 POSTHOOK: query: create view V1 as select key from T1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@t1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@V1 PREHOOK: query: alter view V1 set tblproperties ('test'='thisisatest') PREHOOK: type: ALTERVIEW_PROPERTIES diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out index 5ce9780..071a015 100644 --- a/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out +++ b/ql/src/test/results/clientpositive/dbtxnmgr_query5.q.out @@ -28,9 +28,12 @@ POSTHOOK: Output: foo@t1@ds=today PREHOOK: query: create view V1 as select key from T1 PREHOOK: type: CREATEVIEW PREHOOK: Input: foo@t1 +PREHOOK: Output: database:foo +PREHOOK: Output: foo@V1 POSTHOOK: query: create view V1 as select key from T1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: foo@t1 +POSTHOOK: Output: database:foo POSTHOOK: Output: foo@V1 PREHOOK: query: show tables PREHOOK: type: SHOWTABLES diff --git a/ql/src/test/results/clientpositive/decimal_6.q.out b/ql/src/test/results/clientpositive/decimal_6.q.out index 720966f..2bbd48a 100644 --- a/ql/src/test/results/clientpositive/decimal_6.q.out +++ b/ql/src/test/results/clientpositive/decimal_6.q.out @@ -129,9 +129,12 @@ NULL PREHOOK: query: CREATE TABLE DECIMAL_6_3 AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@decimal_6_1 +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_6_3 POSTHOOK: query: CREATE TABLE DECIMAL_6_3 AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@decimal_6_1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@DECIMAL_6_3 PREHOOK: query: desc DECIMAL_6_3 PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/decimal_serde.q.out b/ql/src/test/results/clientpositive/decimal_serde.q.out index 194d2ca..e461c2e 100644 --- a/ql/src/test/results/clientpositive/decimal_serde.q.out +++ b/ql/src/test/results/clientpositive/decimal_serde.q.out @@ -87,11 +87,14 @@ STORED AS RCFile AS SELECT * FROM DECIMAL_TEXT PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@decimal_text +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_RC POSTHOOK: query: CREATE TABLE DECIMAL_RC STORED AS RCFile AS SELECT * FROM DECIMAL_TEXT POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@decimal_text +POSTHOOK: Output: database:default POSTHOOK: Output: default@DECIMAL_RC PREHOOK: query: CREATE TABLE DECIMAL_LAZY_COL ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" @@ -99,12 +102,15 @@ STORED AS RCFile AS SELECT * FROM DECIMAL_RC PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@decimal_rc +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_LAZY_COL POSTHOOK: query: CREATE TABLE DECIMAL_LAZY_COL ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" STORED AS RCFile AS SELECT * FROM DECIMAL_RC POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@decimal_rc 
+POSTHOOK: Output: database:default POSTHOOK: Output: default@DECIMAL_LAZY_COL PREHOOK: query: CREATE TABLE DECIMAL_SEQUENCE ROW FORMAT DELIMITED @@ -115,6 +121,8 @@ STORED AS SEQUENCEFILE AS SELECT * FROM DECIMAL_LAZY_COL ORDER BY key PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@decimal_lazy_col +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_SEQUENCE POSTHOOK: query: CREATE TABLE DECIMAL_SEQUENCE ROW FORMAT DELIMITED FIELDS TERMINATED BY '\001' @@ -124,6 +132,7 @@ STORED AS SEQUENCEFILE AS SELECT * FROM DECIMAL_LAZY_COL ORDER BY key POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@decimal_lazy_col +POSTHOOK: Output: database:default POSTHOOK: Output: default@DECIMAL_SEQUENCE PREHOOK: query: SELECT * FROM DECIMAL_SEQUENCE ORDER BY key, value PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out b/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out index 73d4389..18768d9 100644 --- a/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out +++ b/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out @@ -10,6 +10,8 @@ FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view_partitioned POSTHOOK: query: CREATE VIEW view_partitioned PARTITIONED ON (value) AS @@ -18,6 +20,7 @@ FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view_partitioned PREHOOK: query: ALTER VIEW view_partitioned ADD PARTITION (value='val_86') diff --git a/ql/src/test/results/clientpositive/describe_formatted_view_partitioned_json.q.out b/ql/src/test/results/clientpositive/describe_formatted_view_partitioned_json.q.out index 8435ebe..b0bac90 100644 --- a/ql/src/test/results/clientpositive/describe_formatted_view_partitioned_json.q.out +++ b/ql/src/test/results/clientpositive/describe_formatted_view_partitioned_json.q.out @@ -10,6 +10,8 @@ FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@view_partitioned POSTHOOK: query: CREATE VIEW view_partitioned PARTITIONED ON (value) AS @@ -18,6 +20,7 @@ FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@view_partitioned PREHOOK: query: ALTER VIEW view_partitioned ADD PARTITION (value='val_86') diff --git a/ql/src/test/results/clientpositive/describe_syntax.q.out b/ql/src/test/results/clientpositive/describe_syntax.q.out index 3e445f5..cb6f40e 100644 --- a/ql/src/test/results/clientpositive/describe_syntax.q.out +++ b/ql/src/test/results/clientpositive/describe_syntax.q.out @@ -7,11 +7,10 @@ POSTHOOK: Output: database:db1 PREHOOK: query: CREATE TABLE db1.t1(key1 INT, value1 STRING) PARTITIONED BY (ds STRING, part STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 -PREHOOK: Output: db1@db1.t1 +PREHOOK: Output: db1@t1 POSTHOOK: query: CREATE TABLE db1.t1(key1 INT, value1 STRING) PARTITIONED BY (ds STRING, part STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@db1.t1 POSTHOOK: Output: db1@t1 PREHOOK: query: use db1 PREHOOK: type: SWITCHDATABASE diff --git a/ql/src/test/results/clientpositive/drop_multi_partitions.q.out b/ql/src/test/results/clientpositive/drop_multi_partitions.q.out index 58a472c..f723635 100644 --- 
a/ql/src/test/results/clientpositive/drop_multi_partitions.q.out +++ b/ql/src/test/results/clientpositive/drop_multi_partitions.q.out @@ -1,55 +1,63 @@ -PREHOOK: query: create table mp (a string) partitioned by (b string, c string) +PREHOOK: query: create database dmp +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:dmp +POSTHOOK: query: create database dmp +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:dmp +PREHOOK: query: create table dmp.mp (a string) partitioned by (b string, c string) PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@mp -POSTHOOK: query: create table mp (a string) partitioned by (b string, c string) +PREHOOK: Output: database:dmp +PREHOOK: Output: dmp@mp +POSTHOOK: query: create table dmp.mp (a string) partitioned by (b string, c string) POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@mp -PREHOOK: query: alter table mp add partition (b='1', c='1') +POSTHOOK: Output: database:dmp +POSTHOOK: Output: dmp@mp +PREHOOK: query: alter table dmp.mp add partition (b='1', c='1') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@mp -POSTHOOK: query: alter table mp add partition (b='1', c='1') +PREHOOK: Output: dmp@mp +POSTHOOK: query: alter table dmp.mp add partition (b='1', c='1') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@mp -POSTHOOK: Output: default@mp@b=1/c=1 -PREHOOK: query: alter table mp add partition (b='1', c='2') +POSTHOOK: Output: dmp@mp +POSTHOOK: Output: dmp@mp@b=1/c=1 +PREHOOK: query: alter table dmp.mp add partition (b='1', c='2') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@mp -POSTHOOK: query: alter table mp add partition (b='1', c='2') +PREHOOK: Output: dmp@mp +POSTHOOK: query: alter table dmp.mp add partition (b='1', c='2') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@mp -POSTHOOK: Output: default@mp@b=1/c=2 -PREHOOK: query: alter table mp add partition (b='2', c='2') +POSTHOOK: Output: dmp@mp +POSTHOOK: Output: dmp@mp@b=1/c=2 +PREHOOK: query: alter table dmp.mp add partition (b='2', c='2') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@mp -POSTHOOK: query: alter table mp add partition (b='2', c='2') +PREHOOK: Output: dmp@mp +POSTHOOK: query: alter table dmp.mp add partition (b='2', c='2') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@mp -POSTHOOK: Output: default@mp@b=2/c=2 -PREHOOK: query: show partitions mp +POSTHOOK: Output: dmp@mp +POSTHOOK: Output: dmp@mp@b=2/c=2 +PREHOOK: query: show partitions dmp.mp PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@mp -POSTHOOK: query: show partitions mp +PREHOOK: Input: dmp@mp +POSTHOOK: query: show partitions dmp.mp POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@mp +POSTHOOK: Input: dmp@mp b=1/c=1 b=1/c=2 b=2/c=2 -PREHOOK: query: explain extended alter table mp drop partition (b='1') +PREHOOK: query: explain extended alter table dmp.mp drop partition (b='1') PREHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: query: explain extended alter table mp drop partition (b='1') +POSTHOOK: query: explain extended alter table dmp.mp drop partition (b='1') POSTHOOK: type: ALTERTABLE_DROPPARTS ABSTRACT SYNTAX TREE: -TOK_ALTERTABLE_DROPPARTS +TOK_ALTERTABLE TOK_TABNAME + dmp mp - TOK_PARTSPEC - TOK_PARTVAL - b - = - '1' + TOK_ALTERTABLE_DROPPARTS + TOK_PARTSPEC + TOK_PARTVAL + b + = + '1' STAGE DEPENDENCIES: @@ -59,35 +67,51 @@ STAGE PLANS: Stage: Stage-0 Drop Table Operator: Drop Table - table: default.mp + table: dmp.mp 
-PREHOOK: query: alter table mp drop partition (b='1') +PREHOOK: query: alter table dmp.mp drop partition (b='1') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: default@mp -PREHOOK: Output: default@mp@b=1/c=1 -PREHOOK: Output: default@mp@b=1/c=2 -POSTHOOK: query: alter table mp drop partition (b='1') +PREHOOK: Input: dmp@mp +PREHOOK: Output: dmp@mp@b=1/c=1 +PREHOOK: Output: dmp@mp@b=1/c=2 +POSTHOOK: query: alter table dmp.mp drop partition (b='1') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: default@mp -POSTHOOK: Output: default@mp@b=1/c=1 -POSTHOOK: Output: default@mp@b=1/c=2 -PREHOOK: query: show partitions mp +POSTHOOK: Input: dmp@mp +POSTHOOK: Output: dmp@mp@b=1/c=1 +POSTHOOK: Output: dmp@mp@b=1/c=2 +PREHOOK: query: show partitions dmp.mp PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@mp -POSTHOOK: query: show partitions mp +PREHOOK: Input: dmp@mp +POSTHOOK: query: show partitions dmp.mp POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@mp +POSTHOOK: Input: dmp@mp b=2/c=2 -PREHOOK: query: alter table mp drop if exists partition (b='3') +PREHOOK: query: alter table dmp.mp drop if exists partition (b='3') PREHOOK: type: ALTERTABLE_DROPPARTS -PREHOOK: Input: default@mp -POSTHOOK: query: alter table mp drop if exists partition (b='3') +PREHOOK: Input: dmp@mp +POSTHOOK: query: alter table dmp.mp drop if exists partition (b='3') POSTHOOK: type: ALTERTABLE_DROPPARTS -POSTHOOK: Input: default@mp -PREHOOK: query: show partitions mp +POSTHOOK: Input: dmp@mp +PREHOOK: query: show partitions dmp.mp PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@mp -POSTHOOK: query: show partitions mp +PREHOOK: Input: dmp@mp +POSTHOOK: query: show partitions dmp.mp POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@mp +POSTHOOK: Input: dmp@mp b=2/c=2 +PREHOOK: query: drop table dmp.mp +PREHOOK: type: DROPTABLE +PREHOOK: Input: dmp@mp +PREHOOK: Output: dmp@mp +POSTHOOK: query: drop table dmp.mp +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: dmp@mp +POSTHOOK: Output: dmp@mp +PREHOOK: query: drop database dmp +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:dmp +PREHOOK: Output: database:dmp +POSTHOOK: query: drop database dmp +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:dmp +POSTHOOK: Output: database:dmp diff --git a/ql/src/test/results/clientpositive/exchange_partition.q.out b/ql/src/test/results/clientpositive/exchange_partition.q.out index 381a9fd..4ff1f6c 100644 --- a/ql/src/test/results/clientpositive/exchange_partition.q.out +++ b/ql/src/test/results/clientpositive/exchange_partition.q.out @@ -1,65 +1,77 @@ -PREHOOK: query: CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING) +PREHOOK: query: create database ex1 +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:ex1 +POSTHOOK: query: create database ex1 +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:ex1 +PREHOOK: query: create database ex2 +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:ex2 +POSTHOOK: query: create database ex2 +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:ex2 +PREHOOK: query: CREATE TABLE ex1.exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@exchange_part_test1 -POSTHOOK: query: CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING) +PREHOOK: Output: database:ex1 +PREHOOK: Output: ex1@exchange_part_test1 +POSTHOOK: query: CREATE TABLE ex1.exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING) POSTHOOK: 
type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@exchange_part_test1 -PREHOOK: query: CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING) +POSTHOOK: Output: database:ex1 +POSTHOOK: Output: ex1@exchange_part_test1 +PREHOOK: query: CREATE TABLE ex2.exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@exchange_part_test2 -POSTHOOK: query: CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING) +PREHOOK: Output: database:ex2 +PREHOOK: Output: ex2@exchange_part_test2 +POSTHOOK: query: CREATE TABLE ex2.exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@exchange_part_test2 -PREHOOK: query: SHOW PARTITIONS exchange_part_test1 +POSTHOOK: Output: database:ex2 +POSTHOOK: Output: ex2@exchange_part_test2 +PREHOOK: query: SHOW PARTITIONS ex1.exchange_part_test1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test1 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test1 +PREHOOK: Input: ex1@exchange_part_test1 +POSTHOOK: query: SHOW PARTITIONS ex1.exchange_part_test1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test1 -PREHOOK: query: SHOW PARTITIONS exchange_part_test2 +POSTHOOK: Input: ex1@exchange_part_test1 +PREHOOK: query: SHOW PARTITIONS ex2.exchange_part_test2 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test2 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test2 +PREHOOK: Input: ex2@exchange_part_test2 +POSTHOOK: query: SHOW PARTITIONS ex2.exchange_part_test2 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test2 -PREHOOK: query: ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05') +POSTHOOK: Input: ex2@exchange_part_test2 +PREHOOK: query: ALTER TABLE ex2.exchange_part_test2 ADD PARTITION (ds='2013-04-05') PREHOOK: type: ALTERTABLE_ADDPARTS -PREHOOK: Output: default@exchange_part_test2 -POSTHOOK: query: ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05') +PREHOOK: Output: ex2@exchange_part_test2 +POSTHOOK: query: ALTER TABLE ex2.exchange_part_test2 ADD PARTITION (ds='2013-04-05') POSTHOOK: type: ALTERTABLE_ADDPARTS -POSTHOOK: Output: default@exchange_part_test2 -POSTHOOK: Output: default@exchange_part_test2@ds=2013-04-05 -PREHOOK: query: SHOW PARTITIONS exchange_part_test1 +POSTHOOK: Output: ex2@exchange_part_test2 +POSTHOOK: Output: ex2@exchange_part_test2@ds=2013-04-05 +PREHOOK: query: SHOW PARTITIONS ex1.exchange_part_test1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test1 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test1 +PREHOOK: Input: ex1@exchange_part_test1 +POSTHOOK: query: SHOW PARTITIONS ex1.exchange_part_test1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test1 -PREHOOK: query: SHOW PARTITIONS exchange_part_test2 +POSTHOOK: Input: ex1@exchange_part_test1 +PREHOOK: query: SHOW PARTITIONS ex2.exchange_part_test2 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test2 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test2 +PREHOOK: Input: ex2@exchange_part_test2 +POSTHOOK: query: SHOW PARTITIONS ex2.exchange_part_test2 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test2 +POSTHOOK: Input: ex2@exchange_part_test2 ds=2013-04-05 -PREHOOK: query: ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE 
exchange_part_test2 +PREHOOK: query: ALTER TABLE ex1.exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE ex2.exchange_part_test2 PREHOOK: type: null -POSTHOOK: query: ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2 +POSTHOOK: query: ALTER TABLE ex1.exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE ex2.exchange_part_test2 POSTHOOK: type: null -PREHOOK: query: SHOW PARTITIONS exchange_part_test1 +PREHOOK: query: SHOW PARTITIONS ex1.exchange_part_test1 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test1 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test1 +PREHOOK: Input: ex1@exchange_part_test1 +POSTHOOK: query: SHOW PARTITIONS ex1.exchange_part_test1 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test1 +POSTHOOK: Input: ex1@exchange_part_test1 ds=2013-04-05 -PREHOOK: query: SHOW PARTITIONS exchange_part_test2 +PREHOOK: query: SHOW PARTITIONS ex2.exchange_part_test2 PREHOOK: type: SHOWPARTITIONS -PREHOOK: Input: default@exchange_part_test2 -POSTHOOK: query: SHOW PARTITIONS exchange_part_test2 +PREHOOK: Input: ex2@exchange_part_test2 +POSTHOOK: query: SHOW PARTITIONS ex2.exchange_part_test2 POSTHOOK: type: SHOWPARTITIONS -POSTHOOK: Input: default@exchange_part_test2 +POSTHOOK: Input: ex2@exchange_part_test2 diff --git a/ql/src/test/results/clientpositive/explain_dependency.q.out b/ql/src/test/results/clientpositive/explain_dependency.q.out index 8f2ab34..cb98d54 100644 --- a/ql/src/test/results/clientpositive/explain_dependency.q.out +++ b/ql/src/test/results/clientpositive/explain_dependency.q.out @@ -4,19 +4,25 @@ PREHOOK: query: -- This test is used for testing EXPLAIN DEPENDENCY command CREATE VIEW V1 AS SELECT key, value from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@V1 POSTHOOK: query: -- This test is used for testing EXPLAIN DEPENDENCY command -- Create some views CREATE VIEW V1 AS SELECT key, value from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@V1 PREHOOK: query: CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart +PREHOOK: Output: database:default +PREHOOK: Output: default@V2 POSTHOOK: query: CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart +POSTHOOK: Output: database:default POSTHOOK: Output: default@V2 PREHOOK: query: CREATE VIEW V3 AS SELECT src1.key, src2.value FROM V2 src1 @@ -25,6 +31,8 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Input: default@srcpart PREHOOK: Input: default@v2 +PREHOOK: Output: database:default +PREHOOK: Output: default@V3 POSTHOOK: query: CREATE VIEW V3 AS SELECT src1.key, src2.value FROM V2 src1 JOIN src src2 ON src1.key = src2.key WHERE src1.ds IS NOT NULL @@ -32,6 +40,7 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@v2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@V3 PREHOOK: query: CREATE VIEW V4 AS SELECT src1.key, src2.value as value1, src3.value as value2 @@ -41,6 +50,8 @@ PREHOOK: Input: default@src PREHOOK: Input: default@srcpart PREHOOK: Input: default@v1 PREHOOK: Input: default@v2 +PREHOOK: Output: database:default +PREHOOK: Output: default@V4 POSTHOOK: query: CREATE VIEW V4 AS SELECT src1.key, 
src2.value as value1, src3.value as value2 FROM V1 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key @@ -49,6 +60,7 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@v1 POSTHOOK: Input: default@v2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@V4 PREHOOK: query: -- Simple select queries, union queries and join queries EXPLAIN DEPENDENCY @@ -111,11 +123,14 @@ PREHOOK: query: -- The table should show up in the explain dependency even if no CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart +PREHOOK: Output: database:default +PREHOOK: Output: default@V5 POSTHOOK: query: -- The table should show up in the explain dependency even if none -- of the partitions are selected. CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart +POSTHOOK: Output: database:default POSTHOOK: Output: default@V5 PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V5 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/explain_logical.q.out b/ql/src/test/results/clientpositive/explain_logical.q.out index 862422a..5563777 100644 --- a/ql/src/test/results/clientpositive/explain_logical.q.out +++ b/ql/src/test/results/clientpositive/explain_logical.q.out @@ -4,19 +4,25 @@ PREHOOK: query: -- This test is used for testing EXPLAIN LOGICAL command CREATE VIEW V1 AS SELECT key, value from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@V1 POSTHOOK: query: -- This test is used for testing EXPLAIN LOGICAL command -- Create some views CREATE VIEW V1 AS SELECT key, value from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@V1 PREHOOK: query: CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart +PREHOOK: Output: database:default +PREHOOK: Output: default@V2 POSTHOOK: query: CREATE VIEW V2 AS SELECT ds, key, value FROM srcpart WHERE ds IS NOT NULL POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart +POSTHOOK: Output: database:default POSTHOOK: Output: default@V2 PREHOOK: query: CREATE VIEW V3 AS SELECT src1.key, src2.value FROM V2 src1 @@ -25,6 +31,8 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Input: default@srcpart PREHOOK: Input: default@v2 +PREHOOK: Output: database:default +PREHOOK: Output: default@V3 POSTHOOK: query: CREATE VIEW V3 AS SELECT src1.key, src2.value FROM V2 src1 JOIN src src2 ON src1.key = src2.key WHERE src1.ds IS NOT NULL @@ -32,6 +40,7 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@v2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@V3 PREHOOK: query: CREATE VIEW V4 AS SELECT src1.key, src2.value as value1, src3.value as value2 @@ -41,6 +50,8 @@ PREHOOK: Input: default@src PREHOOK: Input: default@srcpart PREHOOK: Input: default@v1 PREHOOK: Input: default@v2 +PREHOOK: Output: database:default +PREHOOK: Output: default@V4 POSTHOOK: query: CREATE VIEW V4 AS SELECT src1.key, src2.value as value1, src3.value as value2 FROM V1 src1 JOIN V2 src2 on src1.key = src2.key JOIN src src3 ON src2.key = src3.key @@ -49,6 +60,7 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@v1 POSTHOOK: Input: default@v2 +POSTHOOK: Output: 
database:default POSTHOOK: Output: default@V4 PREHOOK: query: -- Simple select queries, union queries and join queries EXPLAIN LOGICAL @@ -678,11 +690,14 @@ PREHOOK: query: -- The table should show up in the explain logical even if none CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart +PREHOOK: Output: database:default +PREHOOK: Output: default@V5 POSTHOOK: query: -- The table should show up in the explain logical even if none -- of the partitions are selected. CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart +POSTHOOK: Output: database:default POSTHOOK: Output: default@V5 PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V5 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out b/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out index 94a8870..44c1030 100644 --- a/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out +++ b/ql/src/test/results/clientpositive/fileformat_sequencefile.q.out @@ -18,7 +18,7 @@ STAGE PLANS: columns: key int, value string input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.mapred.SequenceFileOutputFormat - name: dest1 + name: default.dest1 PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.SequenceFileInputFormat' diff --git a/ql/src/test/results/clientpositive/fileformat_text.q.out b/ql/src/test/results/clientpositive/fileformat_text.q.out index 4815d05..a6f8889 100644 --- a/ql/src/test/results/clientpositive/fileformat_text.q.out +++ b/ql/src/test/results/clientpositive/fileformat_text.q.out @@ -18,7 +18,7 @@ STAGE PLANS: columns: key int, value string input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - name: dest1 + name: default.dest1 PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' diff --git a/ql/src/test/results/clientpositive/global_limit.q.out b/ql/src/test/results/clientpositive/global_limit.q.out index 31cbeb1..a4923a3 100644 --- a/ql/src/test/results/clientpositive/global_limit.q.out +++ b/ql/src/test/results/clientpositive/global_limit.q.out @@ -50,10 +50,13 @@ PREHOOK: query: -- need one file create table gl_tgt as select key from gl_src1 limit 1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@gl_src1 +PREHOOK: Output: database:default +PREHOOK: Output: default@gl_tgt POSTHOOK: query: -- need one file create table gl_tgt as select key from gl_src1 limit 1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@gl_src1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@gl_tgt PREHOOK: query: select * from gl_tgt ORDER BY key ASC PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out index 2dd4156..b468e70 100644 --- a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out +++ b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out @@ -140,7 +140,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: dummy + name: default.dummy Stage: Stage-2 Stats-Aggr Operator @@ -149,10 +149,13 @@ PREHOOK: query: create 
table dummy as select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows) PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@dummy POSTHOOK: query: create table dummy as select distinct key, "X" as dummy1, "X" as dummy2 from src tablesample (10 rows) POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@dummy PREHOOK: query: select key,dummy1,dummy2 from dummy PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/index_auto_empty.q.out b/ql/src/test/results/clientpositive/index_auto_empty.q.out index 6a1a6c5..5039024 100644 --- a/ql/src/test/results/clientpositive/index_auto_empty.q.out +++ b/ql/src/test/results/clientpositive/index_auto_empty.q.out @@ -1,50 +1,56 @@ PREHOOK: query: -- Test to ensure that an empty index result is propagated correctly --- Create temp, and populate it with some values in src. -CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@temp +CREATE DATABASE it +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:it POSTHOOK: query: -- Test to ensure that an empty index result is propagated correctly --- Create temp, and populate it with some values in src. -CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE +CREATE DATABASE it +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:it +PREHOOK: query: -- Create temp, and populate it with some values in src. +CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:it +PREHOOK: Output: it@temp +POSTHOOK: query: -- Create temp, and populate it with some values in src. +CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@temp -PREHOOK: query: -- Build an index on temp. -CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: Output: database:it +POSTHOOK: Output: it@temp +PREHOOK: query: -- Build an index on it.temp. +CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX -PREHOOK: Input: default@temp -POSTHOOK: query: -- Build an index on temp. -CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: Input: it@temp +POSTHOOK: query: -- Build an index on it.temp. 
+CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX -POSTHOOK: Input: default@temp -POSTHOOK: Output: default@default__temp_temp_index__ -PREHOOK: query: ALTER INDEX temp_index ON temp REBUILD +POSTHOOK: Input: it@temp +POSTHOOK: Output: it@it__temp_temp_index__ +PREHOOK: query: ALTER INDEX temp_index ON it.temp REBUILD PREHOOK: type: ALTERINDEX_REBUILD -PREHOOK: Input: default@temp -PREHOOK: Output: default@default__temp_temp_index__ -POSTHOOK: query: ALTER INDEX temp_index ON temp REBUILD +PREHOOK: Input: it@temp +PREHOOK: Output: it@it__temp_temp_index__ +POSTHOOK: query: ALTER INDEX temp_index ON it.temp REBUILD POSTHOOK: type: ALTERINDEX_REBUILD -POSTHOOK: Input: default@temp -POSTHOOK: Output: default@default__temp_temp_index__ -POSTHOOK: Lineage: default__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: default__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] -POSTHOOK: Lineage: default__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Input: it@temp +POSTHOOK: Output: it@it__temp_temp_index__ +POSTHOOK: Lineage: it__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: it__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: it__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ] PREHOOK: query: -- query should not return any values -SELECT * FROM default__temp_temp_index__ WHERE key = 86 +SELECT * FROM it.it__temp_temp_index__ WHERE key = 86 PREHOOK: type: QUERY -PREHOOK: Input: default@default__temp_temp_index__ +PREHOOK: Input: it@it__temp_temp_index__ #### A masked pattern was here #### POSTHOOK: query: -- query should not return any values -SELECT * FROM default__temp_temp_index__ WHERE key = 86 +SELECT * FROM it.it__temp_temp_index__ WHERE key = 86 POSTHOOK: type: QUERY -POSTHOOK: Input: default@default__temp_temp_index__ +POSTHOOK: Input: it@it__temp_temp_index__ #### A masked pattern was here #### -PREHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86 +PREHOOK: query: EXPLAIN SELECT * FROM it.temp WHERE key = 86 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86 +POSTHOOK: query: EXPLAIN SELECT * FROM it.temp WHERE key = 86 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -79,19 +85,27 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT * FROM temp WHERE key = 86 +PREHOOK: query: SELECT * FROM it.temp WHERE key = 86 PREHOOK: type: QUERY -PREHOOK: Input: default@temp +PREHOOK: Input: it@temp #### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM temp WHERE key = 86 +POSTHOOK: query: SELECT * FROM it.temp WHERE key = 86 POSTHOOK: type: QUERY -POSTHOOK: Input: default@temp +POSTHOOK: Input: it@temp #### A masked pattern was here #### -PREHOOK: query: DROP table temp +PREHOOK: query: DROP table it.temp PREHOOK: type: DROPTABLE -PREHOOK: Input: default@temp -PREHOOK: Output: default@temp -POSTHOOK: query: DROP table temp +PREHOOK: Input: it@temp +PREHOOK: Output: it@temp +POSTHOOK: query: DROP table it.temp POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@temp -POSTHOOK: Output: default@temp +POSTHOOK: Input: it@temp +POSTHOOK: 
Output: it@temp +PREHOOK: query: DROP DATABASE it +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:it +PREHOOK: Output: database:it +POSTHOOK: query: DROP DATABASE it +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:it +POSTHOOK: Output: database:it diff --git a/ql/src/test/results/clientpositive/input15.q.out b/ql/src/test/results/clientpositive/input15.q.out index adfc307..13bdbf2 100644 --- a/ql/src/test/results/clientpositive/input15.q.out +++ b/ql/src/test/results/clientpositive/input15.q.out @@ -16,7 +16,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: TEST15 + name: default.TEST15 PREHOOK: query: CREATE TABLE TEST15(key INT, value STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/input46.q.out b/ql/src/test/results/clientpositive/input46.q.out index c2ac396..4240566 100644 --- a/ql/src/test/results/clientpositive/input46.q.out +++ b/ql/src/test/results/clientpositive/input46.q.out @@ -7,34 +7,38 @@ POSTHOOK: Output: database:table_in_database_creation PREHOOK: query: create table table_in_database_creation.test1 as select * from src limit 1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:table_in_database_creation +PREHOOK: Output: table_in_database_creation@test1 POSTHOOK: query: create table table_in_database_creation.test1 as select * from src limit 1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:table_in_database_creation POSTHOOK: Output: table_in_database_creation@test1 -PREHOOK: query: create table `table_in_database_creation.test2` as select * from src limit 1 +PREHOOK: query: create table `table_in_database_creation`.`test2` as select * from src limit 1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src -POSTHOOK: query: create table `table_in_database_creation.test2` as select * from src limit 1 +PREHOOK: Output: database:table_in_database_creation +PREHOOK: Output: table_in_database_creation@test2 +POSTHOOK: query: create table `table_in_database_creation`.`test2` as select * from src limit 1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:table_in_database_creation POSTHOOK: Output: table_in_database_creation@test2 PREHOOK: query: create table table_in_database_creation.test3 (a string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:table_in_database_creation -PREHOOK: Output: table_in_database_creation@table_in_database_creation.test3 +PREHOOK: Output: table_in_database_creation@test3 POSTHOOK: query: create table table_in_database_creation.test3 (a string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:table_in_database_creation -POSTHOOK: Output: table_in_database_creation@table_in_database_creation.test3 POSTHOOK: Output: table_in_database_creation@test3 -PREHOOK: query: create table `table_in_database_creation.test4` (a string) +PREHOOK: query: create table `table_in_database_creation`.`test4` (a string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:table_in_database_creation -PREHOOK: Output: table_in_database_creation@table_in_database_creation.test4 -POSTHOOK: query: create table `table_in_database_creation.test4` (a string) +PREHOOK: Output: table_in_database_creation@test4 +POSTHOOK: query: create table 
`table_in_database_creation`.`test4` (a string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:table_in_database_creation -POSTHOOK: Output: table_in_database_creation@table_in_database_creation.test4 POSTHOOK: Output: table_in_database_creation@test4 PREHOOK: query: drop database table_in_database_creation cascade PREHOOK: type: DROPDATABASE diff --git a/ql/src/test/results/clientpositive/inputddl1.q.out b/ql/src/test/results/clientpositive/inputddl1.q.out index e3d21bf..12d655d 100644 --- a/ql/src/test/results/clientpositive/inputddl1.q.out +++ b/ql/src/test/results/clientpositive/inputddl1.q.out @@ -15,7 +15,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: INPUTDDL1 + name: default.INPUTDDL1 PREHOOK: query: CREATE TABLE INPUTDDL1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/inputddl2.q.out b/ql/src/test/results/clientpositive/inputddl2.q.out index f5c36c3..3d344d5 100644 --- a/ql/src/test/results/clientpositive/inputddl2.q.out +++ b/ql/src/test/results/clientpositive/inputddl2.q.out @@ -16,7 +16,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat partition columns: ds string, country string serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: INPUTDDL2 + name: default.INPUTDDL2 PREHOOK: query: CREATE TABLE INPUTDDL2(key INT, value STRING) PARTITIONED BY(ds STRING, country STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/inputddl3.q.out b/ql/src/test/results/clientpositive/inputddl3.q.out index e1ca708..035cbe9 100644 --- a/ql/src/test/results/clientpositive/inputddl3.q.out +++ b/ql/src/test/results/clientpositive/inputddl3.q.out @@ -16,7 +16,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: INPUTDDL3 + name: default.INPUTDDL3 PREHOOK: query: CREATE TABLE INPUTDDL3(key INT, value STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/insert1.q.out b/ql/src/test/results/clientpositive/insert1.q.out index 1486b86..5816a2c 100644 --- a/ql/src/test/results/clientpositive/insert1.q.out +++ b/ql/src/test/results/clientpositive/insert1.q.out @@ -217,12 +217,11 @@ POSTHOOK: Output: database:x PREHOOK: query: create table x.insert1(key int, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:x -PREHOOK: Output: x@x.insert1 +PREHOOK: Output: x@insert1 POSTHOOK: query: create table x.insert1(key int, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:x POSTHOOK: Output: x@insert1 -POSTHOOK: Output: x@x.insert1 PREHOOK: query: explain insert into table x.INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1) PREHOOK: type: QUERY POSTHOOK: query: explain insert into table x.INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1) @@ -635,11 +634,10 @@ POSTHOOK: Output: database:db1 PREHOOK: query: CREATE TABLE db1.result(col1 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 -PREHOOK: Output: db1@db1.result +PREHOOK: Output: db1@result POSTHOOK: query: CREATE TABLE db1.result(col1 STRING) POSTHOOK: type: 
CREATETABLE POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@db1.result POSTHOOK: Output: db1@result PREHOOK: query: INSERT OVERWRITE TABLE db1.result SELECT 'db1_insert1' FROM src LIMIT 1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out b/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out index ee7e1e8..25c438f 100644 --- a/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out +++ b/ql/src/test/results/clientpositive/insert2_overwrite_partitions.q.out @@ -13,11 +13,10 @@ POSTHOOK: Output: database:db2 PREHOOK: query: CREATE TABLE db1.sourceTable (one string,two string) PARTITIONED BY (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 -PREHOOK: Output: db1@db1.sourceTable +PREHOOK: Output: db1@sourceTable POSTHOOK: query: CREATE TABLE db1.sourceTable (one string,two string) PARTITIONED BY (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@db1.sourceTable POSTHOOK: Output: db1@sourceTable PREHOOK: query: load data local inpath '../../data/files/kv1.txt' INTO TABLE db1.sourceTable partition(ds='2011-11-11') PREHOOK: type: LOAD @@ -39,11 +38,10 @@ POSTHOOK: Output: db1@sourcetable@ds=2011-11-11 PREHOOK: query: CREATE TABLE db2.destinTable (one string,two string) PARTITIONED BY (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db2 -PREHOOK: Output: db2@db2.destinTable +PREHOOK: Output: db2@destinTable POSTHOOK: query: CREATE TABLE db2.destinTable (one string,two string) PARTITIONED BY (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db2 -POSTHOOK: Output: db2@db2.destinTable POSTHOOK: Output: db2@destinTable PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE db2.destinTable PARTITION (ds='2011-11-11') SELECT one,two FROM db1.sourceTable WHERE ds='2011-11-11' order by one desc, two desc limit 5 diff --git a/ql/src/test/results/clientpositive/join41.q.out b/ql/src/test/results/clientpositive/join41.q.out index cfb41fd..b464180 100644 --- a/ql/src/test/results/clientpositive/join41.q.out +++ b/ql/src/test/results/clientpositive/join41.q.out @@ -1,9 +1,12 @@ PREHOOK: query: create table s1 as select * from src where key = 0 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@s1 POSTHOOK: query: create table s1 as select * from src where key = 0 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@s1 PREHOOK: query: EXPLAIN SELECT * FROM s1 src1 LEFT OUTER JOIN s1 src2 ON (src1.key = src2.key AND src2.key > 10) diff --git a/ql/src/test/results/clientpositive/join_filters_overlap.q.out b/ql/src/test/results/clientpositive/join_filters_overlap.q.out index 5ec5e28..51b4089 100644 --- a/ql/src/test/results/clientpositive/join_filters_overlap.q.out +++ b/ql/src/test/results/clientpositive/join_filters_overlap.q.out @@ -4,12 +4,15 @@ PREHOOK: query: -- SORT_QUERY_RESULTS create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@a POSTHOOK: query: -- SORT_QUERY_RESULTS -- HIVE-3411 Filter predicates on outer join overlapped on single alias is not handled properly create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 
POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@a PREHOOK: query: -- overlap on a explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) diff --git a/ql/src/test/results/clientpositive/join_view.q.out b/ql/src/test/results/clientpositive/join_view.q.out index 2f4b8c4..3e54177 100644 --- a/ql/src/test/results/clientpositive/join_view.q.out +++ b/ql/src/test/results/clientpositive/join_view.q.out @@ -28,12 +28,15 @@ create view v as select invites.bar, invites2.foo, invites2.ds from invites join PREHOOK: type: CREATEVIEW PREHOOK: Input: default@invites PREHOOK: Input: default@invites2 +PREHOOK: Output: database:default +PREHOOK: Output: default@v POSTHOOK: query: -- test join views: see HIVE-1989 create view v as select invites.bar, invites2.foo, invites2.ds from invites join invites2 on invites.ds=invites2.ds POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@invites POSTHOOK: Input: default@invites2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@v PREHOOK: query: explain select * from v where ds='2011-09-01' PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/lateral_view_noalias.q.out b/ql/src/test/results/clientpositive/lateral_view_noalias.q.out index 8141d5c..55e1a4c 100644 --- a/ql/src/test/results/clientpositive/lateral_view_noalias.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_noalias.q.out @@ -130,10 +130,13 @@ PREHOOK: query: -- view create view lv_noalias as SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@lv_noalias POSTHOOK: query: -- view create view lv_noalias as SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@lv_noalias PREHOOK: query: explain select * from lv_noalias a join lv_noalias b on a.key=b.key PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/lateral_view_outer.q.out b/ql/src/test/results/clientpositive/lateral_view_outer.q.out index e39b0f8..236c5c3 100644 --- a/ql/src/test/results/clientpositive/lateral_view_outer.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_outer.q.out @@ -185,9 +185,12 @@ POSTHOOK: Input: default@src PREHOOK: query: create table array_valued as select key, if (key > 300, array(value, value), null) as value from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@array_valued POSTHOOK: query: create table array_valued as select key, if (key > 300, array(value, value), null) as value from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@array_valued PREHOOK: query: explain select * from array_valued LATERAL VIEW OUTER explode(value) C AS a limit 10 diff --git a/ql/src/test/results/clientpositive/merge3.q.out b/ql/src/test/results/clientpositive/merge3.q.out index ad666e2..1701be1 100644 --- a/ql/src/test/results/clientpositive/merge3.q.out +++ b/ql/src/test/results/clientpositive/merge3.q.out @@ -8,6 +8,8 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: database:default +PREHOOK: Output: default@merge_src POSTHOOK: query: -- SORT_QUERY_RESULTS create table merge_src as @@ -18,6 +20,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: database:default POSTHOOK: Output: default@merge_src PREHOOK: query: create table merge_src_part (key string, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE @@ -196,7 +199,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: merge_src2 + name: default.merge_src2 Stage: Stage-2 Stats-Aggr Operator @@ -320,10 +323,13 @@ PREHOOK: query: create table merge_src2 as select key, value from merge_src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@merge_src +PREHOOK: Output: database:default +PREHOOK: Output: default@merge_src2 POSTHOOK: query: create table merge_src2 as select key, value from merge_src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@merge_src +POSTHOOK: Output: database:default POSTHOOK: Output: default@merge_src2 PREHOOK: query: select * from merge_src2 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out index 7d84614..0006ce9 100644 --- a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out @@ -3,11 +3,14 @@ PREHOOK: query: -- SORT_QUERY_RESULTS create table src_10 as select * from src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_10 POSTHOOK: query: -- SORT_QUERY_RESULTS create table src_10 as select * from src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_10 PREHOOK: query: create table src_lv1 (key string, value string) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/multi_join_union.q.out b/ql/src/test/results/clientpositive/multi_join_union.q.out index 3e52390..2ed267d 100644 --- a/ql/src/test/results/clientpositive/multi_join_union.q.out +++ b/ql/src/test/results/clientpositive/multi_join_union.q.out @@ -1,30 +1,42 @@ PREHOOK: query: CREATE TABLE src11 as SELECT * FROM src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src11 POSTHOOK: query: CREATE TABLE src11 as SELECT * FROM src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src11 PREHOOK: query: CREATE TABLE src12 as SELECT * FROM src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src12 POSTHOOK: query: CREATE TABLE src12 as SELECT * FROM src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src12 PREHOOK: query: CREATE TABLE src13 as SELECT * FROM src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: 
default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src13 POSTHOOK: query: CREATE TABLE src13 as SELECT * FROM src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src13 PREHOOK: query: CREATE TABLE src14 as SELECT * FROM src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src14 POSTHOOK: query: CREATE TABLE src14 as SELECT * FROM src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src14 PREHOOK: query: EXPLAIN SELECT * FROM src11 a JOIN diff --git a/ql/src/test/results/clientpositive/nestedvirtual.q.out b/ql/src/test/results/clientpositive/nestedvirtual.q.out index bc0be7c..05af502 100644 --- a/ql/src/test/results/clientpositive/nestedvirtual.q.out +++ b/ql/src/test/results/clientpositive/nestedvirtual.q.out @@ -18,10 +18,13 @@ PREHOOK: query: create table jssarma_nilzma_bad as select a.val, a.filename, a.o PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@pokes PREHOOK: Input: default@pokes2 +PREHOOK: Output: database:default +PREHOOK: Output: default@jssarma_nilzma_bad POSTHOOK: query: create table jssarma_nilzma_bad as select a.val, a.filename, a.offset from (select hash(foo) as val, INPUT__FILE__NAME as filename, BLOCK__OFFSET__INSIDE__FILE as offset from pokes) a join pokes2 b on (a.val = b.foo) POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@pokes POSTHOOK: Input: default@pokes2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@jssarma_nilzma_bad PREHOOK: query: drop table jssarma_nilzma_bad PREHOOK: type: DROPTABLE @@ -67,10 +70,13 @@ PREHOOK: query: create table jssarma_nilzma_bad as select a.val, a.filename, a.o PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@pokes PREHOOK: Input: default@pokes2 +PREHOOK: Output: database:default +PREHOOK: Output: default@jssarma_nilzma_bad POSTHOOK: query: create table jssarma_nilzma_bad as select a.val, a.filename, a.offset from (select hash(foo) as val, INPUT__FILE__NAME as filename, BLOCK__OFFSET__INSIDE__FILE as offset from pokes) a join pokes2 b on (a.val = b.foo) POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@pokes POSTHOOK: Input: default@pokes2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@jssarma_nilzma_bad PREHOOK: query: drop table jssarma_nilzma_bad PREHOOK: type: DROPTABLE @@ -116,10 +122,13 @@ PREHOOK: query: create table jssarma_nilzma_bad as select a.val, a.filename, a.o PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@pokes PREHOOK: Input: default@pokes2 +PREHOOK: Output: database:default +PREHOOK: Output: default@jssarma_nilzma_bad POSTHOOK: query: create table jssarma_nilzma_bad as select a.val, a.filename, a.offset from (select hash(foo) as val, INPUT__FILE__NAME as filename, BLOCK__OFFSET__INSIDE__FILE as offset from pokes) a join pokes2 b on (a.val = b.foo) POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@pokes POSTHOOK: Input: default@pokes2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@jssarma_nilzma_bad PREHOOK: query: drop table jssarma_nilzma_bad PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/nonmr_fetch.q.out b/ql/src/test/results/clientpositive/nonmr_fetch.q.out index 2d7496b..4885019 100644 --- a/ql/src/test/results/clientpositive/nonmr_fetch.q.out +++ b/ql/src/test/results/clientpositive/nonmr_fetch.q.out @@ 
-1015,7 +1015,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: srcx + name: default.srcx Stage: Stage-2 Stats-Aggr Operator diff --git a/ql/src/test/results/clientpositive/nullformat.q.out b/ql/src/test/results/clientpositive/nullformat.q.out index 553b129..6cfc2b8 100644 --- a/ql/src/test/results/clientpositive/nullformat.q.out +++ b/ql/src/test/results/clientpositive/nullformat.q.out @@ -53,7 +53,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: null_tab1 + name: default.null_tab1 PREHOOK: query: CREATE TABLE null_tab1(a STRING, b STRING) ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/nullformatCTAS.q.out b/ql/src/test/results/clientpositive/nullformatCTAS.q.out index 1d76a6d..892de6e 100644 --- a/ql/src/test/results/clientpositive/nullformatCTAS.q.out +++ b/ql/src/test/results/clientpositive/nullformatCTAS.q.out @@ -97,7 +97,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: null_tab3 + name: default.null_tab3 Stage: Stage-2 Stats-Aggr Operator @@ -136,10 +136,13 @@ PREHOOK: query: CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'foo AS SELECT a, b FROM base_tab PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@base_tab +PREHOOK: Output: database:default +PREHOOK: Output: default@null_tab3 POSTHOOK: query: CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' AS SELECT a, b FROM base_tab POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@base_tab +POSTHOOK: Output: database:default POSTHOOK: Output: default@null_tab3 PREHOOK: query: DESCRIBE EXTENDED null_tab3 PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/orc_analyze.q.out b/ql/src/test/results/clientpositive/orc_analyze.q.out index 979d2a7..d1dc8b2 100644 --- a/ql/src/test/results/clientpositive/orc_analyze.q.out +++ b/ql/src/test/results/clientpositive/orc_analyze.q.out @@ -105,7 +105,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3042 + totalSize 3098 #### A masked pattern was here #### # Storage Information @@ -195,7 +195,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3042 + totalSize 3098 #### A masked pattern was here #### # Storage Information @@ -309,7 +309,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 1962 + totalSize 2016 #### A masked pattern was here #### # Storage Information @@ -354,7 +354,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 1981 + totalSize 2036 #### A masked pattern was here #### # Storage Information @@ -456,7 +456,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 1962 + totalSize 2016 #### A masked pattern was here #### # Storage Information @@ -501,7 +501,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 1981 + totalSize 2036 #### A masked pattern was here #### # Storage Information @@ -621,7 +621,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 21980 - totalSize 4746 + totalSize 4955 
#### A masked pattern was here #### # Storage Information @@ -666,7 +666,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 22048 - totalSize 4829 + totalSize 5046 #### A masked pattern was here #### # Storage Information @@ -774,7 +774,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 21980 - totalSize 4746 + totalSize 4955 #### A masked pattern was here #### # Storage Information @@ -819,7 +819,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 22048 - totalSize 4829 + totalSize 5046 #### A masked pattern was here #### # Storage Information @@ -984,7 +984,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 1962 + totalSize 2016 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/orc_createas1.q.out b/ql/src/test/results/clientpositive/orc_createas1.q.out index a104480..b0c58dd 100644 --- a/ql/src/test/results/clientpositive/orc_createas1.q.out +++ b/ql/src/test/results/clientpositive/orc_createas1.q.out @@ -105,7 +105,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: orc_createas1b + name: default.orc_createas1b Stage: Stage-2 Stats-Aggr Operator @@ -131,11 +131,14 @@ PREHOOK: query: CREATE TABLE orc_createas1b SELECT * FROM src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_createas1b POSTHOOK: query: CREATE TABLE orc_createas1b STORED AS ORC AS SELECT * FROM src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_createas1b PREHOOK: query: EXPLAIN SELECT * FROM orc_createas1b ORDER BY key LIMIT 5 PREHOOK: type: QUERY @@ -261,7 +264,7 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde name: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: orc_createas1c + name: default.orc_createas1c Stage: Stage-2 Stats-Aggr Operator @@ -290,6 +293,8 @@ PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@orc_createas1a PREHOOK: Input: default@orc_createas1a@ds=1 PREHOOK: Input: default@orc_createas1a@ds=2 +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_createas1c POSTHOOK: query: CREATE TABLE orc_createas1c STORED AS ORC AS SELECT key, value, PMOD(HASH(key), 50) as part @@ -298,6 +303,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@orc_createas1a POSTHOOK: Input: default@orc_createas1a@ds=1 POSTHOOK: Input: default@orc_createas1a@ds=2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_createas1c PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c) diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out new file mode 100644 index 0000000..e2d634b --- /dev/null +++ b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out @@ -0,0 +1,288 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_merge5 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +create table orc_merge5 (userid bigint, string1 string, subtype 
double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_merge5 +PREHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_merge5b +POSTHOOK: query: create table orc_merge5b (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_merge5b +PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@orc_merge5 +POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@orc_merge5 +PREHOOK: query: -- 3 mappers +explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: type: QUERY +POSTHOOK: query: -- 3 mappers +explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: orc_merge5 + Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (userid <= 13) (type: boolean) + Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.orc_merge5b + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.orc_merge5b + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5 +PREHOOK: Output: default@orc_merge5b +POSTHOOK: query: insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5 +POSTHOOK: Output: default@orc_merge5b +POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5 +PREHOOK: Output: default@orc_merge5b +POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5 +POSTHOOK: Output: default@orc_merge5b +POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5 +PREHOOK: Output: default@orc_merge5b +POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5 +POSTHOOK: Output: default@orc_merge5b +POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5 +PREHOOK: Output: default@orc_merge5b +POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5 +POSTHOOK: Output: default@orc_merge5b +POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] 
+PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5 +PREHOOK: Output: default@orc_merge5b +POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5 +POSTHOOK: Output: default@orc_merge5b +POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: -- 5 files total +analyze table orc_merge5b compute statistics noscan +PREHOOK: type: QUERY +PREHOOK: Output: default@orc_merge5b +POSTHOOK: query: -- 5 files total +analyze table orc_merge5b compute statistics noscan +POSTHOOK: type: QUERY +POSTHOOK: Output: default@orc_merge5b +PREHOOK: query: desc formatted orc_merge5b +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@orc_merge5b +POSTHOOK: query: desc formatted orc_merge5b +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@orc_merge5b +# col_name data_type comment + +userid bigint +string1 string +subtype double +decimal1 decimal(10,0) +ts timestamp + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE true + numFiles 5 + numRows 15 + rawDataSize 3825 + totalSize 2862 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from orc_merge5b +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_merge5b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b +#### A masked pattern was here #### +13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 +2 foo 0.8 1 1969-12-31 16:00:00 +2 foo 0.8 1 1969-12-31 16:00:00 +2 foo 0.8 1 1969-12-31 16:00:00 +2 foo 0.8 1 1969-12-31 16:00:00 +2 foo 0.8 1 1969-12-31 16:00:00 +5 eat 0.8 6 1969-12-31 16:00:20 +5 eat 0.8 6 1969-12-31 16:00:20 +5 eat 0.8 6 1969-12-31 16:00:20 +5 eat 0.8 6 1969-12-31 16:00:20 +5 eat 0.8 6 1969-12-31 16:00:20 +PREHOOK: query: alter table orc_merge5b concatenate +PREHOOK: type: ALTER_TABLE_MERGE +PREHOOK: Input: default@orc_merge5b +PREHOOK: Output: default@orc_merge5b +POSTHOOK: query: alter table orc_merge5b concatenate +POSTHOOK: type: ALTER_TABLE_MERGE +POSTHOOK: Input: default@orc_merge5b +POSTHOOK: Output: default@orc_merge5b +PREHOOK: query: -- 3 file after merging - all 0.12 format files will be 
merged and 0.11 files will be left behind +analyze table orc_merge5b compute statistics noscan +PREHOOK: type: QUERY +PREHOOK: Output: default@orc_merge5b +POSTHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind +analyze table orc_merge5b compute statistics noscan +POSTHOOK: type: QUERY +POSTHOOK: Output: default@orc_merge5b +PREHOOK: query: desc formatted orc_merge5b +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@orc_merge5b +POSTHOOK: query: desc formatted orc_merge5b +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@orc_merge5b +# col_name data_type comment + +userid bigint +string1 string +subtype double +decimal1 decimal(10,0) +ts timestamp + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE true + numFiles 3 + numRows 15 + rawDataSize 3825 + totalSize 2325 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from orc_merge5b +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_merge5b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b +#### A masked pattern was here #### +13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 +2 foo 0.8 1 1969-12-31 16:00:00 +2 foo 0.8 1 1969-12-31 16:00:00 +2 foo 0.8 1 1969-12-31 16:00:00 +2 foo 0.8 1 1969-12-31 16:00:00 +2 foo 0.8 1 1969-12-31 16:00:00 +5 eat 0.8 6 1969-12-31 16:00:20 +5 eat 0.8 6 1969-12-31 16:00:20 +5 eat 0.8 6 1969-12-31 16:00:20 +5 eat 0.8 6 1969-12-31 16:00:20 +5 eat 0.8 6 1969-12-31 16:00:20 diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out new file mode 100644 index 0000000..c32fbf6 --- /dev/null +++ b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out @@ -0,0 +1,527 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +-- orc merge file tests for dynamic partition case + +create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_merge5 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +-- orc merge file tests for dynamic partition case + +create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@orc_merge5 +PREHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@orc_merge5a +POSTHOOK: query: create table orc_merge5a (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (st double) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: 
database:default +POSTHOOK: Output: default@orc_merge5a +PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@orc_merge5 +POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@orc_merge5 +PREHOOK: query: explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: orc_merge5 + Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: userid (type: bigint), string1 (type: string), subtype (type: double), decimal1 (type: decimal(10,0)), ts (type: timestamp), subtype (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.orc_merge5a + + Stage: Stage-0 + Move Operator + tables: + partition: + st + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.orc_merge5a + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5 +PREHOOK: Output: default@orc_merge5a +POSTHOOK: query: insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5 +POSTHOOK: Output: default@orc_merge5a@st=0.8 +POSTHOOK: Output: default@orc_merge5a@st=1.8 +POSTHOOK: Output: default@orc_merge5a@st=8.0 +POSTHOOK: Output: default@orc_merge5a@st=80.0 +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5 +PREHOOK: Output: default@orc_merge5a +POSTHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5 +POSTHOOK: Output: default@orc_merge5a@st=0.8 +POSTHOOK: Output: default@orc_merge5a@st=1.8 +POSTHOOK: Output: default@orc_merge5a@st=8.0 +POSTHOOK: Output: default@orc_merge5a@st=80.0 +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5 +PREHOOK: Output: default@orc_merge5a +POSTHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5 +POSTHOOK: Output: default@orc_merge5a@st=0.8 +POSTHOOK: Output: default@orc_merge5a@st=1.8 +POSTHOOK: Output: default@orc_merge5a@st=8.0 +POSTHOOK: Output: default@orc_merge5a@st=80.0 +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5 +PREHOOK: Output: default@orc_merge5a +POSTHOOK: query: insert into table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 order by userid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5 +POSTHOOK: Output: default@orc_merge5a@st=0.8 +POSTHOOK: Output: default@orc_merge5a@st=1.8 +POSTHOOK: Output: default@orc_merge5a@st=8.0 +POSTHOOK: Output: default@orc_merge5a@st=80.0 +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=0.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).decimal1 SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=1.8).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=8.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan +PREHOOK: type: QUERY +PREHOOK: Output: default@orc_merge5a +PREHOOK: Output: default@orc_merge5a@st=80.0 +POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan +POSTHOOK: type: QUERY +POSTHOOK: Output: default@orc_merge5a +POSTHOOK: Output: default@orc_merge5a@st=80.0 +PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan +PREHOOK: type: QUERY +PREHOOK: Output: default@orc_merge5a +PREHOOK: Output: default@orc_merge5a@st=0.8 +POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan +POSTHOOK: type: QUERY +POSTHOOK: Output: default@orc_merge5a +POSTHOOK: Output: default@orc_merge5a@st=0.8 +PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@orc_merge5a +POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@orc_merge5a +# col_name data_type comment + +userid bigint +string1 string +subtype double +decimal1 decimal(10,0) +ts timestamp + +# Partition Information +# col_name data_type comment + +st double + +# Detailed Partition Information +Partition Value: [80.0] +Database: default +Table: orc_merge5a +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + 
COLUMN_STATS_ACCURATE true + numFiles 4 + numRows 4 + rawDataSize 1020 + totalSize 2092 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@orc_merge5a +POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@orc_merge5a +# col_name data_type comment + +userid bigint +string1 string +subtype double +decimal1 decimal(10,0) +ts timestamp + +# Partition Information +# col_name data_type comment + +st double + +# Detailed Partition Information +Partition Value: [0.8] +Database: default +Table: orc_merge5a +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 4 + numRows 8 + rawDataSize 2040 + totalSize 2204 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: show partitions orc_merge5a +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@orc_merge5a +POSTHOOK: query: show partitions orc_merge5a +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@orc_merge5a +st=0.8 +st=1.8 +st=8.0 +st=80.0 +PREHOOK: query: select * from orc_merge5a where userid<=13 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a +PREHOOK: Input: default@orc_merge5a@st=0.8 +PREHOOK: Input: default@orc_merge5a@st=1.8 +PREHOOK: Input: default@orc_merge5a@st=8.0 +PREHOOK: Input: default@orc_merge5a@st=80.0 +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_merge5a where userid<=13 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a +POSTHOOK: Input: default@orc_merge5a@st=0.8 +POSTHOOK: Input: default@orc_merge5a@st=1.8 +POSTHOOK: Input: default@orc_merge5a@st=8.0 +POSTHOOK: Input: default@orc_merge5a@st=80.0 +#### A masked pattern was here #### +13 bar 80.0 2 1969-12-31 16:00:05 80.0 +13 bar 80.0 2 1969-12-31 16:00:05 80.0 +13 bar 80.0 2 1969-12-31 16:00:05 80.0 +13 bar 80.0 2 1969-12-31 16:00:05 80.0 +2 foo 0.8 1 1969-12-31 16:00:00 0.8 +2 foo 0.8 1 1969-12-31 16:00:00 0.8 +2 foo 0.8 1 1969-12-31 16:00:00 0.8 +2 foo 0.8 1 1969-12-31 16:00:00 0.8 +5 eat 0.8 6 1969-12-31 16:00:20 0.8 +5 eat 0.8 6 1969-12-31 16:00:20 0.8 +5 eat 0.8 6 1969-12-31 16:00:20 0.8 +5 eat 0.8 6 1969-12-31 16:00:20 0.8 +PREHOOK: query: explain alter table orc_merge5a partition(st=80.0) concatenate +PREHOOK: type: ALTER_PARTITION_MERGE +POSTHOOK: query: explain alter table orc_merge5a partition(st=80.0) concatenate +POSTHOOK: type: ALTER_PARTITION_MERGE +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + Stage-2 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-0 + + Stage: Stage-1 + Move Operator + tables: + partition: + st 80.0 + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.orc_merge5a + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate +PREHOOK: type: ALTER_PARTITION_MERGE +PREHOOK: Input: default@orc_merge5a +PREHOOK: Output: default@orc_merge5a@st=80.0 +POSTHOOK: query: alter table orc_merge5a partition(st=80.0) concatenate +POSTHOOK: type: ALTER_PARTITION_MERGE +POSTHOOK: Input: default@orc_merge5a +POSTHOOK: Output: default@orc_merge5a@st=80.0 +PREHOOK: query: alter table orc_merge5a partition(st=0.8) concatenate +PREHOOK: type: ALTER_PARTITION_MERGE +PREHOOK: Input: default@orc_merge5a +PREHOOK: Output: default@orc_merge5a@st=0.8 +POSTHOOK: query: alter table orc_merge5a partition(st=0.8) concatenate +POSTHOOK: type: ALTER_PARTITION_MERGE +POSTHOOK: Input: default@orc_merge5a +POSTHOOK: Output: default@orc_merge5a@st=0.8 +PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan +PREHOOK: type: QUERY +PREHOOK: Output: default@orc_merge5a +PREHOOK: Output: default@orc_merge5a@st=80.0 +POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan +POSTHOOK: type: QUERY +POSTHOOK: Output: default@orc_merge5a +POSTHOOK: Output: default@orc_merge5a@st=80.0 +PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan +PREHOOK: type: QUERY +PREHOOK: Output: default@orc_merge5a +PREHOOK: Output: default@orc_merge5a@st=0.8 +POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan +POSTHOOK: type: QUERY +POSTHOOK: Output: default@orc_merge5a +POSTHOOK: Output: default@orc_merge5a@st=0.8 +PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@orc_merge5a +POSTHOOK: query: desc formatted orc_merge5a partition(st=80.0) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@orc_merge5a +# col_name data_type comment + +userid bigint +string1 string +subtype double +decimal1 decimal(10,0) +ts timestamp + +# Partition Information +# col_name data_type comment + +st double + +# Detailed Partition Information +Partition Value: [80.0] +Database: default +Table: orc_merge5a +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 3 + numRows 4 + rawDataSize 1020 + totalSize 1851 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: desc formatted orc_merge5a partition(st=0.8) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@orc_merge5a +POSTHOOK: query: desc formatted orc_merge5a partition(st=0.8) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@orc_merge5a +# col_name data_type comment + +userid bigint +string1 string +subtype double +decimal1 decimal(10,0) +ts timestamp + +# Partition Information +# col_name data_type comment + +st double + +# Detailed Partition Information +Partition Value: [0.8] +Database: default +Table: orc_merge5a +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 3 + numRows 8 + 
rawDataSize 2040 + totalSize 1944 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: show partitions orc_merge5a +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@orc_merge5a +POSTHOOK: query: show partitions orc_merge5a +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@orc_merge5a +st=0.8 +st=1.8 +st=8.0 +st=80.0 +PREHOOK: query: select * from orc_merge5a where userid<=13 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a +PREHOOK: Input: default@orc_merge5a@st=0.8 +PREHOOK: Input: default@orc_merge5a@st=1.8 +PREHOOK: Input: default@orc_merge5a@st=8.0 +PREHOOK: Input: default@orc_merge5a@st=80.0 +#### A masked pattern was here #### +POSTHOOK: query: select * from orc_merge5a where userid<=13 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a +POSTHOOK: Input: default@orc_merge5a@st=0.8 +POSTHOOK: Input: default@orc_merge5a@st=1.8 +POSTHOOK: Input: default@orc_merge5a@st=8.0 +POSTHOOK: Input: default@orc_merge5a@st=80.0 +#### A masked pattern was here #### +13 bar 80.0 2 1969-12-31 16:00:05 80.0 +13 bar 80.0 2 1969-12-31 16:00:05 80.0 +13 bar 80.0 2 1969-12-31 16:00:05 80.0 +13 bar 80.0 2 1969-12-31 16:00:05 80.0 +2 foo 0.8 1 1969-12-31 16:00:00 0.8 +2 foo 0.8 1 1969-12-31 16:00:00 0.8 +2 foo 0.8 1 1969-12-31 16:00:00 0.8 +2 foo 0.8 1 1969-12-31 16:00:00 0.8 +5 eat 0.8 6 1969-12-31 16:00:20 0.8 +5 eat 0.8 6 1969-12-31 16:00:20 0.8 +5 eat 0.8 6 1969-12-31 16:00:20 0.8 +5 eat 0.8 6 1969-12-31 16:00:20 0.8 diff --git a/ql/src/test/results/clientpositive/orc_ppd_timestamp.q.out b/ql/src/test/results/clientpositive/orc_ppd_timestamp.q.out new file mode 100644 index 0000000..f1030a9 --- /dev/null +++ b/ql/src/test/results/clientpositive/orc_ppd_timestamp.q.out @@ -0,0 +1,292 @@ +PREHOOK: query: create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), ts timestamp) stored as orc tblproperties("orc.stripe.size"="16777216") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@newtypesorc +POSTHOOK: query: create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), ts timestamp) stored as orc tblproperties("orc.stripe.size"="16777216") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@newtypesorc +PREHOOK: query: insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2) uniontbl +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@newtypesorc +POSTHOOK: query: insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("2011-01-01 01:01:01" as timestamp) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("2011-01-20 01:01:01" as timestamp) from src src2) uniontbl +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@newtypesorc +POSTHOOK: Lineage: newtypesorc.c EXPRESSION [] +POSTHOOK: Lineage: newtypesorc.d EXPRESSION [] +POSTHOOK: Lineage: 
newtypesorc.ts EXPRESSION [] +POSTHOOK: Lineage: newtypesorc.v EXPRESSION [] +PREHOOK: query: -- timestamp data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) +select sum(hash(*)) from newtypesorc where cast(ts as string)='2011-01-01 01:01:01' +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: -- timestamp data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) +select sum(hash(*)) from newtypesorc where cast(ts as string)='2011-01-01 01:01:01' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +445653015500 +PREHOOK: query: select sum(hash(*)) from newtypesorc where cast(ts as string)='2011-01-01 01:01:01' +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(*)) from newtypesorc where cast(ts as string)='2011-01-01 01:01:01' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +445653015500 +PREHOOK: query: select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as timestamp) +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as timestamp) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +445653015500 +PREHOOK: query: select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as timestamp) +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as timestamp) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +445653015500 +PREHOOK: query: select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as varchar(20)) +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as varchar(20)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +445653015500 +PREHOOK: query: select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as varchar(20)) +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(*)) from newtypesorc where ts=cast('2011-01-01 01:01:01' as varchar(20)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +445653015500 +PREHOOK: query: select sum(hash(*)) from newtypesorc where ts!=cast('2011-01-01 01:01:01' as timestamp) +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(*)) from newtypesorc where ts!=cast('2011-01-01 01:01:01' as timestamp) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +1033237945500 +PREHOOK: query: select sum(hash(*)) from newtypesorc where ts!=cast('2011-01-01 01:01:01' as timestamp) +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(*)) from newtypesorc where ts!=cast('2011-01-01 01:01:01' as timestamp) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A 
masked pattern was here #### +1033237945500 +PREHOOK: query: select sum(hash(*)) from newtypesorc where ts 'val_9') PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@cv1 POSTHOOK: query: -- view test create view cv1 as select * @@ -135,6 +137,7 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9') POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@cv1 PREHOOK: query: select * from cv1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/subquery_exists_having.q.out b/ql/src/test/results/clientpositive/subquery_exists_having.q.out index 003ae75..765e236 100644 --- a/ql/src/test/results/clientpositive/subquery_exists_having.q.out +++ b/ql/src/test/results/clientpositive/subquery_exists_having.q.out @@ -320,6 +320,8 @@ having exists ) PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@cv1 POSTHOOK: query: -- view test create view cv1 as select b.key, count(*) as c @@ -332,6 +334,7 @@ having exists ) POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@cv1 PREHOOK: query: select * from cv1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/subquery_notin.q.out b/ql/src/test/results/clientpositive/subquery_notin.q.out index 7b8e974..dc6fe78 100644 --- a/ql/src/test/results/clientpositive/subquery_notin.q.out +++ b/ql/src/test/results/clientpositive/subquery_notin.q.out @@ -1413,22 +1413,28 @@ create view T1_v as select key from src where key <'11' PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@T1_v POSTHOOK: query: -- null check create view T1_v as select key from src where key <'11' POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@T1_v PREHOOK: query: create view T2_v as select case when key > '104' then null else key end as key from T1_v PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Input: default@t1_v +PREHOOK: Output: database:default +PREHOOK: Output: default@T2_v POSTHOOK: query: create view T2_v as select case when key > '104' then null else key end as key from T1_v POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Input: default@t1_v +POSTHOOK: Output: database:default POSTHOOK: Output: default@T2_v Warning: Shuffle Join JOIN[24][tables = [t1_v, sq_1_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: explain diff --git a/ql/src/test/results/clientpositive/subquery_views.q.out b/ql/src/test/results/clientpositive/subquery_views.q.out index 5e33699..9844d99 100644 --- a/ql/src/test/results/clientpositive/subquery_views.q.out +++ b/ql/src/test/results/clientpositive/subquery_views.q.out @@ -10,6 +10,8 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9') PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@cv1 POSTHOOK: query: -- SORT_QUERY_RESULTS -- exists test @@ -22,6 +24,7 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9') POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@cv1 PREHOOK: query: select * from cv1 where cv1.key in (select key from cv1 c where c.key > '95') @@ -51,6 +54,8 @@ where 
b.key not in ) PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@cv2 POSTHOOK: query: -- not in test create view cv2 as select * @@ -62,6 +67,7 @@ where b.key not in ) POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@cv2 Warning: Shuffle Join JOIN[42][tables = [b, sq_1_notin_nullcheck]] in Stage 'Stage-5:MAPRED' is a cross product Warning: Shuffle Join JOIN[18][tables = [b, sq_1_notin_nullcheck]] in Stage 'Stage-1:MAPRED' is a cross product @@ -416,6 +422,8 @@ group by key, value having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key ) PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@cv3 POSTHOOK: query: -- in where + having create view cv3 as select key, value, count(*) @@ -425,6 +433,7 @@ group by key, value having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key ) POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@cv3 PREHOOK: query: select * from cv3 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/symlink_text_input_format.q.out b/ql/src/test/results/clientpositive/symlink_text_input_format.q.out index 65d9a89..f9d517c 100644 --- a/ql/src/test/results/clientpositive/symlink_text_input_format.q.out +++ b/ql/src/test/results/clientpositive/symlink_text_input_format.q.out @@ -18,7 +18,7 @@ STAGE PLANS: columns: key string, value string input format: org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat - name: symlink_text_input_format + name: default.symlink_text_input_format PREHOOK: query: CREATE TABLE symlink_text_input_format (key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat' PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/temp_table.q.out b/ql/src/test/results/clientpositive/temp_table.q.out index b2dc8b3..0fa24dd 100644 --- a/ql/src/test/results/clientpositive/temp_table.q.out +++ b/ql/src/test/results/clientpositive/temp_table.q.out @@ -59,7 +59,7 @@ STAGE PLANS: #### A masked pattern was here #### output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: foo + name: default.foo isTemporary: true Stage: Stage-2 @@ -98,9 +98,12 @@ STAGE PLANS: PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@foo POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@foo PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 PREHOOK: type: CREATETABLE_AS_SELECT @@ -163,7 +166,7 @@ STAGE PLANS: #### A masked pattern was here #### output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: bar + name: default.bar isTemporary: true Stage: Stage-2 @@ -202,9 +205,12 @@ STAGE PLANS: PREHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE 
key % 2 = 1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@bar POSTHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@bar PREHOOK: query: DESCRIBE foo PREHOOK: type: DESCTABLE @@ -452,9 +458,12 @@ POSTHOOK: type: SHOWTABLES PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@foo +PREHOOK: Output: database:two +PREHOOK: Output: two@foo POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@foo +POSTHOOK: Output: database:two POSTHOOK: Output: two@foo PREHOOK: query: SHOW TABLES PREHOOK: type: SHOWTABLES diff --git a/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out new file mode 100644 index 0000000..b021b70 --- /dev/null +++ b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out @@ -0,0 +1,510 @@ +PREHOOK: query: -- Based on display_colstats_tbllvl.q, output should be almost exactly the same. +DROP TABLE IF EXISTS UserVisits_web_text_none +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- Based on display_colstats_tbllvl.q, output should be almost exactly the same. +DROP TABLE IF EXISTS UserVisits_web_text_none +POSTHOOK: type: DROPTABLE +PREHOOK: query: -- Hack, set external location because generated filename changes during test runs +CREATE TEMPORARY EXTERNAL TABLE UserVisits_web_text_none ( + sourceIP string, + destURL string, + visitDate string, + adRevenue float, + userAgent string, + cCode string, + lCode string, + sKeyword string, + avgTimeOnSite int) +row format delimited fields terminated by '|' stored as textfile +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@UserVisits_web_text_none +POSTHOOK: query: -- Hack, set external location because generated filename changes during test runs +CREATE TEMPORARY EXTERNAL TABLE UserVisits_web_text_none ( + sourceIP string, + destURL string, + visitDate string, + adRevenue float, + userAgent string, + cCode string, + lCode string, + sKeyword string, + avgTimeOnSite int) +row format delimited fields terminated by '|' stored as textfile +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@UserVisits_web_text_none +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@uservisits_web_text_none +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@uservisits_web_text_none +PREHOOK: query: desc extended UserVisits_web_text_none sourceIP +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@uservisits_web_text_none +POSTHOOK: query: desc extended UserVisits_web_text_none sourceIP +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@uservisits_web_text_none +sourceIP string from deserializer +PREHOOK: query: desc formatted 
UserVisits_web_text_none sourceIP +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@uservisits_web_text_none +POSTHOOK: query: desc formatted UserVisits_web_text_none sourceIP +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@uservisits_web_text_none +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +sourceIP string from deserializer +PREHOOK: query: explain +analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue +PREHOOK: type: QUERY +POSTHOOK: query: explain +analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Map Reduce + Map Operator Tree: + TableScan + alias: uservisits_web_text_none + Select Operator + expressions: sourceip (type: string), avgtimeonsite (type: int), adrevenue (type: float) + outputColumnNames: sourceip, avgtimeonsite, adrevenue + Group By Operator + aggregations: compute_stats(sourceip, 16), compute_stats(avgtimeonsite, 16), compute_stats(adrevenue, 16) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + sort order: + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Select Operator + expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-1 + Column Stats Work + Column Stats Desc: + Columns: sourceIP, avgTimeOnSite, adRevenue + Column Types: string, int, float + Table: uservisits_web_text_none + +PREHOOK: query: explain extended +analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue +PREHOOK: type: QUERY +POSTHOOK: query: explain extended +analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_ANALYZE + TOK_TAB + TOK_TABNAME + UserVisits_web_text_none + columns + TOK_TABCOLNAME + sourceIP + avgTimeOnSite + adRevenue + + +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Map Reduce + Map Operator Tree: + TableScan + alias: uservisits_web_text_none + GatherStats: false + Select Operator + expressions: sourceip (type: string), avgtimeonsite (type: int), adrevenue (type: float) + outputColumnNames: sourceip, avgtimeonsite, adrevenue + Group By Operator + aggregations: compute_stats(sourceip, 16), compute_stats(avgtimeonsite, 16), compute_stats(adrevenue, 16) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + sort order: + tag: -1 + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: uservisits_web_text_none + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + EXTERNAL TRUE + bucket_count -1 + columns sourceip,desturl,visitdate,adrevenue,useragent,ccode,lcode,skeyword,avgtimeonsite + columns.comments + columns.types string:string:string:float:string:string:string:string:int + field.delim | +#### A masked pattern was here #### + name default.uservisits_web_text_none + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite} + serialization.format | + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 7060 + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + EXTERNAL TRUE + bucket_count -1 + columns sourceip,desturl,visitdate,adrevenue,useragent,ccode,lcode,skeyword,avgtimeonsite + columns.comments + columns.types string:string:string:float:string:string:string:string:int + field.delim | +#### A masked pattern was here #### + name default.uservisits_web_text_none + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite} + serialization.format | + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 7060 + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.uservisits_web_text_none + name: default.uservisits_web_text_none + Truncated Path -> Alias: +#### A masked pattern was here #### + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Select Operator + expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2 + columns.types struct:struct:struct + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-1 + Column Stats Work + Column Stats Desc: + Columns: sourceIP, avgTimeOnSite, adRevenue + Column Types: string, int, float + Table: uservisits_web_text_none + Is Table Level Stats: true + +PREHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue +PREHOOK: type: QUERY +PREHOOK: Input: default@uservisits_web_text_none +#### A masked pattern was here #### +POSTHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue +POSTHOOK: type: 
QUERY +POSTHOOK: Input: default@uservisits_web_text_none +#### A masked pattern was here #### +PREHOOK: query: desc formatted UserVisits_web_text_none sourceIP +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@uservisits_web_text_none +POSTHOOK: query: desc formatted UserVisits_web_text_none sourceIP +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@uservisits_web_text_none +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +sourceIP string 0 69 12.763636363636364 13 from deserializer +PREHOOK: query: desc formatted UserVisits_web_text_none avgTimeOnSite +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@uservisits_web_text_none +POSTHOOK: query: desc formatted UserVisits_web_text_none avgTimeOnSite +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@uservisits_web_text_none +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +avgTimeOnSite int 1 9 0 11 from deserializer +PREHOOK: query: desc formatted UserVisits_web_text_none adRevenue +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@uservisits_web_text_none +POSTHOOK: query: desc formatted UserVisits_web_text_none adRevenue +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@uservisits_web_text_none +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +adRevenue float 13.099044799804688 492.98870849609375 0 58 from deserializer +PREHOOK: query: CREATE TEMPORARY TABLE empty_tab( + a int, + b double, + c string, + d boolean, + e binary) +row format delimited fields terminated by '|' stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@empty_tab +POSTHOOK: query: CREATE TEMPORARY TABLE empty_tab( + a int, + b double, + c string, + d boolean, + e binary) +row format delimited fields terminated by '|' stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@empty_tab +PREHOOK: query: desc formatted empty_tab a +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@empty_tab +POSTHOOK: query: desc formatted empty_tab a +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@empty_tab +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +a int from deserializer +PREHOOK: query: explain +analyze table empty_tab compute statistics for columns a,b,c,d,e +PREHOOK: type: QUERY +POSTHOOK: query: explain +analyze table empty_tab compute statistics for columns a,b,c,d,e +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Map Reduce + Map Operator Tree: + TableScan + alias: empty_tab + Select Operator + expressions: a (type: int), b (type: double), c (type: string), d (type: boolean), e (type: binary) + outputColumnNames: a, b, c, d, e + Group By Operator + aggregations: compute_stats(a, 16), compute_stats(b, 16), compute_stats(c, 16), compute_stats(d, 16), compute_stats(e, 16) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Reduce Output Operator + sort order: + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct), _col3 (type: struct), _col4 (type: struct) + Reduce Operator Tree: + Group By Operator + aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3), compute_stats(VALUE._col4) + mode: mergepartial + 
outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Select Operator + expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct), _col3 (type: struct), _col4 (type: struct) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-1 + Column Stats Work + Column Stats Desc: + Columns: a, b, c, d, e + Column Types: int, double, string, boolean, binary + Table: empty_tab + +PREHOOK: query: analyze table empty_tab compute statistics for columns a,b,c,d,e +PREHOOK: type: QUERY +PREHOOK: Input: default@empty_tab +#### A masked pattern was here #### +POSTHOOK: query: analyze table empty_tab compute statistics for columns a,b,c,d,e +POSTHOOK: type: QUERY +POSTHOOK: Input: default@empty_tab +#### A masked pattern was here #### +PREHOOK: query: desc formatted empty_tab a +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@empty_tab +POSTHOOK: query: desc formatted empty_tab a +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@empty_tab +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +a int 0 0 0 0 from deserializer +PREHOOK: query: desc formatted empty_tab b +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@empty_tab +POSTHOOK: query: desc formatted empty_tab b +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@empty_tab +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +b double 0.0 0.0 0 0 from deserializer +PREHOOK: query: CREATE DATABASE test +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:test +POSTHOOK: query: CREATE DATABASE test +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:test +PREHOOK: query: USE test +PREHOOK: type: SWITCHDATABASE +PREHOOK: Input: database:test +POSTHOOK: query: USE test +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Input: database:test +PREHOOK: query: CREATE TEMPORARY TABLE UserVisits_web_text_none ( + sourceIP string, + destURL string, + visitDate string, + adRevenue float, + userAgent string, + cCode string, + lCode string, + sKeyword string, + avgTimeOnSite int) +row format delimited fields terminated by '|' stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:test +PREHOOK: Output: test@UserVisits_web_text_none +POSTHOOK: query: CREATE TEMPORARY TABLE UserVisits_web_text_none ( + sourceIP string, + destURL string, + visitDate string, + adRevenue float, + userAgent string, + cCode string, + lCode string, + sKeyword string, + avgTimeOnSite int) +row format delimited fields terminated by '|' stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:test +POSTHOOK: Output: test@UserVisits_web_text_none +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: test@uservisits_web_text_none +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: test@uservisits_web_text_none +PREHOOK: query: desc extended UserVisits_web_text_none sourceIP +PREHOOK: type: DESCTABLE +PREHOOK: Input: test@uservisits_web_text_none +POSTHOOK: query: desc extended 
UserVisits_web_text_none sourceIP +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: test@uservisits_web_text_none +sourceIP string from deserializer +PREHOOK: query: desc extended test.UserVisits_web_text_none sourceIP +PREHOOK: type: DESCTABLE +PREHOOK: Input: test@uservisits_web_text_none +POSTHOOK: query: desc extended test.UserVisits_web_text_none sourceIP +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: test@uservisits_web_text_none +sourceIP string from deserializer +PREHOOK: query: desc extended default.UserVisits_web_text_none sourceIP +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@uservisits_web_text_none +POSTHOOK: query: desc extended default.UserVisits_web_text_none sourceIP +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@uservisits_web_text_none +sourceIP string from deserializer +PREHOOK: query: desc formatted UserVisits_web_text_none sourceIP +PREHOOK: type: DESCTABLE +PREHOOK: Input: test@uservisits_web_text_none +POSTHOOK: query: desc formatted UserVisits_web_text_none sourceIP +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: test@uservisits_web_text_none +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +sourceIP string from deserializer +PREHOOK: query: desc formatted test.UserVisits_web_text_none sourceIP +PREHOOK: type: DESCTABLE +PREHOOK: Input: test@uservisits_web_text_none +POSTHOOK: query: desc formatted test.UserVisits_web_text_none sourceIP +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: test@uservisits_web_text_none +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +sourceIP string from deserializer +PREHOOK: query: desc formatted default.UserVisits_web_text_none sourceIP +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@uservisits_web_text_none +POSTHOOK: query: desc formatted default.UserVisits_web_text_none sourceIP +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@uservisits_web_text_none +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +sourceIP string 0 69 12.763636363636364 13 from deserializer +PREHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sKeyword +PREHOOK: type: QUERY +PREHOOK: Input: test@uservisits_web_text_none +#### A masked pattern was here #### +POSTHOOK: query: analyze table UserVisits_web_text_none compute statistics for columns sKeyword +POSTHOOK: type: QUERY +POSTHOOK: Input: test@uservisits_web_text_none +#### A masked pattern was here #### +PREHOOK: query: desc extended UserVisits_web_text_none sKeyword +PREHOOK: type: DESCTABLE +PREHOOK: Input: test@uservisits_web_text_none +POSTHOOK: query: desc extended UserVisits_web_text_none sKeyword +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: test@uservisits_web_text_none +sKeyword string from deserializer +PREHOOK: query: desc formatted UserVisits_web_text_none sKeyword +PREHOOK: type: DESCTABLE +PREHOOK: Input: test@uservisits_web_text_none +POSTHOOK: query: desc formatted UserVisits_web_text_none sKeyword +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: test@uservisits_web_text_none +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +sKeyword string 0 49 7.872727272727273 19 from deserializer +PREHOOK: query: desc formatted test.UserVisits_web_text_none sKeyword +PREHOOK: type: DESCTABLE +PREHOOK: Input: test@uservisits_web_text_none +POSTHOOK: query: desc formatted test.UserVisits_web_text_none sKeyword +POSTHOOK: type: 
DESCTABLE +POSTHOOK: Input: test@uservisits_web_text_none +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +sKeyword string 0 49 7.872727272727273 19 from deserializer diff --git a/ql/src/test/results/clientpositive/temp_table_gb1.q.out b/ql/src/test/results/clientpositive/temp_table_gb1.q.out index eaaf561..573acce 100644 --- a/ql/src/test/results/clientpositive/temp_table_gb1.q.out +++ b/ql/src/test/results/clientpositive/temp_table_gb1.q.out @@ -11,9 +11,12 @@ POSTHOOK: Output: default@dest_g2 PREHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_temp POSTHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_temp PREHOOK: query: FROM src_temp INSERT OVERWRITE TABLE dest_g2 SELECT substr(src_temp.key,1,1), count(DISTINCT substr(src_temp.value,5)), concat(substr(src_temp.key,1,1),sum(substr(src_temp.value,5))) GROUP BY substr(src_temp.key,1,1) diff --git a/ql/src/test/results/clientpositive/temp_table_join1.q.out b/ql/src/test/results/clientpositive/temp_table_join1.q.out index 86ce39c..79d5774 100644 --- a/ql/src/test/results/clientpositive/temp_table_join1.q.out +++ b/ql/src/test/results/clientpositive/temp_table_join1.q.out @@ -1,16 +1,22 @@ PREHOOK: query: CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_nontemp POSTHOOK: query: CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_nontemp PREHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_temp POSTHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_temp PREHOOK: query: -- Non temp table join EXPLAIN diff --git a/ql/src/test/results/clientpositive/temp_table_names.q.out b/ql/src/test/results/clientpositive/temp_table_names.q.out index 7c79777..890ab14 100644 --- a/ql/src/test/results/clientpositive/temp_table_names.q.out +++ b/ql/src/test/results/clientpositive/temp_table_names.q.out @@ -1,12 +1,11 @@ PREHOOK: query: -- Test temp tables with upper/lower case names create temporary table Default.Temp_Table_Names (C1 string, c2 string) PREHOOK: type: CREATETABLE -PREHOOK: Output: Default@Default.Temp_Table_Names +PREHOOK: Output: Default@Temp_Table_Names PREHOOK: Output: database:default POSTHOOK: query: -- Test temp tables with upper/lower case names create temporary table Default.Temp_Table_Names (C1 string, c2 string) POSTHOOK: type: CREATETABLE -POSTHOOK: Output: Default@Default.Temp_Table_Names POSTHOOK: Output: Default@Temp_Table_Names POSTHOOK: Output: database:default PREHOOK: query: show tables 'Temp_Table*' diff --git a/ql/src/test/results/clientpositive/temp_table_precedence.q.out b/ql/src/test/results/clientpositive/temp_table_precedence.q.out index b4b003e..2b2e0aa 100644 --- 
a/ql/src/test/results/clientpositive/temp_table_precedence.q.out +++ b/ql/src/test/results/clientpositive/temp_table_precedence.q.out @@ -8,13 +8,12 @@ PREHOOK: query: -- Create non-temp tables create table ttp.tab1 (a1 string, a2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:ttp -PREHOOK: Output: ttp@ttp.tab1 +PREHOOK: Output: ttp@tab1 POSTHOOK: query: -- Create non-temp tables create table ttp.tab1 (a1 string, a2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:ttp POSTHOOK: Output: ttp@tab1 -POSTHOOK: Output: ttp@ttp.tab1 PREHOOK: query: insert overwrite table ttp.tab1 select * from src where key = 5 limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -47,12 +46,11 @@ POSTHOOK: Input: ttp@tab1 PREHOOK: query: create table ttp.tab2 (b1 string, b2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:ttp -PREHOOK: Output: ttp@ttp.tab2 +PREHOOK: Output: ttp@tab2 POSTHOOK: query: create table ttp.tab2 (b1 string, b2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:ttp POSTHOOK: Output: ttp@tab2 -POSTHOOK: Output: ttp@ttp.tab2 PREHOOK: query: insert overwrite table ttp.tab2 select * from src where key = 2 limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -84,13 +82,12 @@ PREHOOK: query: -- Now create temp table with same name create temporary table ttp.tab1 (c1 int, c2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:ttp -PREHOOK: Output: ttp@ttp.tab1 +PREHOOK: Output: ttp@tab1 POSTHOOK: query: -- Now create temp table with same name create temporary table ttp.tab1 (c1 int, c2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:ttp POSTHOOK: Output: ttp@tab1 -POSTHOOK: Output: ttp@ttp.tab1 PREHOOK: query: insert overwrite table ttp.tab1 select * from src where key = 0 limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/temp_table_subquery1.q.out b/ql/src/test/results/clientpositive/temp_table_subquery1.q.out index 3301890..8a9c537 100644 --- a/ql/src/test/results/clientpositive/temp_table_subquery1.q.out +++ b/ql/src/test/results/clientpositive/temp_table_subquery1.q.out @@ -1,9 +1,12 @@ PREHOOK: query: create temporary table src_temp as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_temp POSTHOOK: query: create temporary table src_temp as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src_temp PREHOOK: query: -- subquery exists select * diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out index fd7eff9..ba86137 100644 --- a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out @@ -2,21 +2,27 @@ PREHOOK: query: create table A as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@A POSTHOOK: query: create table A as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@A PREHOOK: query: create table B as select * from src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@B POSTHOOK: query: create table B as 
select * from src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@B Warning: Shuffle Join JOIN[4][tables = [a, b]] in Stage 'Reducer 2' is a cross product PREHOOK: query: explain select * from A join B diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out index 7774945..21e1a6d 100644 --- a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out @@ -2,21 +2,27 @@ PREHOOK: query: create table A as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@A POSTHOOK: query: create table A as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@A PREHOOK: query: create table B as select * from src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@B POSTHOOK: query: create table B as select * from src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@B Warning: Map Join MAPJOIN[7][bigTable=a] in task 'Map 2' is a cross product PREHOOK: query: explain select * from A join B diff --git a/ql/src/test/results/clientpositive/tez/ctas.q.out b/ql/src/test/results/clientpositive/tez/ctas.q.out index 5ff7981..e1feeac 100644 --- a/ql/src/test/results/clientpositive/tez/ctas.q.out +++ b/ql/src/test/results/clientpositive/tez/ctas.q.out @@ -91,7 +91,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: nzhang_CTAS1 + name: default.nzhang_CTAS1 Stage: Stage-3 Stats-Aggr Operator @@ -105,9 +105,12 @@ STAGE PLANS: PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_CTAS1 POSTHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_CTAS1 PREHOOK: query: select * from nzhang_CTAS1 PREHOOK: type: QUERY @@ -236,7 +239,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: nzhang_ctas2 + name: default.nzhang_ctas2 Stage: Stage-3 Stats-Aggr Operator @@ -250,9 +253,12 @@ STAGE PLANS: PREHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_ctas2 POSTHOOK: query: create table nzhang_ctas2 as select * from src sort by key, value limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_ctas2 PREHOOK: query: select * from nzhang_ctas2 PREHOOK: type: QUERY @@ -381,7 +387,7 @@ STAGE PLANS: input 
format: org.apache.hadoop.hive.ql.io.RCFileInputFormat output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat serde name: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: nzhang_ctas3 + name: default.nzhang_ctas3 Stage: Stage-3 Stats-Aggr Operator @@ -395,9 +401,12 @@ STAGE PLANS: PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_ctas3 POSTHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_ctas3 PREHOOK: query: select * from nzhang_ctas3 PREHOOK: type: QUERY @@ -592,7 +601,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: nzhang_ctas4 + name: default.nzhang_ctas4 Stage: Stage-3 Stats-Aggr Operator @@ -606,9 +615,12 @@ STAGE PLANS: PREHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_ctas4 POSTHOOK: query: create table nzhang_ctas4 row format delimited fields terminated by ',' stored as textfile as select key, value from src sort by key, value limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_ctas4 PREHOOK: query: select * from nzhang_ctas4 PREHOOK: type: QUERY @@ -854,7 +866,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: nzhang_ctas5 + name: default.nzhang_ctas5 Stage: Stage-3 Stats-Aggr Operator @@ -869,9 +881,12 @@ STAGE PLANS: PREHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_ctas5 POSTHOOK: query: create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_ctas5 PREHOOK: query: create table nzhang_ctas6 (key string, `to` string) PREHOOK: type: CREATETABLE @@ -894,7 +909,10 @@ POSTHOOK: Lineage: nzhang_ctas6.to SIMPLE [(src)src.FieldSchema(name:value, type PREHOOK: query: create table nzhang_ctas7 as select key, `to` from nzhang_ctas6 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@nzhang_ctas6 +PREHOOK: Output: database:default +PREHOOK: Output: default@nzhang_ctas7 POSTHOOK: query: create table nzhang_ctas7 as select key, `to` 
from nzhang_ctas6 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@nzhang_ctas6 +POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_ctas7 diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out index 5083ae2..2897c41 100644 --- a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out @@ -14,6 +14,7 @@ PREHOOK: query: create table over1k( fields terminated by '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default +PREHOOK: Output: default@over1k POSTHOOK: query: create table over1k( t tinyint, si smallint, @@ -42,6 +43,7 @@ POSTHOOK: Output: default@over1k PREHOOK: query: create table over1k_orc like over1k PREHOOK: type: CREATETABLE PREHOOK: Output: database:default +PREHOOK: Output: default@over1k_orc POSTHOOK: query: create table over1k_orc like over1k POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -81,6 +83,7 @@ PREHOOK: query: create table over1k_part_orc( partitioned by (ds string, t tinyint) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default +PREHOOK: Output: default@over1k_part_orc POSTHOOK: query: create table over1k_part_orc( si smallint, i int, @@ -93,6 +96,7 @@ POSTHOOK: Output: default@over1k_part_orc PREHOOK: query: create table over1k_part_limit_orc like over1k_part_orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default +PREHOOK: Output: default@over1k_part_limit_orc POSTHOOK: query: create table over1k_part_limit_orc like over1k_part_orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -114,6 +118,7 @@ PREHOOK: query: create table over1k_part_buck_orc( clustered by (si) into 4 buckets stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default +PREHOOK: Output: default@over1k_part_buck_orc POSTHOOK: query: create table over1k_part_buck_orc( si smallint, i int, @@ -134,6 +139,7 @@ PREHOOK: query: create table over1k_part_buck_sort_orc( sorted by (f) into 4 buckets stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default +PREHOOK: Output: default@over1k_part_buck_sort_orc POSTHOOK: query: create table over1k_part_buck_sort_orc( si smallint, i int, @@ -195,6 +201,7 @@ STAGE PLANS: Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Execution mode: vectorized Reducer 3 Reduce Operator Tree: Extract @@ -280,6 +287,7 @@ STAGE PLANS: Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Execution mode: vectorized Reducer 3 Reduce Operator Tree: Extract @@ -564,6 +572,7 @@ STAGE PLANS: Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Execution mode: vectorized Reducer 3 Reduce Operator Tree: Extract @@ -649,6 +658,7 @@ STAGE PLANS: Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num 
rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Execution mode: vectorized Reducer 3 Reduce Operator Tree: Extract @@ -1292,6 +1302,7 @@ create table over1k_part2_orc( partitioned by (ds string, t tinyint) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default +PREHOOK: Output: default@over1k_part2_orc POSTHOOK: query: -- tests for HIVE-6883 create table over1k_part2_orc( si smallint, @@ -1419,6 +1430,7 @@ STAGE PLANS: Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Execution mode: vectorized Reducer 3 Reduce Operator Tree: Extract @@ -1762,6 +1774,7 @@ create table over1k_part_buck_sort2_orc( sorted by (f) into 1 buckets PREHOOK: type: CREATETABLE PREHOOK: Output: database:default +PREHOOK: Output: default@over1k_part_buck_sort2_orc POSTHOOK: query: -- hadoop-1 does not honor number of reducers in local mode. There is always only 1 reducer irrespective of the number of buckets. -- Hence all records go to one bucket and all other buckets will be empty. Similar to HIVE-6867. However, hadoop-2 honors number -- of reducers and records are spread across all reducers. To avoid this inconsistency we will make number of buckets to 1 for this test. diff --git a/ql/src/test/results/clientpositive/tez/insert1.q.out b/ql/src/test/results/clientpositive/tez/insert1.q.out index e15b224..e3088e0 100644 --- a/ql/src/test/results/clientpositive/tez/insert1.q.out +++ b/ql/src/test/results/clientpositive/tez/insert1.q.out @@ -143,12 +143,11 @@ POSTHOOK: Output: database:x PREHOOK: query: create table x.insert1(key int, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:x -PREHOOK: Output: x@x.insert1 +PREHOOK: Output: x@insert1 POSTHOOK: query: create table x.insert1(key int, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:x POSTHOOK: Output: x@insert1 -POSTHOOK: Output: x@x.insert1 PREHOOK: query: explain insert into table x.INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1) PREHOOK: type: QUERY POSTHOOK: query: explain insert into table x.INSERT1 select a.key, a.value from insert2 a WHERE (a.key=-1) @@ -406,11 +405,10 @@ POSTHOOK: Output: database:db1 PREHOOK: query: CREATE TABLE db1.result(col1 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 -PREHOOK: Output: db1@db1.result +PREHOOK: Output: db1@result POSTHOOK: query: CREATE TABLE db1.result(col1 STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 -POSTHOOK: Output: db1@db1.result POSTHOOK: Output: db1@result PREHOOK: query: INSERT OVERWRITE TABLE db1.result SELECT 'db1_insert1' FROM src LIMIT 1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out index 979d2a7..d1dc8b2 100644 --- a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out +++ b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out @@ -105,7 +105,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3042 + totalSize 3098 #### A masked pattern was here #### # Storage Information @@ -195,7 +195,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3042 + totalSize 
3098 #### A masked pattern was here #### # Storage Information @@ -309,7 +309,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 1962 + totalSize 2016 #### A masked pattern was here #### # Storage Information @@ -354,7 +354,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 1981 + totalSize 2036 #### A masked pattern was here #### # Storage Information @@ -456,7 +456,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 1962 + totalSize 2016 #### A masked pattern was here #### # Storage Information @@ -501,7 +501,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 1981 + totalSize 2036 #### A masked pattern was here #### # Storage Information @@ -621,7 +621,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 21980 - totalSize 4746 + totalSize 4955 #### A masked pattern was here #### # Storage Information @@ -666,7 +666,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 22048 - totalSize 4829 + totalSize 5046 #### A masked pattern was here #### # Storage Information @@ -774,7 +774,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 21980 - totalSize 4746 + totalSize 4955 #### A masked pattern was here #### # Storage Information @@ -819,7 +819,7 @@ Partition Parameters: numFiles 4 numRows 50 rawDataSize 22048 - totalSize 4829 + totalSize 5046 #### A masked pattern was here #### # Storage Information @@ -984,7 +984,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 1962 + totalSize 2016 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/tez/ptf.q.out b/ql/src/test/results/clientpositive/tez/ptf.q.out index 2182450..75b2ffe 100644 --- a/ql/src/test/results/clientpositive/tez/ptf.q.out +++ b/ql/src/test/results/clientpositive/tez/ptf.q.out @@ -772,6 +772,8 @@ from part group by p_mfgr, p_brand PREHOOK: type: CREATEVIEW PREHOOK: Input: default@part +PREHOOK: Output: database:default +PREHOOK: Output: default@mfgr_price_view POSTHOOK: query: -- 16. 
testViewAsTableInputToPTF create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, @@ -780,6 +782,7 @@ from part group by p_mfgr, p_brand POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@part +POSTHOOK: Output: database:default POSTHOOK: Output: default@mfgr_price_view PREHOOK: query: select p_mfgr, p_brand, s, sum(s) over w1 as s1 diff --git a/ql/src/test/results/clientpositive/tez/stats_counter.q.out b/ql/src/test/results/clientpositive/tez/stats_counter.q.out index b1d5b8f..e2980e8 100644 --- a/ql/src/test/results/clientpositive/tez/stats_counter.q.out +++ b/ql/src/test/results/clientpositive/tez/stats_counter.q.out @@ -2,10 +2,13 @@ PREHOOK: query: -- by analyze create table dummy1 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@dummy1 POSTHOOK: query: -- by analyze create table dummy1 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@dummy1 PREHOOK: query: analyze table dummy1 compute statistics PREHOOK: type: QUERY @@ -55,10 +58,13 @@ PREHOOK: query: -- by autogather create table dummy2 as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@dummy2 POSTHOOK: query: -- by autogather create table dummy2 as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@dummy2 PREHOOK: query: desc formatted dummy2 PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/tez/subquery_exists.q.out b/ql/src/test/results/clientpositive/tez/subquery_exists.q.out index d90d1dc..654563d 100644 --- a/ql/src/test/results/clientpositive/tez/subquery_exists.q.out +++ b/ql/src/test/results/clientpositive/tez/subquery_exists.q.out @@ -133,6 +133,8 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9') PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@cv1 POSTHOOK: query: -- view test create view cv1 as select * @@ -143,6 +145,7 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9') POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@cv1 PREHOOK: query: select * from cv1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/tez/temp_table.q.out b/ql/src/test/results/clientpositive/tez/temp_table.q.out index eba40b5..53445df 100644 --- a/ql/src/test/results/clientpositive/tez/temp_table.q.out +++ b/ql/src/test/results/clientpositive/tez/temp_table.q.out @@ -46,7 +46,7 @@ STAGE PLANS: #### A masked pattern was here #### output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: foo + name: default.foo isTemporary: true Stage: Stage-3 @@ -61,9 +61,12 @@ STAGE PLANS: PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@foo POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@foo PREHOOK: query: EXPLAIN CREATE TEMPORARY TABLE 
bar AS SELECT * FROM src WHERE key % 2 = 1 PREHOOK: type: CREATETABLE_AS_SELECT @@ -113,7 +116,7 @@ STAGE PLANS: #### A masked pattern was here #### output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: bar + name: default.bar isTemporary: true Stage: Stage-3 @@ -128,9 +131,12 @@ STAGE PLANS: PREHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@bar POSTHOOK: query: CREATE TEMPORARY TABLE bar AS SELECT * FROM src WHERE key % 2 = 1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@bar PREHOOK: query: DESCRIBE foo PREHOOK: type: DESCTABLE @@ -378,9 +384,12 @@ POSTHOOK: type: SHOWTABLES PREHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@foo +PREHOOK: Output: database:two +PREHOOK: Output: two@foo POSTHOOK: query: CREATE TEMPORARY TABLE foo AS SELECT * FROM default.foo POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@foo +POSTHOOK: Output: database:two POSTHOOK: Output: two@foo PREHOOK: query: SHOW TABLES PREHOOK: type: SHOWTABLES diff --git a/ql/src/test/results/clientpositive/tez/tez_dml.q.out b/ql/src/test/results/clientpositive/tez/tez_dml.q.out index 625ce0e..5aa5631 100644 --- a/ql/src/test/results/clientpositive/tez/tez_dml.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_dml.q.out @@ -82,7 +82,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: tmp_src + name: default.tmp_src Stage: Stage-3 Stats-Aggr Operator @@ -96,9 +96,12 @@ STAGE PLANS: PREHOOK: query: CREATE TABLE tmp_src AS SELECT * FROM (SELECT value, count(value) AS cnt FROM src GROUP BY value) f1 ORDER BY cnt PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@tmp_src POSTHOOK: query: CREATE TABLE tmp_src AS SELECT * FROM (SELECT value, count(value) AS cnt FROM src GROUP BY value) f1 ORDER BY cnt POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@tmp_src PREHOOK: query: SELECT * FROM tmp_src PREHOOK: type: QUERY @@ -1478,10 +1481,13 @@ PREHOOK: query: -- create empty table CREATE TABLE empty STORED AS orc AS SELECT * FROM tmp_src_part WHERE d = -1000 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@tmp_src_part +PREHOOK: Output: database:default +PREHOOK: Output: default@empty POSTHOOK: query: -- create empty table CREATE TABLE empty STORED AS orc AS SELECT * FROM tmp_src_part WHERE d = -1000 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@tmp_src_part +POSTHOOK: Output: database:default POSTHOOK: Output: default@empty PREHOOK: query: SELECT * FROM empty PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/tez/tez_union.q.out b/ql/src/test/results/clientpositive/tez/tez_union.q.out index 0fc9b78..7f94016 100644 --- a/ql/src/test/results/clientpositive/tez/tez_union.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_union.q.out @@ -92,12 +92,15 @@ UNION ALL select s2.key as key, s2.value as value from src s2 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: 
Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@ut POSTHOOK: query: create table ut as select s1.key as key, s1.value as value from src s1 join src s3 on s1.key=s3.key UNION ALL select s2.key as key, s2.value as value from src s2 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@ut PREHOOK: query: select * from ut order by key, value limit 20 PREHOOK: type: QUERY @@ -268,12 +271,15 @@ select count(*) as cnt from (select u1.key as k1, u2.key as k2 from u as u1 join u as u2 on (u1.key = u2.key)) a PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@ut POSTHOOK: query: create table ut as with u as (select * from src union all select * from src) select count(*) as cnt from (select u1.key as k1, u2.key as k2 from u as u1 join u as u2 on (u1.key = u2.key)) a POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@ut PREHOOK: query: select * from ut order by cnt limit 20 PREHOOK: type: QUERY @@ -401,12 +407,15 @@ src s1 join (select * from src union all select * from src) u1 on s1.key = u1.key PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@ut POSTHOOK: query: create table ut as select s1.key as skey, u1.key as ukey from src s1 join (select * from src union all select * from src) u1 on s1.key = u1.key POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@ut PREHOOK: query: select * from ut order by skey, ukey limit 20 PREHOOK: type: QUERY @@ -708,6 +717,8 @@ join src s8 on (u1.key = s8.key) order by lkey PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@ut POSTHOOK: query: create table ut as select s1.key as skey, u1.key as ukey, s8.key as lkey from src s1 @@ -718,6 +729,7 @@ join src s8 on (u1.key = s8.key) order by lkey POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@ut PREHOOK: query: select * from ut order by skey, ukey, lkey limit 100 PREHOOK: type: QUERY @@ -951,11 +963,14 @@ select s2.key as key from src s2 join src s3 on s2.key = s3.key union all select s4.key from src s4 join src s5 on s4.key = s5.key PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@ut POSTHOOK: query: create table ut as select s2.key as key from src s2 join src s3 on s2.key = s3.key union all select s4.key from src s4 join src s5 on s4.key = s5.key POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@ut PREHOOK: query: select * from ut order by key limit 30 PREHOOK: type: QUERY @@ -1190,12 +1205,15 @@ select u.key as ukey, s.key as skey from right outer join src s on u.key = s.key PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@ut POSTHOOK: query: create table ut as select u.key as ukey, s.key as skey from (select * from src union all select * from src) u right outer join src s on u.key = s.key POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@ut PREHOOK: 
query: select * from ut order by ukey, skey limit 20 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out index d6b80b3..5a3aac1 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out @@ -5,6 +5,8 @@ PREHOOK: query: CREATE TABLE decimal_vgby STORED AS ORC AS FROM alltypesorc PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_vgby POSTHOOK: query: CREATE TABLE decimal_vgby STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, @@ -12,6 +14,7 @@ POSTHOOK: query: CREATE TABLE decimal_vgby STORED AS ORC AS FROM alltypesorc POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default POSTHOOK: Output: default@decimal_vgby PREHOOK: query: EXPLAIN SELECT cint, COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1), diff --git a/ql/src/test/results/clientpositive/tez/vectorization_part_project.q.out b/ql/src/test/results/clientpositive/tez/vectorization_part_project.q.out index 8d28abd..d3e3923 100644 --- a/ql/src/test/results/clientpositive/tez/vectorization_part_project.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorization_part_project.q.out @@ -65,28 +65,28 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_part - Statistics: Num rows: 200 Data size: 4068 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 200 Data size: 41576 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (cdouble + 2) (type: double) outputColumnNames: _col0 - Statistics: Num rows: 200 Data size: 4068 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 200 Data size: 41576 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: double) sort order: + - Statistics: Num rows: 200 Data size: 4068 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 200 Data size: 41576 Basic stats: COMPLETE Column stats: NONE Execution mode: vectorized Reducer 2 Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: double) outputColumnNames: _col0 - Statistics: Num rows: 200 Data size: 4068 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 200 Data size: 41576 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 2070 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 2070 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out index 88f9e5f..982e8ba 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out @@ -4633,6 +4633,8 @@ from part group by p_mfgr, 
p_brand PREHOOK: type: CREATEVIEW PREHOOK: Input: default@part +PREHOOK: Output: database:default +PREHOOK: Output: default@mfgr_price_view POSTHOOK: query: -- 16. testViewAsTableInputToPTF create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, @@ -4641,6 +4643,7 @@ from part group by p_mfgr, p_brand POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@part +POSTHOOK: Output: database:default POSTHOOK: Output: default@mfgr_price_view PREHOOK: query: explain extended select p_mfgr, p_brand, s, diff --git a/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out index 8b39f9c..b8e46e9 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out @@ -106,15 +106,15 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: to_unix_timestamp(ctimestamp1) (type: bigint), year(ctimestamp1) (type: int), month(ctimestamp1) (type: int), day(ctimestamp1) (type: int), dayofmonth(ctimestamp1) (type: int), weekofyear(ctimestamp1) (type: int), hour(ctimestamp1) (type: int), minute(ctimestamp1) (type: int), second(ctimestamp1) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int) Execution mode: vectorized Reducer 2 @@ -122,10 +122,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -249,15 +249,15 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: to_unix_timestamp(stimestamp1) (type: bigint), year(stimestamp1) (type: int), month(stimestamp1) (type: int), day(stimestamp1) (type: int), dayofmonth(stimestamp1) (type: int), weekofyear(stimestamp1) (type: int), hour(stimestamp1) 
(type: int), minute(stimestamp1) (type: int), second(stimestamp1) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int) Execution mode: vectorized Reducer 2 @@ -265,10 +265,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -392,15 +392,15 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_string - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (to_unix_timestamp(ctimestamp1) = to_unix_timestamp(stimestamp1)) (type: boolean), (year(ctimestamp1) = year(stimestamp1)) (type: boolean), (month(ctimestamp1) = month(stimestamp1)) (type: boolean), (day(ctimestamp1) = day(stimestamp1)) (type: boolean), (dayofmonth(ctimestamp1) = dayofmonth(stimestamp1)) (type: boolean), (weekofyear(ctimestamp1) = weekofyear(stimestamp1)) (type: boolean), (hour(ctimestamp1) = hour(stimestamp1)) (type: boolean), (minute(ctimestamp1) = minute(stimestamp1)) (type: boolean), (second(ctimestamp1) = second(stimestamp1)) (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: boolean) sort order: + - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: boolean), _col2 (type: boolean), _col3 (type: boolean), _col4 (type: boolean), _col5 (type: boolean), _col6 (type: boolean), _col7 (type: boolean), _col8 (type: boolean) Execution mode: vectorized Reducer 2 @@ -408,10 +408,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: boolean), VALUE._col0 (type: boolean), VALUE._col1 (type: boolean), VALUE._col2 (type: boolean), VALUE._col3 (type: boolean), VALUE._col4 (type: boolean), VALUE._col5 (type: boolean), VALUE._col6 (type: boolean), 
VALUE._col7 (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 40 Data size: 622 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -539,15 +539,15 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_wrong - Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: to_unix_timestamp(stimestamp1) (type: bigint), year(stimestamp1) (type: int), month(stimestamp1) (type: int), day(stimestamp1) (type: int), dayofmonth(stimestamp1) (type: int), weekofyear(stimestamp1) (type: int), hour(stimestamp1) (type: int), minute(stimestamp1) (type: int), second(stimestamp1) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + - Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int) Execution mode: vectorized Reducer 2 @@ -555,10 +555,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/touch.q.out b/ql/src/test/results/clientpositive/touch.q.out index 7ea3807..c239715 100644 --- a/ql/src/test/results/clientpositive/touch.q.out +++ b/ql/src/test/results/clientpositive/touch.q.out @@ -1,89 +1,95 @@ -PREHOOK: query: drop table tstsrc -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tstsrc -POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table tstsrcpart -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table tstsrcpart -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table tstsrc like src +PREHOOK: query: create database tc +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:tc +POSTHOOK: query: create database tc +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:tc +PREHOOK: 
query: create table tc.tstsrc like default.src PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tstsrc -POSTHOOK: query: create table tstsrc like src +PREHOOK: Output: database:tc +PREHOOK: Output: tc@tstsrc +POSTHOOK: query: create table tc.tstsrc like default.src POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstsrc -PREHOOK: query: insert overwrite table tstsrc select key, value from src +POSTHOOK: Output: database:tc +POSTHOOK: Output: tc@tstsrc +PREHOOK: query: insert overwrite table tc.tstsrc select key, value from default.src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@tstsrc -POSTHOOK: query: insert overwrite table tstsrc select key, value from src +PREHOOK: Output: tc@tstsrc +POSTHOOK: query: insert overwrite table tc.tstsrc select key, value from default.src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@tstsrc +POSTHOOK: Output: tc@tstsrc POSTHOOK: Lineage: tstsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tstsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: create table tstsrcpart like srcpart +PREHOOK: query: create table tc.tstsrcpart like default.srcpart PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: create table tstsrcpart like srcpart +PREHOOK: Output: database:tc +PREHOOK: Output: tc@tstsrcpart +POSTHOOK: query: create table tc.tstsrcpart like default.srcpart POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tstsrcpart -PREHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='12') -select key, value from srcpart where ds='2008-04-08' and hr='12' +POSTHOOK: Output: database:tc +POSTHOOK: Output: tc@tstsrcpart +PREHOOK: query: insert overwrite table tc.tstsrcpart partition (ds='2008-04-08', hr='12') +select key, value from default.srcpart where ds='2008-04-08' and hr='12' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -POSTHOOK: query: insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='12') -select key, value from srcpart where ds='2008-04-08' and hr='12' +PREHOOK: Output: tc@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: query: insert overwrite table tc.tstsrcpart partition (ds='2008-04-08', hr='12') +select key, value from default.srcpart where ds='2008-04-08' and hr='12' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: Output: tc@tstsrcpart@ds=2008-04-08/hr=12 POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: ALTER TABLE tstsrc TOUCH +PREHOOK: query: ALTER TABLE tc.tstsrc TOUCH PREHOOK: type: ALTERTABLE_TOUCH -PREHOOK: Input: default@tstsrc -PREHOOK: Output: default@tstsrc -POSTHOOK: query: ALTER TABLE tstsrc TOUCH +PREHOOK: Input: tc@tstsrc +PREHOOK: Output: tc@tstsrc +POSTHOOK: query: ALTER TABLE tc.tstsrc TOUCH POSTHOOK: type: ALTERTABLE_TOUCH -POSTHOOK: Input: default@tstsrc -POSTHOOK: 
Output: default@tstsrc -PREHOOK: query: ALTER TABLE tstsrcpart TOUCH +POSTHOOK: Input: tc@tstsrc +POSTHOOK: Output: tc@tstsrc +PREHOOK: query: ALTER TABLE tc.tstsrcpart TOUCH PREHOOK: type: ALTERTABLE_TOUCH -PREHOOK: Input: default@tstsrcpart -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: ALTER TABLE tstsrcpart TOUCH +PREHOOK: Input: tc@tstsrcpart +PREHOOK: Output: tc@tstsrcpart +POSTHOOK: query: ALTER TABLE tc.tstsrcpart TOUCH POSTHOOK: type: ALTERTABLE_TOUCH -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Output: default@tstsrcpart -PREHOOK: query: ALTER TABLE tstsrcpart TOUCH PARTITION (ds='2008-04-08', hr='12') +POSTHOOK: Input: tc@tstsrcpart +POSTHOOK: Output: tc@tstsrcpart +PREHOOK: query: ALTER TABLE tc.tstsrcpart TOUCH PARTITION (ds='2008-04-08', hr='12') PREHOOK: type: ALTERTABLE_TOUCH -PREHOOK: Input: default@tstsrcpart -PREHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -POSTHOOK: query: ALTER TABLE tstsrcpart TOUCH PARTITION (ds='2008-04-08', hr='12') +PREHOOK: Input: tc@tstsrcpart +PREHOOK: Output: tc@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: query: ALTER TABLE tc.tstsrcpart TOUCH PARTITION (ds='2008-04-08', hr='12') POSTHOOK: type: ALTERTABLE_TOUCH -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12 -POSTHOOK: Output: default@tstsrcpart@ds=2008-04-08/hr=12 -PREHOOK: query: drop table tstsrc +POSTHOOK: Input: tc@tstsrcpart +POSTHOOK: Input: tc@tstsrcpart@ds=2008-04-08/hr=12 +POSTHOOK: Output: tc@tstsrcpart@ds=2008-04-08/hr=12 +PREHOOK: query: drop table tc.tstsrc PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tstsrc -PREHOOK: Output: default@tstsrc -POSTHOOK: query: drop table tstsrc +PREHOOK: Input: tc@tstsrc +PREHOOK: Output: tc@tstsrc +POSTHOOK: query: drop table tc.tstsrc POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tstsrc -POSTHOOK: Output: default@tstsrc -PREHOOK: query: drop table tstsrcpart +POSTHOOK: Input: tc@tstsrc +POSTHOOK: Output: tc@tstsrc +PREHOOK: query: drop table tc.tstsrcpart PREHOOK: type: DROPTABLE -PREHOOK: Input: default@tstsrcpart -PREHOOK: Output: default@tstsrcpart -POSTHOOK: query: drop table tstsrcpart +PREHOOK: Input: tc@tstsrcpart +PREHOOK: Output: tc@tstsrcpart +POSTHOOK: query: drop table tc.tstsrcpart POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@tstsrcpart -POSTHOOK: Output: default@tstsrcpart +POSTHOOK: Input: tc@tstsrcpart +POSTHOOK: Output: tc@tstsrcpart +PREHOOK: query: drop database tc +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:tc +PREHOOK: Output: database:tc +POSTHOOK: query: drop database tc +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:tc +POSTHOOK: Output: database:tc diff --git a/ql/src/test/results/clientpositive/union24.q.out b/ql/src/test/results/clientpositive/union24.q.out index 76c1adb..77ae018 100644 --- a/ql/src/test/results/clientpositive/union24.q.out +++ b/ql/src/test/results/clientpositive/union24.q.out @@ -3,32 +3,44 @@ PREHOOK: query: -- SORT_QUERY_RESULTS create table src2 as select key, count(1) as count from src group by key PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src2 POSTHOOK: query: -- SORT_QUERY_RESULTS create table src2 as select key, count(1) as count from src group by key POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@src2 PREHOOK: query: create table src3 as select * from src2 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src2 
+PREHOOK: Output: database:default +PREHOOK: Output: default@src3 POSTHOOK: query: create table src3 as select * from src2 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@src3 PREHOOK: query: create table src4 as select * from src2 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src2 +PREHOOK: Output: database:default +PREHOOK: Output: default@src4 POSTHOOK: query: create table src4 as select * from src2 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@src4 PREHOOK: query: create table src5 as select * from src2 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src2 +PREHOOK: Output: database:default +PREHOOK: Output: default@src5 POSTHOOK: query: create table src5 as select * from src2 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src2 +POSTHOOK: Output: database:default POSTHOOK: Output: default@src5 PREHOOK: query: explain extended select s.key, s.count from ( diff --git a/ql/src/test/results/clientpositive/union25.q.out b/ql/src/test/results/clientpositive/union25.q.out index 5be298e..01f4fcc 100644 --- a/ql/src/test/results/clientpositive/union25.q.out +++ b/ql/src/test/results/clientpositive/union25.q.out @@ -210,7 +210,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: tmp_unionall + name: default.tmp_unionall Stage: Stage-3 Stats-Aggr Operator diff --git a/ql/src/test/results/clientpositive/union27.q.out b/ql/src/test/results/clientpositive/union27.q.out index 99cfa6f..da37707 100644 --- a/ql/src/test/results/clientpositive/union27.q.out +++ b/ql/src/test/results/clientpositive/union27.q.out @@ -2,24 +2,33 @@ PREHOOK: query: -- SORT_BEFORE_DIFF create table jackson_sev_same as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@jackson_sev_same POSTHOOK: query: -- SORT_BEFORE_DIFF create table jackson_sev_same as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@jackson_sev_same PREHOOK: query: create table dim_pho as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@dim_pho POSTHOOK: query: create table dim_pho as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@dim_pho PREHOOK: query: create table jackson_sev_add as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@jackson_sev_add POSTHOOK: query: create table jackson_sev_add as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@jackson_sev_add PREHOOK: query: explain select b.* from jackson_sev_same a join (select * from dim_pho union all select * from jackson_sev_add)b on a.key=b.key and b.key=97 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/union31.q.out b/ql/src/test/results/clientpositive/union31.q.out index 97bb535..faf17f4 100644 --- 
a/ql/src/test/results/clientpositive/union31.q.out +++ b/ql/src/test/results/clientpositive/union31.q.out @@ -13,16 +13,22 @@ POSTHOOK: type: DROPTABLE PREHOOK: query: create table t1 as select * from src where key < 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 POSTHOOK: query: create table t1 as select * from src where key < 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 PREHOOK: query: create table t2 as select * from src where key < 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@t2 POSTHOOK: query: create table t2 as select * from src where key < 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@t2 PREHOOK: query: create table t3(key string, cnt int) PREHOOK: type: CREATETABLE @@ -598,16 +604,22 @@ POSTHOOK: Output: default@t2 PREHOOK: query: create table t1 as select * from src where key < 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 POSTHOOK: query: create table t1 as select * from src where key < 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 PREHOOK: query: create table t2 as select key, count(1) as cnt from src where key < 10 group by key PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@t2 POSTHOOK: query: create table t2 as select key, count(1) as cnt from src where key < 10 group by key POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@t2 PREHOOK: query: create table t7(c1 string, cnt int) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/union32.q.out b/ql/src/test/results/clientpositive/union32.q.out index b353db9..046a9fc 100644 --- a/ql/src/test/results/clientpositive/union32.q.out +++ b/ql/src/test/results/clientpositive/union32.q.out @@ -6,6 +6,8 @@ PREHOOK: query: -- SORT_QUERY_RESULTS CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 POSTHOOK: query: -- SORT_QUERY_RESULTS -- This tests various union queries which have columns on one side of the query @@ -14,13 +16,17 @@ POSTHOOK: query: -- SORT_QUERY_RESULTS CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 PREHOOK: query: CREATE TABLE t2 AS SELECT * FROM src WHERE key < 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@t2 POSTHOOK: query: CREATE TABLE t2 AS SELECT * FROM src WHERE key < 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@t2 PREHOOK: query: -- Test simple union with double EXPLAIN diff --git a/ql/src/test/results/clientpositive/union_top_level.q.out b/ql/src/test/results/clientpositive/union_top_level.q.out index 10694b2..1799d9d 100644 --- a/ql/src/test/results/clientpositive/union_top_level.q.out +++ 
b/ql/src/test/results/clientpositive/union_top_level.q.out @@ -580,7 +580,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: union_top + name: default.union_top Stage: Stage-3 Stats-Aggr Operator @@ -693,6 +693,8 @@ union all select key, 2 as value from src where key % 3 == 2 limit 3 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@union_top POSTHOOK: query: create table union_top as select key, 0 as value from src where key % 3 == 0 limit 3 union all @@ -701,6 +703,7 @@ union all select key, 2 as value from src where key % 3 == 2 limit 3 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@union_top PREHOOK: query: select * from union_top PREHOOK: type: QUERY @@ -1305,7 +1308,7 @@ union all select `src`.`key`, 1 as `value` from `default`.`src` where `src`.`key` % 3 == 1 limit 3 union all select `src`.`key`, 2 as `value` from `default`.`src` where `src`.`key` % 3 == 2 limit 3 - name: union_top_view + name: default.union_top_view original text: select key, 0 as value from src where key % 3 == 0 limit 3 union all select key, 1 as value from src where key % 3 == 1 limit 3 @@ -1320,6 +1323,8 @@ union all select key, 2 as value from src where key % 3 == 2 limit 3 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@union_top_view POSTHOOK: query: create view union_top_view as select key, 0 as value from src where key % 3 == 0 limit 3 union all @@ -1328,6 +1333,7 @@ union all select key, 2 as value from src where key % 3 == 2 limit 3 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@union_top_view PREHOOK: query: select * from union_top_view PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/unset_table_view_property.q.out b/ql/src/test/results/clientpositive/unset_table_view_property.q.out index 8cf6686..8249246 100644 --- a/ql/src/test/results/clientpositive/unset_table_view_property.q.out +++ b/ql/src/test/results/clientpositive/unset_table_view_property.q.out @@ -1,29 +1,35 @@ -PREHOOK: query: CREATE TABLE testTable(col1 INT, col2 INT) +PREHOOK: query: CREATE DATABASE vt +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:vt +POSTHOOK: query: CREATE DATABASE vt +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:vt +PREHOOK: query: CREATE TABLE vt.testTable(col1 INT, col2 INT) PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@testTable -POSTHOOK: query: CREATE TABLE testTable(col1 INT, col2 INT) +PREHOOK: Output: database:vt +PREHOOK: Output: vt@testTable +POSTHOOK: query: CREATE TABLE vt.testTable(col1 INT, col2 INT) POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@testTable -PREHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: Output: database:vt +POSTHOOK: Output: vt@testTable +PREHOOK: query: SHOW TBLPROPERTIES vt.testTable PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: query: SHOW TBLPROPERTIES vt.testTable POSTHOOK: type: SHOW_TBLPROPERTIES #### A masked pattern was here #### PREHOOK: query: -- UNSET TABLE PROPERTIES -ALTER TABLE testTable SET TBLPROPERTIES ('a'='1', 'c'='3') +ALTER TABLE 
vt.testTable SET TBLPROPERTIES ('a'='1', 'c'='3') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@testtable -PREHOOK: Output: default@testtable +PREHOOK: Input: vt@testtable +PREHOOK: Output: vt@testtable POSTHOOK: query: -- UNSET TABLE PROPERTIES -ALTER TABLE testTable SET TBLPROPERTIES ('a'='1', 'c'='3') +ALTER TABLE vt.testTable SET TBLPROPERTIES ('a'='1', 'c'='3') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@testtable -POSTHOOK: Output: default@testtable -PREHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: Input: vt@testtable +POSTHOOK: Output: vt@testtable +PREHOOK: query: SHOW TBLPROPERTIES vt.testTable PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: query: SHOW TBLPROPERTIES vt.testTable POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE false a 1 @@ -35,18 +41,18 @@ rawDataSize -1 totalSize 0 #### A masked pattern was here #### PREHOOK: query: -- UNSET all the properties -ALTER TABLE testTable UNSET TBLPROPERTIES ('a', 'c') +ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('a', 'c') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@testtable -PREHOOK: Output: default@testtable +PREHOOK: Input: vt@testtable +PREHOOK: Output: vt@testtable POSTHOOK: query: -- UNSET all the properties -ALTER TABLE testTable UNSET TBLPROPERTIES ('a', 'c') +ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('a', 'c') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@testtable -POSTHOOK: Output: default@testtable -PREHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: Input: vt@testtable +POSTHOOK: Output: vt@testtable +PREHOOK: query: SHOW TBLPROPERTIES vt.testTable PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: query: SHOW TBLPROPERTIES vt.testTable POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE false #### A masked pattern was here #### @@ -55,17 +61,17 @@ numRows -1 rawDataSize -1 totalSize 0 #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE testTable SET TBLPROPERTIES ('a'='1', 'c'='3', 'd'='4') +PREHOOK: query: ALTER TABLE vt.testTable SET TBLPROPERTIES ('a'='1', 'c'='3', 'd'='4') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@testtable -PREHOOK: Output: default@testtable -POSTHOOK: query: ALTER TABLE testTable SET TBLPROPERTIES ('a'='1', 'c'='3', 'd'='4') +PREHOOK: Input: vt@testtable +PREHOOK: Output: vt@testtable +POSTHOOK: query: ALTER TABLE vt.testTable SET TBLPROPERTIES ('a'='1', 'c'='3', 'd'='4') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@testtable -POSTHOOK: Output: default@testtable -PREHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: Input: vt@testtable +POSTHOOK: Output: vt@testtable +PREHOOK: query: SHOW TBLPROPERTIES vt.testTable PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: query: SHOW TBLPROPERTIES vt.testTable POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE false a 1 @@ -78,18 +84,18 @@ rawDataSize -1 totalSize 0 #### A masked pattern was here #### PREHOOK: query: -- UNSET a subset of the properties -ALTER TABLE testTable UNSET TBLPROPERTIES ('a', 'd') +ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('a', 'd') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@testtable -PREHOOK: Output: default@testtable +PREHOOK: Input: vt@testtable +PREHOOK: Output: vt@testtable POSTHOOK: query: -- UNSET a subset of the properties -ALTER TABLE testTable UNSET TBLPROPERTIES ('a', 'd') +ALTER TABLE vt.testTable UNSET 
TBLPROPERTIES ('a', 'd') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@testtable -POSTHOOK: Output: default@testtable -PREHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: Input: vt@testtable +POSTHOOK: Output: vt@testtable +PREHOOK: query: SHOW TBLPROPERTIES vt.testTable PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: query: SHOW TBLPROPERTIES vt.testTable POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE false c 3 @@ -100,18 +106,18 @@ rawDataSize -1 totalSize 0 #### A masked pattern was here #### PREHOOK: query: -- the same property being UNSET multiple times -ALTER TABLE testTable UNSET TBLPROPERTIES ('c', 'c', 'c') +ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('c', 'c', 'c') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@testtable -PREHOOK: Output: default@testtable +PREHOOK: Input: vt@testtable +PREHOOK: Output: vt@testtable POSTHOOK: query: -- the same property being UNSET multiple times -ALTER TABLE testTable UNSET TBLPROPERTIES ('c', 'c', 'c') +ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('c', 'c', 'c') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@testtable -POSTHOOK: Output: default@testtable -PREHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: Input: vt@testtable +POSTHOOK: Output: vt@testtable +PREHOOK: query: SHOW TBLPROPERTIES vt.testTable PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: query: SHOW TBLPROPERTIES vt.testTable POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE false #### A masked pattern was here #### @@ -120,17 +126,17 @@ numRows -1 rawDataSize -1 totalSize 0 #### A masked pattern was here #### -PREHOOK: query: ALTER TABLE testTable SET TBLPROPERTIES ('a'='1', 'b' = '2', 'c'='3', 'd'='4') +PREHOOK: query: ALTER TABLE vt.testTable SET TBLPROPERTIES ('a'='1', 'b' = '2', 'c'='3', 'd'='4') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@testtable -PREHOOK: Output: default@testtable -POSTHOOK: query: ALTER TABLE testTable SET TBLPROPERTIES ('a'='1', 'b' = '2', 'c'='3', 'd'='4') +PREHOOK: Input: vt@testtable +PREHOOK: Output: vt@testtable +POSTHOOK: query: ALTER TABLE vt.testTable SET TBLPROPERTIES ('a'='1', 'b' = '2', 'c'='3', 'd'='4') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@testtable -POSTHOOK: Output: default@testtable -PREHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: Input: vt@testtable +POSTHOOK: Output: vt@testtable +PREHOOK: query: SHOW TBLPROPERTIES vt.testTable PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: query: SHOW TBLPROPERTIES vt.testTable POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE false a 1 @@ -144,18 +150,18 @@ rawDataSize -1 totalSize 0 #### A masked pattern was here #### PREHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER TABLE testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'b', 'f') +ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'b', 'f') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@testtable -PREHOOK: Output: default@testtable +PREHOOK: Input: vt@testtable +PREHOOK: Output: vt@testtable POSTHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER TABLE testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'b', 'f') +ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'b', 'f') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: 
default@testtable -POSTHOOK: Output: default@testtable -PREHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: Input: vt@testtable +POSTHOOK: Output: vt@testtable +PREHOOK: query: SHOW TBLPROPERTIES vt.testTable PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: query: SHOW TBLPROPERTIES vt.testTable POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE false a 1 @@ -167,18 +173,18 @@ rawDataSize -1 totalSize 0 #### A masked pattern was here #### PREHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER TABLE testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'c', 'f', 'x', 'y', 'z') +ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'c', 'f', 'x', 'y', 'z') PREHOOK: type: ALTERTABLE_PROPERTIES -PREHOOK: Input: default@testtable -PREHOOK: Output: default@testtable +PREHOOK: Input: vt@testtable +PREHOOK: Output: vt@testtable POSTHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER TABLE testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'c', 'f', 'x', 'y', 'z') +ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'c', 'f', 'x', 'y', 'z') POSTHOOK: type: ALTERTABLE_PROPERTIES -POSTHOOK: Input: default@testtable -POSTHOOK: Output: default@testtable -PREHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: Input: vt@testtable +POSTHOOK: Output: vt@testtable +PREHOOK: query: SHOW TBLPROPERTIES vt.testTable PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testTable +POSTHOOK: query: SHOW TBLPROPERTIES vt.testTable POSTHOOK: type: SHOW_TBLPROPERTIES COLUMN_STATS_ACCURATE false a 1 @@ -188,57 +194,68 @@ numRows -1 rawDataSize -1 totalSize 0 #### A masked pattern was here #### +PREHOOK: query: DROP TABLE vt.testTable +PREHOOK: type: DROPTABLE +PREHOOK: Input: vt@testtable +PREHOOK: Output: vt@testtable +POSTHOOK: query: DROP TABLE vt.testTable +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: vt@testtable +POSTHOOK: Output: vt@testtable PREHOOK: query: -- UNSET VIEW PROPERTIES -CREATE VIEW testView AS SELECT value FROM src WHERE key=86 +CREATE VIEW vt.testView AS SELECT value FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:vt +PREHOOK: Output: vt@testView POSTHOOK: query: -- UNSET VIEW PROPERTIES -CREATE VIEW testView AS SELECT value FROM src WHERE key=86 +CREATE VIEW vt.testView AS SELECT value FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src -POSTHOOK: Output: default@testView -PREHOOK: query: ALTER VIEW testView SET TBLPROPERTIES ('propA'='100', 'propB'='200') +POSTHOOK: Output: database:vt +POSTHOOK: Output: vt@testView +PREHOOK: query: ALTER VIEW vt.testView SET TBLPROPERTIES ('propA'='100', 'propB'='200') PREHOOK: type: ALTERVIEW_PROPERTIES -PREHOOK: Input: default@testview -PREHOOK: Output: default@testview -POSTHOOK: query: ALTER VIEW testView SET TBLPROPERTIES ('propA'='100', 'propB'='200') +PREHOOK: Input: vt@testview +PREHOOK: Output: vt@testview +POSTHOOK: query: ALTER VIEW vt.testView SET TBLPROPERTIES ('propA'='100', 'propB'='200') POSTHOOK: type: ALTERVIEW_PROPERTIES -POSTHOOK: Input: default@testview -POSTHOOK: Output: default@testview -PREHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: Input: vt@testview +POSTHOOK: Output: vt@testview +PREHOOK: query: SHOW TBLPROPERTIES vt.testView PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: query: SHOW TBLPROPERTIES vt.testView 
POSTHOOK: type: SHOW_TBLPROPERTIES #### A masked pattern was here #### propA 100 propB 200 #### A masked pattern was here #### PREHOOK: query: -- UNSET all the properties -ALTER VIEW testView UNSET TBLPROPERTIES ('propA', 'propB') +ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propB') PREHOOK: type: ALTERVIEW_PROPERTIES -PREHOOK: Input: default@testview -PREHOOK: Output: default@testview +PREHOOK: Input: vt@testview +PREHOOK: Output: vt@testview POSTHOOK: query: -- UNSET all the properties -ALTER VIEW testView UNSET TBLPROPERTIES ('propA', 'propB') +ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propB') POSTHOOK: type: ALTERVIEW_PROPERTIES -POSTHOOK: Input: default@testview -POSTHOOK: Output: default@testview -PREHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: Input: vt@testview +POSTHOOK: Output: vt@testview +PREHOOK: query: SHOW TBLPROPERTIES vt.testView PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: query: SHOW TBLPROPERTIES vt.testView POSTHOOK: type: SHOW_TBLPROPERTIES #### A masked pattern was here #### -PREHOOK: query: ALTER VIEW testView SET TBLPROPERTIES ('propA'='100', 'propC'='300', 'propD'='400') +PREHOOK: query: ALTER VIEW vt.testView SET TBLPROPERTIES ('propA'='100', 'propC'='300', 'propD'='400') PREHOOK: type: ALTERVIEW_PROPERTIES -PREHOOK: Input: default@testview -PREHOOK: Output: default@testview -POSTHOOK: query: ALTER VIEW testView SET TBLPROPERTIES ('propA'='100', 'propC'='300', 'propD'='400') +PREHOOK: Input: vt@testview +PREHOOK: Output: vt@testview +POSTHOOK: query: ALTER VIEW vt.testView SET TBLPROPERTIES ('propA'='100', 'propC'='300', 'propD'='400') POSTHOOK: type: ALTERVIEW_PROPERTIES -POSTHOOK: Input: default@testview -POSTHOOK: Output: default@testview -PREHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: Input: vt@testview +POSTHOOK: Output: vt@testview +PREHOOK: query: SHOW TBLPROPERTIES vt.testView PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: query: SHOW TBLPROPERTIES vt.testView POSTHOOK: type: SHOW_TBLPROPERTIES #### A masked pattern was here #### propA 100 @@ -246,48 +263,48 @@ propC 300 propD 400 #### A masked pattern was here #### PREHOOK: query: -- UNSET a subset of the properties -ALTER VIEW testView UNSET TBLPROPERTIES ('propA', 'propC') +ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propC') PREHOOK: type: ALTERVIEW_PROPERTIES -PREHOOK: Input: default@testview -PREHOOK: Output: default@testview +PREHOOK: Input: vt@testview +PREHOOK: Output: vt@testview POSTHOOK: query: -- UNSET a subset of the properties -ALTER VIEW testView UNSET TBLPROPERTIES ('propA', 'propC') +ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propC') POSTHOOK: type: ALTERVIEW_PROPERTIES -POSTHOOK: Input: default@testview -POSTHOOK: Output: default@testview -PREHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: Input: vt@testview +POSTHOOK: Output: vt@testview +PREHOOK: query: SHOW TBLPROPERTIES vt.testView PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: query: SHOW TBLPROPERTIES vt.testView POSTHOOK: type: SHOW_TBLPROPERTIES #### A masked pattern was here #### propD 400 #### A masked pattern was here #### PREHOOK: query: -- the same property being UNSET multiple times -ALTER VIEW testView UNSET TBLPROPERTIES ('propD', 'propD', 'propD') +ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propD', 'propD', 'propD') PREHOOK: type: ALTERVIEW_PROPERTIES -PREHOOK: Input: default@testview -PREHOOK: Output: default@testview 
+PREHOOK: Input: vt@testview +PREHOOK: Output: vt@testview POSTHOOK: query: -- the same property being UNSET multiple times -ALTER VIEW testView UNSET TBLPROPERTIES ('propD', 'propD', 'propD') +ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propD', 'propD', 'propD') POSTHOOK: type: ALTERVIEW_PROPERTIES -POSTHOOK: Input: default@testview -POSTHOOK: Output: default@testview -PREHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: Input: vt@testview +POSTHOOK: Output: vt@testview +PREHOOK: query: SHOW TBLPROPERTIES vt.testView PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: query: SHOW TBLPROPERTIES vt.testView POSTHOOK: type: SHOW_TBLPROPERTIES #### A masked pattern was here #### -PREHOOK: query: ALTER VIEW testView SET TBLPROPERTIES ('propA'='100', 'propB' = '200', 'propC'='300', 'propD'='400') +PREHOOK: query: ALTER VIEW vt.testView SET TBLPROPERTIES ('propA'='100', 'propB' = '200', 'propC'='300', 'propD'='400') PREHOOK: type: ALTERVIEW_PROPERTIES -PREHOOK: Input: default@testview -PREHOOK: Output: default@testview -POSTHOOK: query: ALTER VIEW testView SET TBLPROPERTIES ('propA'='100', 'propB' = '200', 'propC'='300', 'propD'='400') +PREHOOK: Input: vt@testview +PREHOOK: Output: vt@testview +POSTHOOK: query: ALTER VIEW vt.testView SET TBLPROPERTIES ('propA'='100', 'propB' = '200', 'propC'='300', 'propD'='400') POSTHOOK: type: ALTERVIEW_PROPERTIES -POSTHOOK: Input: default@testview -POSTHOOK: Output: default@testview -PREHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: Input: vt@testview +POSTHOOK: Output: vt@testview +PREHOOK: query: SHOW TBLPROPERTIES vt.testView PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: query: SHOW TBLPROPERTIES vt.testView POSTHOOK: type: SHOW_TBLPROPERTIES #### A masked pattern was here #### propA 100 @@ -296,37 +313,53 @@ propC 300 propD 400 #### A masked pattern was here #### PREHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER VIEW testView UNSET TBLPROPERTIES IF EXISTS ('propC', 'propD', 'propD', 'propC', 'propZ') +ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propC', 'propD', 'propD', 'propC', 'propZ') PREHOOK: type: ALTERVIEW_PROPERTIES -PREHOOK: Input: default@testview -PREHOOK: Output: default@testview +PREHOOK: Input: vt@testview +PREHOOK: Output: vt@testview POSTHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER VIEW testView UNSET TBLPROPERTIES IF EXISTS ('propC', 'propD', 'propD', 'propC', 'propZ') +ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propC', 'propD', 'propD', 'propC', 'propZ') POSTHOOK: type: ALTERVIEW_PROPERTIES -POSTHOOK: Input: default@testview -POSTHOOK: Output: default@testview -PREHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: Input: vt@testview +POSTHOOK: Output: vt@testview +PREHOOK: query: SHOW TBLPROPERTIES vt.testView PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: query: SHOW TBLPROPERTIES vt.testView POSTHOOK: type: SHOW_TBLPROPERTIES #### A masked pattern was here #### propA 100 propB 200 #### A masked pattern was here #### PREHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER VIEW testView UNSET TBLPROPERTIES IF EXISTS ('propB', 'propC', 'propD', 'propF') +ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propB', 'propC', 'propD', 'propF') PREHOOK: type: ALTERVIEW_PROPERTIES -PREHOOK: Input: default@testview 
-PREHOOK: Output: default@testview +PREHOOK: Input: vt@testview +PREHOOK: Output: vt@testview POSTHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER VIEW testView UNSET TBLPROPERTIES IF EXISTS ('propB', 'propC', 'propD', 'propF') +ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propB', 'propC', 'propD', 'propF') POSTHOOK: type: ALTERVIEW_PROPERTIES -POSTHOOK: Input: default@testview -POSTHOOK: Output: default@testview -PREHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: Input: vt@testview +POSTHOOK: Output: vt@testview +PREHOOK: query: SHOW TBLPROPERTIES vt.testView PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: SHOW TBLPROPERTIES testView +POSTHOOK: query: SHOW TBLPROPERTIES vt.testView POSTHOOK: type: SHOW_TBLPROPERTIES #### A masked pattern was here #### propA 100 #### A masked pattern was here #### +PREHOOK: query: DROP VIEW vt.testView +PREHOOK: type: DROPVIEW +PREHOOK: Input: vt@testview +PREHOOK: Output: vt@testview +POSTHOOK: query: DROP VIEW vt.testView +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: vt@testview +POSTHOOK: Output: vt@testview +PREHOOK: query: DROP DATABASE vt +PREHOOK: type: DROPDATABASE +PREHOOK: Input: database:vt +PREHOOK: Output: database:vt +POSTHOOK: query: DROP DATABASE vt +POSTHOOK: type: DROPDATABASE +POSTHOOK: Input: database:vt +POSTHOOK: Output: database:vt diff --git a/ql/src/test/results/clientpositive/updateAccessTime.q.out b/ql/src/test/results/clientpositive/updateAccessTime.q.out index 08d697c..104e155 100644 --- a/ql/src/test/results/clientpositive/updateAccessTime.q.out +++ b/ql/src/test/results/clientpositive/updateAccessTime.q.out @@ -5,9 +5,12 @@ POSTHOOK: type: DROPTABLE PREHOOK: query: create table tstsrc as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@tstsrc POSTHOOK: query: create table tstsrc as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@tstsrc PREHOOK: query: desc extended tstsrc PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/varchar_nested_types.q.out b/ql/src/test/results/clientpositive/varchar_nested_types.q.out index 55bd967..1cd232a 100644 --- a/ql/src/test/results/clientpositive/varchar_nested_types.q.out +++ b/ql/src/test/results/clientpositive/varchar_nested_types.q.out @@ -164,11 +164,14 @@ create table varchar_nested_cta as select * from varchar_nested_struct PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@varchar_nested_struct +PREHOOK: Output: database:default +PREHOOK: Output: default@varchar_nested_cta POSTHOOK: query: -- nested type with create table as create table varchar_nested_cta as select * from varchar_nested_struct POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@varchar_nested_struct +POSTHOOK: Output: database:default POSTHOOK: Output: default@varchar_nested_cta PREHOOK: query: describe varchar_nested_cta PREHOOK: type: DESCTABLE @@ -191,11 +194,14 @@ create table varchar_nested_view as select * from varchar_nested_struct PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@varchar_nested_struct +PREHOOK: Output: database:default +PREHOOK: Output: default@varchar_nested_view POSTHOOK: query: -- nested type with view create table varchar_nested_view as select * from varchar_nested_struct POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@varchar_nested_struct +POSTHOOK: Output: 
database:default POSTHOOK: Output: default@varchar_nested_view PREHOOK: query: describe varchar_nested_view PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/vector_between_in.q.out b/ql/src/test/results/clientpositive/vector_between_in.q.out index e7b64e2..631ac19 100644 --- a/ql/src/test/results/clientpositive/vector_between_in.q.out +++ b/ql/src/test/results/clientpositive/vector_between_in.q.out @@ -1,9 +1,12 @@ PREHOOK: query: CREATE TABLE decimal_date_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, CAST(CAST((CAST(cint AS BIGINT) *ctinyint) AS TIMESTAMP) AS DATE) AS cdate FROM alltypesorc ORDER BY cdate PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_date_test POSTHOOK: query: CREATE TABLE decimal_date_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, CAST(CAST((CAST(cint AS BIGINT) *ctinyint) AS TIMESTAMP) AS DATE) AS cdate FROM alltypesorc ORDER BY cdate POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default POSTHOOK: Output: default@decimal_date_test PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out index 874836d..c412782 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out @@ -5,6 +5,8 @@ PREHOOK: query: CREATE TABLE decimal_vgby STORED AS ORC AS FROM alltypesorc PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_vgby POSTHOOK: query: CREATE TABLE decimal_vgby STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, @@ -12,6 +14,7 @@ POSTHOOK: query: CREATE TABLE decimal_vgby STORED AS ORC AS FROM alltypesorc POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default POSTHOOK: Output: default@decimal_vgby PREHOOK: query: EXPLAIN SELECT cint, COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1), diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out index def9f8d..ca94561 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out @@ -1,9 +1,12 @@ PREHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_test POSTHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS 
DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default POSTHOOK: Output: default@decimal_test PREHOOK: query: EXPLAIN SELECT cdecimal1 + cdecimal2, cdecimal1 - (2*cdecimal2), ((cdecimal1+2.34)/cdecimal2), (cdecimal1 * (cdecimal2/3.4)), cdecimal1 % 10, CAST(cdecimal1 AS INT), CAST(cdecimal2 AS SMALLINT), CAST(cdecimal2 AS TINYINT), CAST(cdecimal1 AS BIGINT), CAST (cdecimal1 AS BOOLEAN), CAST(cdecimal2 AS DOUBLE), CAST(cdecimal1 AS FLOAT), CAST(cdecimal2 AS STRING), CAST(cdecimal1 AS TIMESTAMP) FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL LIMIT 10 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out index 4a90849..d9eb311 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out @@ -5,6 +5,8 @@ PREHOOK: query: CREATE TABLE decimal_mapjoin STORED AS ORC AS FROM alltypesorc PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_mapjoin POSTHOOK: query: CREATE TABLE decimal_mapjoin STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, @@ -12,6 +14,7 @@ POSTHOOK: query: CREATE TABLE decimal_mapjoin STORED AS ORC AS FROM alltypesorc POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default POSTHOOK: Output: default@decimal_mapjoin PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 FROM decimal_mapjoin l diff --git a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out index 2e98ceb..2cfa45a 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out @@ -1,9 +1,12 @@ PREHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_test POSTHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default POSTHOOK: Output: default@decimal_test PREHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end. 
diff --git a/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out b/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out index 6c89f71..ebbb24f 100644 --- a/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out +++ b/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out @@ -1,9 +1,12 @@ PREHOOK: query: CREATE TABLE date_decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (CAST (cint AS TIMESTAMP) AS DATE) AS cdate, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal FROM alltypesorc PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@date_decimal_test POSTHOOK: query: CREATE TABLE date_decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (CAST (cint AS TIMESTAMP) AS DATE) AS cdate, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal FROM alltypesorc POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default POSTHOOK: Output: default@date_decimal_test PREHOOK: query: EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/vectorized_ptf.q.out index 202ec40..c81a214 100644 --- a/ql/src/test/results/clientpositive/vectorized_ptf.q.out +++ b/ql/src/test/results/clientpositive/vectorized_ptf.q.out @@ -5316,6 +5316,8 @@ from part group by p_mfgr, p_brand PREHOOK: type: CREATEVIEW PREHOOK: Input: default@part +PREHOOK: Output: database:default +PREHOOK: Output: default@mfgr_price_view POSTHOOK: query: -- 16. testViewAsTableInputToPTF create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, @@ -5324,6 +5326,7 @@ from part group by p_mfgr, p_brand POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@part +POSTHOOK: Output: database:default POSTHOOK: Output: default@mfgr_price_view PREHOOK: query: explain extended select p_mfgr, p_brand, s, diff --git a/ql/src/test/results/clientpositive/view.q.out b/ql/src/test/results/clientpositive/view.q.out index 0dce318..c7505b8 100644 --- a/ql/src/test/results/clientpositive/view.q.out +++ b/ql/src/test/results/clientpositive/view.q.out @@ -54,77 +54,101 @@ PREHOOK: query: -- relative reference, no alias CREATE VIEW v1 AS SELECT * FROM table1 PREHOOK: type: CREATEVIEW PREHOOK: Input: db1@table1 +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@v1 POSTHOOK: query: -- relative reference, no alias CREATE VIEW v1 AS SELECT * FROM table1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: db1@table1 +POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@v1 PREHOOK: query: -- relative reference, aliased CREATE VIEW v2 AS SELECT t1.* FROM table1 t1 PREHOOK: type: CREATEVIEW PREHOOK: Input: db1@table1 +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@v2 POSTHOOK: query: -- relative reference, aliased CREATE VIEW v2 AS SELECT t1.* FROM table1 t1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: db1@table1 +POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@v2 PREHOOK: query: -- relative reference, multiple tables CREATE VIEW v3 AS SELECT t1.*, t2.key k FROM table1 t1 JOIN table2 t2 ON t1.key = t2.key PREHOOK: type: CREATEVIEW PREHOOK: Input: db1@table1 PREHOOK: Input: db1@table2 +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@v3 POSTHOOK: query: -- relative reference, multiple tables CREATE VIEW v3 AS SELECT t1.*, t2.key k FROM table1 t1 JOIN table2 t2 ON t1.key = t2.key 
POSTHOOK: type: CREATEVIEW POSTHOOK: Input: db1@table1 POSTHOOK: Input: db1@table2 +POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@v3 PREHOOK: query: -- absolute reference, no alias CREATE VIEW v4 AS SELECT * FROM db1.table1 PREHOOK: type: CREATEVIEW PREHOOK: Input: db1@table1 +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@v4 POSTHOOK: query: -- absolute reference, no alias CREATE VIEW v4 AS SELECT * FROM db1.table1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: db1@table1 +POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@v4 PREHOOK: query: -- absolute reference, aliased CREATE VIEW v5 AS SELECT t1.* FROM db1.table1 t1 PREHOOK: type: CREATEVIEW PREHOOK: Input: db1@table1 +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@v5 POSTHOOK: query: -- absolute reference, aliased CREATE VIEW v5 AS SELECT t1.* FROM db1.table1 t1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: db1@table1 +POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@v5 PREHOOK: query: -- absolute reference, multiple tables CREATE VIEW v6 AS SELECT t1.*, t2.key k FROM db1.table1 t1 JOIN db1.table2 t2 ON t1.key = t2.key PREHOOK: type: CREATEVIEW PREHOOK: Input: db1@table1 PREHOOK: Input: db1@table2 +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@v6 POSTHOOK: query: -- absolute reference, multiple tables CREATE VIEW v6 AS SELECT t1.*, t2.key k FROM db1.table1 t1 JOIN db1.table2 t2 ON t1.key = t2.key POSTHOOK: type: CREATEVIEW POSTHOOK: Input: db1@table1 POSTHOOK: Input: db1@table2 +POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@v6 PREHOOK: query: -- relative reference, explicit column CREATE VIEW v7 AS SELECT key from table1 PREHOOK: type: CREATEVIEW PREHOOK: Input: db1@table1 +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@v7 POSTHOOK: query: -- relative reference, explicit column CREATE VIEW v7 AS SELECT key from table1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: db1@table1 +POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@v7 PREHOOK: query: -- absolute reference, explicit column CREATE VIEW v8 AS SELECT key from db1.table1 PREHOOK: type: CREATEVIEW PREHOOK: Input: db1@table1 +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@v8 POSTHOOK: query: -- absolute reference, explicit column CREATE VIEW v8 AS SELECT key from db1.table1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: db1@table1 +POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@v8 PREHOOK: query: CREATE DATABASE db2 PREHOOK: type: CREATEDATABASE diff --git a/ql/src/test/results/clientpositive/view_cast.q.out b/ql/src/test/results/clientpositive/view_cast.q.out index cf4a1f4..34444ae 100644 --- a/ql/src/test/results/clientpositive/view_cast.q.out +++ b/ql/src/test/results/clientpositive/view_cast.q.out @@ -56,9 +56,12 @@ POSTHOOK: type: DROPVIEW PREHOOK: query: CREATE VIEW aa_view_tw AS SELECT ks_uid, sr_id, act, at_ks_uid, at_sr_uid, from_unixtime(CAST(CAST( tstamp as BIGINT)/1000 AS BIGINT),'yyyyMMdd') AS act_date, from_unixtime(CAST(CAST( original_tstamp AS BIGINT)/1000 AS BIGINT),'yyyyMMdd') AS content_creation_date FROM atab WHERE dt='20130312' AND nt='tw' AND ks_uid != at_ks_uid PREHOOK: type: CREATEVIEW PREHOOK: Input: default@atab +PREHOOK: Output: database:default +PREHOOK: Output: default@aa_view_tw POSTHOOK: query: CREATE VIEW aa_view_tw AS SELECT ks_uid, sr_id, act, at_ks_uid, at_sr_uid, from_unixtime(CAST(CAST( tstamp as BIGINT)/1000 AS BIGINT),'yyyyMMdd') AS act_date, from_unixtime(CAST(CAST( original_tstamp AS BIGINT)/1000 AS BIGINT),'yyyyMMdd') AS content_creation_date FROM atab WHERE dt='20130312' AND nt='tw' AND ks_uid 
!= at_ks_uid POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@atab +POSTHOOK: Output: database:default POSTHOOK: Output: default@aa_view_tw PREHOOK: query: DROP VIEW IF EXISTS joined_aa_view_tw PREHOOK: type: DROPVIEW @@ -69,11 +72,14 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@aa_view_tw PREHOOK: Input: default@atab PREHOOK: Input: default@mstab +PREHOOK: Output: database:default +PREHOOK: Output: default@joined_aa_view_tw POSTHOOK: query: CREATE VIEW joined_aa_view_tw AS SELECT aa.ks_uid, aa.sr_id, aa.act, at_sr_uid, aa.act_date, aa.at_ks_uid, aa.content_creation_date, coalesce( other.ksc, 10.0) AS at_ksc, coalesce( self.ksc , 10.0 ) AS self_ksc FROM aa_view_tw aa LEFT OUTER JOIN ( SELECT ks_uid, csc AS ksc FROM mstab WHERE dt='20130311' ) self ON ( CAST(aa.ks_uid AS BIGINT) = CAST(self.ks_uid AS BIGINT) ) LEFT OUTER JOIN ( SELECT ks_uid, csc AS ksc FROM mstab WHERE dt='20130311' ) other ON ( CAST(aa.at_ks_uid AS BIGINT) = CAST(other.ks_uid AS BIGINT) ) POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@aa_view_tw POSTHOOK: Input: default@atab POSTHOOK: Input: default@mstab +POSTHOOK: Output: database:default POSTHOOK: Output: default@joined_aa_view_tw PREHOOK: query: SELECT * FROM joined_aa_view_tw PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/view_inputs.q.out b/ql/src/test/results/clientpositive/view_inputs.q.out index 0df9a4c..e9160f3 100644 --- a/ql/src/test/results/clientpositive/view_inputs.q.out +++ b/ql/src/test/results/clientpositive/view_inputs.q.out @@ -3,20 +3,26 @@ PREHOOK: query: -- Tests that selecting from a view and another view that select CREATE VIEW test_view1 AS SELECT * FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@test_view1 POSTHOOK: query: -- Tests that selecting from a view and another view that selects from that same view CREATE VIEW test_view1 AS SELECT * FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@test_view1 PREHOOK: query: CREATE VIEW test_view2 AS SELECT * FROM test_view1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Input: default@test_view1 +PREHOOK: Output: database:default +PREHOOK: Output: default@test_view2 POSTHOOK: query: CREATE VIEW test_view2 AS SELECT * FROM test_view1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Input: default@test_view1 +POSTHOOK: Output: database:default POSTHOOK: Output: default@test_view2 PREHOOK: query: SELECT COUNT(*) FROM test_view1 a JOIN test_view2 b ON a.key = b.key PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/windowing.q.out b/ql/src/test/results/clientpositive/windowing.q.out index b72f4c0..31d0511 100644 --- a/ql/src/test/results/clientpositive/windowing.q.out +++ b/ql/src/test/results/clientpositive/windowing.q.out @@ -1024,6 +1024,8 @@ from part group by p_mfgr, p_brand PREHOOK: type: CREATEVIEW PREHOOK: Input: default@part +PREHOOK: Output: database:default +PREHOOK: Output: default@mfgr_price_view POSTHOOK: query: -- 22. 
testViewAsTableInputWithWindowing create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, @@ -1032,6 +1034,7 @@ from part group by p_mfgr, p_brand POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@part +POSTHOOK: Output: database:default POSTHOOK: Output: default@mfgr_price_view PREHOOK: query: select * from ( @@ -1113,6 +1116,8 @@ from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and current row) PREHOOK: type: CREATEVIEW PREHOOK: Input: default@part +PREHOOK: Output: database:default +PREHOOK: Output: default@mfgr_brand_price_view POSTHOOK: query: -- 23. testCreateViewWithWindowingQuery create view IF NOT EXISTS mfgr_brand_price_view as select p_mfgr, p_brand, @@ -1121,6 +1126,7 @@ from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and current row) POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@part +POSTHOOK: Output: database:default POSTHOOK: Output: default@mfgr_brand_price_view PREHOOK: query: select * from mfgr_brand_price_view PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/windowing_navfn.q.out b/ql/src/test/results/clientpositive/windowing_navfn.q.out index e5bc4f4..4256640 100644 --- a/ql/src/test/results/clientpositive/windowing_navfn.q.out +++ b/ql/src/test/results/clientpositive/windowing_navfn.q.out @@ -614,6 +614,8 @@ SELECT explode( ) s1 lateral view explode(barr) arr as b PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@wtest POSTHOOK: query: create table wtest as select a, b from @@ -628,6 +630,7 @@ SELECT explode( ) s1 lateral view explode(barr) arr as b POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src +POSTHOOK: Output: database:default POSTHOOK: Output: default@wtest PREHOOK: query: select a, b, first_value(b) over (partition by a order by b rows between 1 preceding and 1 following ) , diff --git a/ql/src/test/results/clientpositive/windowing_streaming.q.out b/ql/src/test/results/clientpositive/windowing_streaming.q.out index 90ba45a..6765247 100644 --- a/ql/src/test/results/clientpositive/windowing_streaming.q.out +++ b/ql/src/test/results/clientpositive/windowing_streaming.q.out @@ -347,10 +347,13 @@ PREHOOK: query: create table sB ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' S select * from (select ctinyint, cdouble, rank() over(partition by ctinyint order by cdouble) r from alltypesorc) a where r < 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@sB POSTHOOK: query: create table sB ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as select * from (select ctinyint, cdouble, rank() over(partition by ctinyint order by cdouble) r from alltypesorc) a where r < 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default POSTHOOK: Output: default@sB PREHOOK: query: select * from sB where ctinyint is null @@ -412,10 +415,13 @@ PREHOOK: query: create table sD ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' S select * from (select ctinyint, cdouble, rank() over(partition by ctinyint order by cdouble) r from alltypesorc) a where r < 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@sD POSTHOOK: query: create table sD ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE as select * from (select ctinyint, cdouble, rank() 
over(partition by ctinyint order by cdouble) r from alltypesorc) a where r < 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default POSTHOOK: Output: default@sD PREHOOK: query: select * from sD where ctinyint is null diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveCharObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveCharObjectInspector.java index ff114c0..f429709 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveCharObjectInspector.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveCharObjectInspector.java @@ -64,15 +64,16 @@ private HiveCharWritable getWritableWithParams(HiveChar val) { } public Object set(Object o, HiveChar value) { - HiveChar setValue = (HiveChar) o; - setValue.setValue(value, getMaxLength()); - return setValue; + if (BaseCharUtils.doesPrimitiveMatchTypeParams(value, + (CharTypeInfo) typeInfo)) { + return value; + } else { + return new HiveChar(value, getMaxLength()); + } } public Object set(Object o, String value) { - HiveChar setValue = (HiveChar) o; - setValue.setValue(value, getMaxLength()); - return setValue; + return new HiveChar(value, getMaxLength()); } public Object create(HiveChar value) { diff --git a/service/if/TCLIService.thrift b/service/if/TCLIService.thrift index 80086b4..4024bb3 100644 --- a/service/if/TCLIService.thrift +++ b/service/if/TCLIService.thrift @@ -1054,6 +1054,9 @@ struct TFetchResultsReq { // Max number of rows that should be returned in // the rowset. 3: required i64 maxRows + + // The type of a fetch results request. 0 represents Query output. 1 represents Log + 4: optional i16 fetchType = 0 } struct TFetchResultsResp { diff --git a/service/src/gen/thrift/gen-cpp/TCLIService_types.cpp b/service/src/gen/thrift/gen-cpp/TCLIService_types.cpp index d5f98a8..326d25b 100644 --- a/service/src/gen/thrift/gen-cpp/TCLIService_types.cpp +++ b/service/src/gen/thrift/gen-cpp/TCLIService_types.cpp @@ -6137,8 +6137,8 @@ void swap(TGetResultSetMetadataResp &a, TGetResultSetMetadataResp &b) { swap(a.__isset, b.__isset); } -const char* TFetchResultsReq::ascii_fingerprint = "1B96A8C05BA9DD699FC8CD842240ABDE"; -const uint8_t TFetchResultsReq::binary_fingerprint[16] = {0x1B,0x96,0xA8,0xC0,0x5B,0xA9,0xDD,0x69,0x9F,0xC8,0xCD,0x84,0x22,0x40,0xAB,0xDE}; +const char* TFetchResultsReq::ascii_fingerprint = "B4CB1E4F8F8F4D50183DD372AD11753A"; +const uint8_t TFetchResultsReq::binary_fingerprint[16] = {0xB4,0xCB,0x1E,0x4F,0x8F,0x8F,0x4D,0x50,0x18,0x3D,0xD3,0x72,0xAD,0x11,0x75,0x3A}; uint32_t TFetchResultsReq::read(::apache::thrift::protocol::TProtocol* iprot) { @@ -6189,6 +6189,14 @@ uint32_t TFetchResultsReq::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_I16) { + xfer += iprot->readI16(this->fetchType); + this->__isset.fetchType = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -6223,6 +6231,11 @@ uint32_t TFetchResultsReq::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeI64(this->maxRows); xfer += oprot->writeFieldEnd(); + if (this->__isset.fetchType) { + xfer += oprot->writeFieldBegin("fetchType", ::apache::thrift::protocol::T_I16, 4); + xfer += oprot->writeI16(this->fetchType); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer 
+= oprot->writeStructEnd(); return xfer; @@ -6233,6 +6246,8 @@ void swap(TFetchResultsReq &a, TFetchResultsReq &b) { swap(a.operationHandle, b.operationHandle); swap(a.orientation, b.orientation); swap(a.maxRows, b.maxRows); + swap(a.fetchType, b.fetchType); + swap(a.__isset, b.__isset); } const char* TFetchResultsResp::ascii_fingerprint = "FC43BC2D6F3B76D4DB0F34226A745C8E"; diff --git a/service/src/gen/thrift/gen-cpp/TCLIService_types.h b/service/src/gen/thrift/gen-cpp/TCLIService_types.h index 1b37fb5..f32dc3c 100644 --- a/service/src/gen/thrift/gen-cpp/TCLIService_types.h +++ b/service/src/gen/thrift/gen-cpp/TCLIService_types.h @@ -3602,14 +3602,18 @@ class TGetResultSetMetadataResp { void swap(TGetResultSetMetadataResp &a, TGetResultSetMetadataResp &b); +typedef struct _TFetchResultsReq__isset { + _TFetchResultsReq__isset() : fetchType(true) {} + bool fetchType; +} _TFetchResultsReq__isset; class TFetchResultsReq { public: - static const char* ascii_fingerprint; // = "1B96A8C05BA9DD699FC8CD842240ABDE"; - static const uint8_t binary_fingerprint[16]; // = {0x1B,0x96,0xA8,0xC0,0x5B,0xA9,0xDD,0x69,0x9F,0xC8,0xCD,0x84,0x22,0x40,0xAB,0xDE}; + static const char* ascii_fingerprint; // = "B4CB1E4F8F8F4D50183DD372AD11753A"; + static const uint8_t binary_fingerprint[16]; // = {0xB4,0xCB,0x1E,0x4F,0x8F,0x8F,0x4D,0x50,0x18,0x3D,0xD3,0x72,0xAD,0x11,0x75,0x3A}; - TFetchResultsReq() : orientation((TFetchOrientation::type)0), maxRows(0) { + TFetchResultsReq() : orientation((TFetchOrientation::type)0), maxRows(0), fetchType(0) { orientation = (TFetchOrientation::type)0; } @@ -3619,6 +3623,9 @@ class TFetchResultsReq { TOperationHandle operationHandle; TFetchOrientation::type orientation; int64_t maxRows; + int16_t fetchType; + + _TFetchResultsReq__isset __isset; void __set_operationHandle(const TOperationHandle& val) { operationHandle = val; @@ -3632,6 +3639,11 @@ class TFetchResultsReq { maxRows = val; } + void __set_fetchType(const int16_t val) { + fetchType = val; + __isset.fetchType = true; + } + bool operator == (const TFetchResultsReq & rhs) const { if (!(operationHandle == rhs.operationHandle)) @@ -3640,6 +3652,10 @@ class TFetchResultsReq { return false; if (!(maxRows == rhs.maxRows)) return false; + if (__isset.fetchType != rhs.__isset.fetchType) + return false; + else if (__isset.fetchType && !(fetchType == rhs.fetchType)) + return false; return true; } bool operator != (const TFetchResultsReq &rhs) const { diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsReq.java b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsReq.java index 808b73f..068711f 100644 --- a/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsReq.java +++ b/service/src/gen/thrift/gen-javabean/org/apache/hive/service/cli/thrift/TFetchResultsReq.java @@ -37,6 +37,7 @@ private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField ORIENTATION_FIELD_DESC = new org.apache.thrift.protocol.TField("orientation", org.apache.thrift.protocol.TType.I32, (short)2); private static final org.apache.thrift.protocol.TField MAX_ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxRows", org.apache.thrift.protocol.TType.I64, (short)3); + private static final org.apache.thrift.protocol.TField FETCH_TYPE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("fetchType", org.apache.thrift.protocol.TType.I16, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,6 +48,7 @@ private TOperationHandle operationHandle; // required private TFetchOrientation orientation; // required private long maxRows; // required + private short fetchType; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -56,7 +58,8 @@ * @see TFetchOrientation */ ORIENTATION((short)2, "orientation"), - MAX_ROWS((short)3, "maxRows"); + MAX_ROWS((short)3, "maxRows"), + FETCH_TYPE((short)4, "fetchType"); private static final Map byName = new HashMap(); @@ -77,6 +80,8 @@ public static _Fields findByThriftId(int fieldId) { return ORIENTATION; case 3: // MAX_ROWS return MAX_ROWS; + case 4: // FETCH_TYPE + return FETCH_TYPE; default: return null; } @@ -118,7 +123,9 @@ public String getFieldName() { // isset id assignments private static final int __MAXROWS_ISSET_ID = 0; + private static final int __FETCHTYPE_ISSET_ID = 1; private byte __isset_bitfield = 0; + private _Fields optionals[] = {_Fields.FETCH_TYPE}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -128,6 +135,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TFetchOrientation.class))); tmpMap.put(_Fields.MAX_ROWS, new org.apache.thrift.meta_data.FieldMetaData("maxRows", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.FETCH_TYPE, new org.apache.thrift.meta_data.FieldMetaData("fetchType", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TFetchResultsReq.class, metaDataMap); } @@ -135,6 +144,8 @@ public String getFieldName() { public TFetchResultsReq() { this.orientation = org.apache.hive.service.cli.thrift.TFetchOrientation.FETCH_NEXT; + this.fetchType = (short)0; + } public TFetchResultsReq( @@ -161,6 +172,7 @@ public TFetchResultsReq(TFetchResultsReq other) { this.orientation = other.orientation; } this.maxRows = other.maxRows; + this.fetchType = other.fetchType; } public TFetchResultsReq deepCopy() { @@ -174,6 +186,8 @@ public void clear() { setMaxRowsIsSet(false); this.maxRows = 0; + this.fetchType = (short)0; + } public TOperationHandle getOperationHandle() { @@ -252,6 +266,28 @@ public void setMaxRowsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXROWS_ISSET_ID, value); } + public short getFetchType() { + return this.fetchType; + } + + public void setFetchType(short fetchType) { + this.fetchType = fetchType; + setFetchTypeIsSet(true); + } + + public void unsetFetchType() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __FETCHTYPE_ISSET_ID); + } + + /** Returns true if field fetchType is set (has been assigned a value) and false otherwise */ + public boolean isSetFetchType() { + return EncodingUtils.testBit(__isset_bitfield, __FETCHTYPE_ISSET_ID); + } + + public 
void setFetchTypeIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __FETCHTYPE_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case OPERATION_HANDLE: @@ -278,6 +314,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case FETCH_TYPE: + if (value == null) { + unsetFetchType(); + } else { + setFetchType((Short)value); + } + break; + } } @@ -292,6 +336,9 @@ public Object getFieldValue(_Fields field) { case MAX_ROWS: return Long.valueOf(getMaxRows()); + case FETCH_TYPE: + return Short.valueOf(getFetchType()); + } throw new IllegalStateException(); } @@ -309,6 +356,8 @@ public boolean isSet(_Fields field) { return isSetOrientation(); case MAX_ROWS: return isSetMaxRows(); + case FETCH_TYPE: + return isSetFetchType(); } throw new IllegalStateException(); } @@ -353,6 +402,15 @@ public boolean equals(TFetchResultsReq that) { return false; } + boolean this_present_fetchType = true && this.isSetFetchType(); + boolean that_present_fetchType = true && that.isSetFetchType(); + if (this_present_fetchType || that_present_fetchType) { + if (!(this_present_fetchType && that_present_fetchType)) + return false; + if (this.fetchType != that.fetchType) + return false; + } + return true; } @@ -375,6 +433,11 @@ public int hashCode() { if (present_maxRows) builder.append(maxRows); + boolean present_fetchType = true && (isSetFetchType()); + builder.append(present_fetchType); + if (present_fetchType) + builder.append(fetchType); + return builder.toHashCode(); } @@ -416,6 +479,16 @@ public int compareTo(TFetchResultsReq other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetFetchType()).compareTo(typedOther.isSetFetchType()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFetchType()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fetchType, typedOther.fetchType); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -455,6 +528,12 @@ public String toString() { sb.append("maxRows:"); sb.append(this.maxRows); first = false; + if (isSetFetchType()) { + if (!first) sb.append(", "); + sb.append("fetchType:"); + sb.append(this.fetchType); + first = false; + } sb.append(")"); return sb.toString(); } @@ -540,6 +619,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TFetchResultsReq st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // FETCH_TYPE + if (schemeField.type == org.apache.thrift.protocol.TType.I16) { + struct.fetchType = iprot.readI16(); + struct.setFetchTypeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -566,6 +653,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TFetchResultsReq s oprot.writeFieldBegin(MAX_ROWS_FIELD_DESC); oprot.writeI64(struct.maxRows); oprot.writeFieldEnd(); + if (struct.isSetFetchType()) { + oprot.writeFieldBegin(FETCH_TYPE_FIELD_DESC); + oprot.writeI16(struct.fetchType); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -586,6 +678,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TFetchResultsReq st struct.operationHandle.write(oprot); oprot.writeI32(struct.orientation.getValue()); oprot.writeI64(struct.maxRows); + BitSet optionals = new BitSet(); + if (struct.isSetFetchType()) { + optionals.set(0); + } + 
oprot.writeBitSet(optionals, 1); + if (struct.isSetFetchType()) { + oprot.writeI16(struct.fetchType); + } } @Override @@ -598,6 +698,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TFetchResultsReq str struct.setOrientationIsSet(true); struct.maxRows = iprot.readI64(); struct.setMaxRowsIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.fetchType = iprot.readI16(); + struct.setFetchTypeIsSet(true); + } } } diff --git a/service/src/gen/thrift/gen-py/TCLIService/ttypes.py b/service/src/gen/thrift/gen-py/TCLIService/ttypes.py index 2cbbdd8..6cd64d0 100644 --- a/service/src/gen/thrift/gen-py/TCLIService/ttypes.py +++ b/service/src/gen/thrift/gen-py/TCLIService/ttypes.py @@ -5752,6 +5752,7 @@ class TFetchResultsReq: - operationHandle - orientation - maxRows + - fetchType """ thrift_spec = ( @@ -5759,12 +5760,14 @@ class TFetchResultsReq: (1, TType.STRUCT, 'operationHandle', (TOperationHandle, TOperationHandle.thrift_spec), None, ), # 1 (2, TType.I32, 'orientation', None, 0, ), # 2 (3, TType.I64, 'maxRows', None, None, ), # 3 + (4, TType.I16, 'fetchType', None, 0, ), # 4 ) - def __init__(self, operationHandle=None, orientation=thrift_spec[2][4], maxRows=None,): + def __init__(self, operationHandle=None, orientation=thrift_spec[2][4], maxRows=None, fetchType=thrift_spec[4][4],): self.operationHandle = operationHandle self.orientation = orientation self.maxRows = maxRows + self.fetchType = fetchType def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -5791,6 +5794,11 @@ def read(self, iprot): self.maxRows = iprot.readI64(); else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I16: + self.fetchType = iprot.readI16(); + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -5813,6 +5821,10 @@ def write(self, oprot): oprot.writeFieldBegin('maxRows', TType.I64, 3) oprot.writeI64(self.maxRows) oprot.writeFieldEnd() + if self.fetchType is not None: + oprot.writeFieldBegin('fetchType', TType.I16, 4) + oprot.writeI16(self.fetchType) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() diff --git a/service/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb b/service/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb index 93f9a81..c731544 100644 --- a/service/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb +++ b/service/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb @@ -1598,11 +1598,13 @@ class TFetchResultsReq OPERATIONHANDLE = 1 ORIENTATION = 2 MAXROWS = 3 + FETCHTYPE = 4 FIELDS = { OPERATIONHANDLE => {:type => ::Thrift::Types::STRUCT, :name => 'operationHandle', :class => ::TOperationHandle}, ORIENTATION => {:type => ::Thrift::Types::I32, :name => 'orientation', :default => 0, :enum_class => ::TFetchOrientation}, - MAXROWS => {:type => ::Thrift::Types::I64, :name => 'maxRows'} + MAXROWS => {:type => ::Thrift::Types::I64, :name => 'maxRows'}, + FETCHTYPE => {:type => ::Thrift::Types::I16, :name => 'fetchType', :default => 0, :optional => true} } def struct_fields; FIELDS; end diff --git a/service/src/java/org/apache/hive/service/cli/CLIService.java b/service/src/java/org/apache/hive/service/cli/CLIService.java index d2cdfc1..ff5de4a 100644 --- a/service/src/java/org/apache/hive/service/cli/CLIService.java +++ b/service/src/java/org/apache/hive/service/cli/CLIService.java @@ -46,7 +46,6 @@ import org.apache.hive.service.ServiceException; import 
org.apache.hive.service.auth.HiveAuthFactory; import org.apache.hive.service.cli.operation.Operation; -import org.apache.hive.service.cli.session.HiveSession; import org.apache.hive.service.cli.session.SessionManager; import org.apache.hive.service.cli.thrift.TProtocolVersion; @@ -67,7 +66,6 @@ private HiveConf hiveConf; private SessionManager sessionManager; - private IMetaStoreClient metastoreClient; private UserGroupInformation serviceUGI; private UserGroupInformation httpUGI; @@ -80,11 +78,8 @@ public synchronized void init(HiveConf hiveConf) { this.hiveConf = hiveConf; sessionManager = new SessionManager(); addService(sessionManager); - /** - * If auth mode is Kerberos, do a kerberos login for the service from the keytab - */ - if (hiveConf.getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION).equalsIgnoreCase( - HiveAuthFactory.AuthTypes.KERBEROS.toString())) { + // If the hadoop cluster is secure, do a kerberos login for the service from the keytab + if (ShimLoader.getHadoopShims().isSecurityEnabled()) { try { HiveAuthFactory.loginFromKeytab(hiveConf); this.serviceUGI = ShimLoader.getHadoopShims().getUGIForConf(hiveConf); @@ -132,21 +127,23 @@ public synchronized void start() { } catch (IOException eIO) { throw new ServiceException("Error setting stage directories", eIO); } - + // Initialize and test a connection to the metastore + IMetaStoreClient metastoreClient = null; try { - // Initialize and test a connection to the metastore metastoreClient = new HiveMetaStoreClient(hiveConf); metastoreClient.getDatabases("default"); } catch (Exception e) { throw new ServiceException("Unable to connect to MetaStore!", e); } + finally { + if (metastoreClient != null) { + metastoreClient.close(); + } + } } @Override public synchronized void stop() { - if (metastoreClient != null) { - metastoreClient.close(); - } super.stop(); } @@ -170,7 +167,7 @@ public SessionHandle openSessionWithImpersonation(TProtocolVersion protocol, Str throws HiveSQLException { SessionHandle sessionHandle = sessionManager.openSession(protocol, username, password, null, configuration, true, delegationToken); - LOG.debug(sessionHandle + ": openSession()"); + LOG.debug(sessionHandle + ": openSessionWithImpersonation()"); return sessionHandle; } @@ -423,25 +420,20 @@ public TableSchema getResultSetMetadata(OperationHandle opHandle) } /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#fetchResults(org.apache.hive.service.cli.OperationHandle, org.apache.hive.service.cli.FetchOrientation, long) + * @see org.apache.hive.service.cli.ICLIService#fetchResults(org.apache.hive.service.cli.OperationHandle) */ @Override - public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows) + public RowSet fetchResults(OperationHandle opHandle) throws HiveSQLException { - RowSet rowSet = sessionManager.getOperationManager().getOperation(opHandle) - .getParentSession().fetchResults(opHandle, orientation, maxRows); - LOG.debug(opHandle + ": fetchResults()"); - return rowSet; + return fetchResults(opHandle, Operation.DEFAULT_FETCH_ORIENTATION, + Operation.DEFAULT_FETCH_MAX_ROWS, FetchType.QUERY_OUTPUT); } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#fetchResults(org.apache.hive.service.cli.OperationHandle) - */ @Override - public RowSet fetchResults(OperationHandle opHandle) - throws HiveSQLException { + public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, + long maxRows, FetchType fetchType) throws HiveSQLException { RowSet rowSet = 
sessionManager.getOperationManager().getOperation(opHandle) - .getParentSession().fetchResults(opHandle); + .getParentSession().fetchResults(opHandle, orientation, maxRows, fetchType); LOG.debug(opHandle + ": fetchResults()"); return rowSet; } diff --git a/service/src/java/org/apache/hive/service/cli/CLIServiceClient.java b/service/src/java/org/apache/hive/service/cli/CLIServiceClient.java index 87c10b9..3155c23 100644 --- a/service/src/java/org/apache/hive/service/cli/CLIServiceClient.java +++ b/service/src/java/org/apache/hive/service/cli/CLIServiceClient.java @@ -28,19 +28,17 @@ * */ public abstract class CLIServiceClient implements ICLIService { + private static final long DEFAULT_MAX_ROWS = 1000; public SessionHandle openSession(String username, String password) throws HiveSQLException { return openSession(username, password, Collections.emptyMap()); } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#fetchResults(org.apache.hive.service.cli.OperationHandle) - */ @Override public RowSet fetchResults(OperationHandle opHandle) throws HiveSQLException { // TODO: provide STATIC default value - return fetchResults(opHandle, FetchOrientation.FETCH_NEXT, 1000); + return fetchResults(opHandle, FetchOrientation.FETCH_NEXT, DEFAULT_MAX_ROWS, FetchType.QUERY_OUTPUT); } @Override diff --git a/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java b/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java index f665146..9cad5be 100644 --- a/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java +++ b/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java @@ -181,13 +181,10 @@ public TableSchema getResultSetMetadata(OperationHandle opHandle) throws HiveSQL return cliService.getResultSetMetadata(opHandle); } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#fetchResults(org.apache.hive.service.cli.OperationHandle, org.apache.hive.service.cli.FetchOrientation, long) - */ @Override - public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows) - throws HiveSQLException { - return cliService.fetchResults(opHandle, orientation, maxRows); + public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, + long maxRows, FetchType fetchType) throws HiveSQLException { + return cliService.fetchResults(opHandle, orientation, maxRows, fetchType); } diff --git a/service/src/java/org/apache/hive/service/cli/FetchType.java b/service/src/java/org/apache/hive/service/cli/FetchType.java new file mode 100644 index 0000000..a8e7fe1 --- /dev/null +++ b/service/src/java/org/apache/hive/service/cli/FetchType.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
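A minimal, illustrative sketch (not part of the patch) of the mapping the new FetchType enum below provides: the optional TFetchResultsReq.fetchType wire value 0 selects normal query output, 1 selects the operation log, and unknown values fall back to query output. It assumes only the FetchType class added by this patch:

    import org.apache.hive.service.cli.FetchType;

    public class FetchTypeMappingSketch {
      public static void main(String[] args) {
        // Wire values from TFetchResultsReq.fetchType map onto the enum constants.
        System.out.println(FetchType.getFetchType((short) 0)); // QUERY_OUTPUT
        System.out.println(FetchType.getFetchType((short) 1)); // LOG
        // Unknown values fall back to QUERY_OUTPUT rather than failing.
        System.out.println(FetchType.getFetchType((short) 7)); // QUERY_OUTPUT
        // toTFetchType() converts back to the thrift (i16) representation.
        System.out.println(FetchType.LOG.toTFetchType());      // 1
      }
    }
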
+ */ + +package org.apache.hive.service.cli; + +/** + * FetchType indicates the type of fetchResults request. + * It maps the TFetchType, which is generated from Thrift interface. + */ +public enum FetchType { + QUERY_OUTPUT((short)0), + LOG((short)1); + + private final short tFetchType; + + FetchType(short tFetchType) { + this.tFetchType = tFetchType; + } + + public static FetchType getFetchType(short tFetchType) { + for (FetchType fetchType : values()) { + if (tFetchType == fetchType.toTFetchType()) { + return fetchType; + } + } + return QUERY_OUTPUT; + } + + public short toTFetchType() { + return tFetchType; + } +} diff --git a/service/src/java/org/apache/hive/service/cli/ICLIService.java b/service/src/java/org/apache/hive/service/cli/ICLIService.java index c569796..c9cc1f4 100644 --- a/service/src/java/org/apache/hive/service/cli/ICLIService.java +++ b/service/src/java/org/apache/hive/service/cli/ICLIService.java @@ -27,79 +27,78 @@ public interface ICLIService { - public abstract SessionHandle openSession(String username, String password, + SessionHandle openSession(String username, String password, Map configuration) throws HiveSQLException; - public abstract SessionHandle openSessionWithImpersonation(String username, String password, + SessionHandle openSessionWithImpersonation(String username, String password, Map configuration, String delegationToken) throws HiveSQLException; - public abstract void closeSession(SessionHandle sessionHandle) + void closeSession(SessionHandle sessionHandle) throws HiveSQLException; - public abstract GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType infoType) + GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType infoType) throws HiveSQLException; - public abstract OperationHandle executeStatement(SessionHandle sessionHandle, String statement, + OperationHandle executeStatement(SessionHandle sessionHandle, String statement, Map confOverlay) throws HiveSQLException; - public abstract OperationHandle executeStatementAsync(SessionHandle sessionHandle, + OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement, Map confOverlay) throws HiveSQLException; - public abstract OperationHandle getTypeInfo(SessionHandle sessionHandle) + OperationHandle getTypeInfo(SessionHandle sessionHandle) throws HiveSQLException; - public abstract OperationHandle getCatalogs(SessionHandle sessionHandle) + OperationHandle getCatalogs(SessionHandle sessionHandle) throws HiveSQLException; - public abstract OperationHandle getSchemas(SessionHandle sessionHandle, + OperationHandle getSchemas(SessionHandle sessionHandle, String catalogName, String schemaName) throws HiveSQLException; - public abstract OperationHandle getTables(SessionHandle sessionHandle, + OperationHandle getTables(SessionHandle sessionHandle, String catalogName, String schemaName, String tableName, List tableTypes) throws HiveSQLException; - public abstract OperationHandle getTableTypes(SessionHandle sessionHandle) + OperationHandle getTableTypes(SessionHandle sessionHandle) throws HiveSQLException; - public abstract OperationHandle getColumns(SessionHandle sessionHandle, + OperationHandle getColumns(SessionHandle sessionHandle, String catalogName, String schemaName, String tableName, String columnName) throws HiveSQLException; - public abstract OperationHandle getFunctions(SessionHandle sessionHandle, + OperationHandle getFunctions(SessionHandle sessionHandle, String catalogName, String schemaName, String functionName) throws HiveSQLException; - public abstract 
OperationStatus getOperationStatus(OperationHandle opHandle) + OperationStatus getOperationStatus(OperationHandle opHandle) throws HiveSQLException; - public abstract void cancelOperation(OperationHandle opHandle) + void cancelOperation(OperationHandle opHandle) throws HiveSQLException; - public abstract void closeOperation(OperationHandle opHandle) + void closeOperation(OperationHandle opHandle) throws HiveSQLException; - public abstract TableSchema getResultSetMetadata(OperationHandle opHandle) + TableSchema getResultSetMetadata(OperationHandle opHandle) throws HiveSQLException; - public abstract RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, - long maxRows) - throws HiveSQLException; - - public abstract RowSet fetchResults(OperationHandle opHandle) + RowSet fetchResults(OperationHandle opHandle) throws HiveSQLException; - public abstract String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, + long maxRows, FetchType fetchType) throws HiveSQLException; + + String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String owner, String renewer) throws HiveSQLException; - public abstract void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String tokenStr) throws HiveSQLException; - public abstract void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String tokenStr) throws HiveSQLException; diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java index c9fd5f9..dec8757 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java @@ -42,11 +42,8 @@ protected GetCatalogsOperation(HiveSession parentSession) { rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#run() - */ @Override - public void run() throws HiveSQLException { + public void runInternal() throws HiveSQLException { setState(OperationState.RUNNING); setState(OperationState.FINISHED); } diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java index caf413d..92ea7b0 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java @@ -114,11 +114,8 @@ protected GetColumnsOperation(HiveSession parentSession, String catalogName, Str this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#run() - */ @Override - public void run() throws HiveSQLException { + public void runInternal() throws HiveSQLException { setState(OperationState.RUNNING); try { IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient(); diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java index 
fd4e94d..c54d6e3 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java @@ -68,11 +68,8 @@ public GetFunctionsOperation(HiveSession parentSession, this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#run() - */ @Override - public void run() throws HiveSQLException { + public void runInternal() throws HiveSQLException { setState(OperationState.RUNNING); try { if ((null == catalogName || "".equals(catalogName)) diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java index ebca996..f0e22b8 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java @@ -50,11 +50,8 @@ protected GetSchemasOperation(HiveSession parentSession, this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#run() - */ @Override - public void run() throws HiveSQLException { + public void runInternal() throws HiveSQLException { setState(OperationState.RUNNING); try { IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient(); diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java index 05991e0..486e50e 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java @@ -50,11 +50,8 @@ protected GetTableTypesOperation(HiveSession parentSession) { rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#run() - */ @Override - public void run() throws HiveSQLException { + public void runInternal() throws HiveSQLException { setState(OperationState.RUNNING); try { for (TableType type : TableType.values()) { diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java index 315dbea..823dc4d 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/GetTablesOperation.java @@ -71,11 +71,8 @@ protected GetTablesOperation(HiveSession parentSession, this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#run() - */ @Override - public void run() throws HiveSQLException { + public void runInternal() throws HiveSQLException { setState(OperationState.RUNNING); try { IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient(); diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java index 0ec2543..d251c9b 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java @@ -79,11 +79,8 @@ protected 
GetTypeInfoOperation(HiveSession parentSession) { rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#run() - */ @Override - public void run() throws HiveSQLException { + public void runInternal() throws HiveSQLException { setState(OperationState.RUNNING); try { for (Type type : Type.values()) { diff --git a/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java b/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java index 3d3fddc..bcc66cf 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java @@ -94,11 +94,8 @@ private void tearDownSessionIO() { IOUtils.cleanup(LOG, parentSession.getSessionState().err); } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.operation.Operation#run() - */ @Override - public void run() throws HiveSQLException { + public void runInternal() throws HiveSQLException { setState(OperationState.RUNNING); try { String command = getStatement().trim(); @@ -136,6 +133,7 @@ public void close() throws HiveSQLException { setState(OperationState.CLOSED); tearDownSessionIO(); cleanTmpFile(); + cleanupOperationLog(); } /* (non-Javadoc) diff --git a/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java new file mode 100644 index 0000000..7e61919 --- /dev/null +++ b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hive.service.cli.operation; +import java.io.CharArrayWriter; + +import org.apache.log4j.Layout; +import org.apache.log4j.Logger; +import org.apache.log4j.WriterAppender; +import org.apache.log4j.spi.Filter; +import org.apache.log4j.spi.LoggingEvent; + +/** + * An Appender to divert logs from individual threads to the LogObject they belong to. + */ +public class LogDivertAppender extends WriterAppender { + private static final Logger LOG = Logger.getLogger(LogDivertAppender.class.getName()); + private final OperationManager operationManager; + + /** + * A log filter that exclude messages coming from the logger with the given name. + * We apply this filter on the Loggers used by the log diversion stuff, so that + * they don't generate more logs for themselves when they process logs. 
+ */ + private static class NameExclusionFilter extends Filter { + private String excludeLoggerName = null; + + public NameExclusionFilter(String excludeLoggerName) { + this.excludeLoggerName = excludeLoggerName; + } + + @Override + public int decide(LoggingEvent ev) { + if (ev.getLoggerName().equals(excludeLoggerName)) { + return Filter.DENY; + } + return Filter.NEUTRAL; + } + } + + /** This is where the log message will go to */ + private final CharArrayWriter writer = new CharArrayWriter(); + + public LogDivertAppender(Layout layout, OperationManager operationManager) { + setLayout(layout); + setWriter(writer); + setName("LogDivertAppender"); + this.operationManager = operationManager; + + // Filter out messages coming from log processing classes, or we'll run an infinite loop. + addFilter(new NameExclusionFilter(LOG.getName())); + addFilter(new NameExclusionFilter(OperationLog.class.getName())); + addFilter(new NameExclusionFilter(OperationManager.class.getName())); + } + + /** + * Overrides WriterAppender.subAppend(), which does the real logging. + * No need to worry about concurrency since log4j calls this synchronously. + */ + @Override + protected void subAppend(LoggingEvent event) { + super.subAppend(event); + // That should've gone into our writer. Notify the LogContext. + String logOutput = writer.toString(); + writer.reset(); + + OperationLog log = operationManager.getOperationLogByThread(); + if (log == null) { + LOG.debug(" ---+++=== Dropped log event from thread " + event.getThreadName()); + return; + } + log.writeOperationLog(logOutput); + } +} diff --git a/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java b/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java index e0d17a1..3a1e2a0 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java @@ -46,6 +46,7 @@ protected MetadataOperation(HiveSession parentSession, OperationType opType) { @Override public void close() throws HiveSQLException { setState(OperationState.CLOSED); + cleanupOperationLog(); } /** diff --git a/service/src/java/org/apache/hive/service/cli/operation/Operation.java b/service/src/java/org/apache/hive/service/cli/operation/Operation.java index 45fbd61..0d6436e 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/Operation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/Operation.java @@ -17,6 +17,8 @@ */ package org.apache.hive.service.cli.operation; +import java.io.File; +import java.io.FileNotFoundException; import java.util.EnumSet; import java.util.concurrent.Future; @@ -41,11 +43,14 @@ private final OperationHandle opHandle; private HiveConf configuration; public static final Log LOG = LogFactory.getLog(Operation.class.getName()); + public static final FetchOrientation DEFAULT_FETCH_ORIENTATION = FetchOrientation.FETCH_NEXT; public static final long DEFAULT_FETCH_MAX_ROWS = 100; protected boolean hasResultSet; protected volatile HiveSQLException operationException; protected final boolean runAsync; protected volatile Future backgroundHandle; + protected OperationLog operationLog; + protected boolean isOperationLogEnabled; protected static final EnumSet DEFAULT_FETCH_ORIENTATION_SET = EnumSet.of(FetchOrientation.FETCH_NEXT,FetchOrientation.FETCH_FIRST); @@ -106,6 +111,11 @@ protected void setHasResultSet(boolean hasResultSet) { opHandle.setHasResultSet(hasResultSet); } + + public OperationLog 
getOperationLog() { + return operationLog; + } + protected final OperationState setState(OperationState newState) throws HiveSQLException { state.validateTransition(newState); this.state = newState; @@ -138,7 +148,97 @@ public boolean isFailed() { return OperationState.ERROR.equals(state); } - public abstract void run() throws HiveSQLException; + protected void createOperationLog() { + if (parentSession.isOperationLogEnabled()) { + File operationLogFile = new File(parentSession.getOperationLogSessionDir(), + opHandle.getHandleIdentifier().toString()); + isOperationLogEnabled = true; + + // create log file + try { + if (operationLogFile.exists()) { + LOG.warn("The operation log file should not exist, but it is already there: " + + operationLogFile.getAbsolutePath()); + operationLogFile.delete(); + } + if (!operationLogFile.createNewFile()) { + // the log file already exists and cannot be deleted. + // If it can be read/written, keep its contents and use it. + if (!operationLogFile.canRead() || !operationLogFile.canWrite()) { + LOG.warn("The existing operation log file cannot be recreated, " + + "and it cannot be read or written: " + operationLogFile.getAbsolutePath()); + isOperationLogEnabled = false; + return; + } + } + } catch (Exception e) { + LOG.warn("Unable to create operation log file: " + operationLogFile.getAbsolutePath(), e); + isOperationLogEnabled = false; + return; + } + + // create OperationLog object with the above log file + try { + operationLog = new OperationLog(opHandle.toString(), operationLogFile); + } catch (FileNotFoundException e) { + LOG.warn("Unable to instantiate OperationLog object for operation: " + + opHandle, e); + isOperationLogEnabled = false; + return; + } + + // register this operationLog with the current thread + OperationLog.setCurrentOperationLog(operationLog); + } + } + + protected void unregisterOperationLog() { + if (isOperationLogEnabled) { + OperationLog.removeCurrentOperationLog(); + } + } + + /** + * Invoked before runInternal(). + * Sets up preconditions or configuration. + */ + protected void beforeRun() { + createOperationLog(); + } + + /** + * Invoked after runInternal(), even if an exception is thrown in runInternal(). + * Cleans up resources that were set up in beforeRun(). + */ + protected void afterRun() { + unregisterOperationLog(); + } + + /** + * Implemented by subclasses of Operation to execute specific behaviors. + * @throws HiveSQLException + */ + protected abstract void runInternal() throws HiveSQLException; + + public void run() throws HiveSQLException { + beforeRun(); + try { + runInternal(); + } finally { + afterRun(); + } + } + + protected void cleanupOperationLog() { + if (isOperationLogEnabled) { + if (operationLog == null) { + LOG.error("Operation [ " + opHandle.getHandleIdentifier() + " ] " + + "logging is enabled, but its OperationLog object cannot be found."); + } else { + operationLog.close(); + } + } + } // TODO: make this abstract and implement in subclasses. public void cancel() throws HiveSQLException { diff --git a/service/src/java/org/apache/hive/service/cli/operation/OperationLog.java b/service/src/java/org/apache/hive/service/cli/operation/OperationLog.java new file mode 100644 index 0000000..d33bcf7 --- /dev/null +++ b/service/src/java/org/apache/hive/service/cli/operation/OperationLog.java @@ -0,0 +1,183 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hive.service.cli.operation; + +import com.google.common.base.Charsets; +import org.apache.commons.io.FileUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.IOUtils; +import org.apache.hive.service.cli.FetchOrientation; +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.hive.service.cli.OperationHandle; + +import java.io.*; +import java.util.ArrayList; +import java.util.List; + +/** + * OperationLog wraps the actual operation log file, and provides interface + * for accessing, reading, writing, and removing the file. + */ +public class OperationLog { + private static final Log LOG = LogFactory.getLog(OperationLog.class.getName()); + + private final String operationName; + private final LogFile logFile; + + public OperationLog(String name, File file) throws FileNotFoundException{ + operationName = name; + logFile = new LogFile(file); + } + + /** + * Singleton OperationLog object per thread. + */ + private static final ThreadLocal THREAD_LOCAL_OPERATION_LOG = new + ThreadLocal() { + @Override + protected synchronized OperationLog initialValue() { + return null; + } + }; + + public static void setCurrentOperationLog(OperationLog operationLog) { + THREAD_LOCAL_OPERATION_LOG.set(operationLog); + } + + public static OperationLog getCurrentOperationLog() { + return THREAD_LOCAL_OPERATION_LOG.get(); + } + + public static void removeCurrentOperationLog() { + THREAD_LOCAL_OPERATION_LOG.remove(); + } + + /** + * Write operation execution logs into log file + * @param operationLogMessage one line of log emitted from log4j + */ + public void writeOperationLog(String operationLogMessage) { + logFile.write(operationLogMessage); + } + + /** + * Read operation execution logs from log file + * @param fetchOrientation one of Enum FetchOrientation values + * @param maxRows the max number of fetched lines from log + * @return + * @throws HiveSQLException + */ + public List readOperationLog(FetchOrientation fetchOrientation, long maxRows) + throws HiveSQLException{ + return logFile.read(fetchOrientation, maxRows); + } + + /** + * Close this OperationLog when operation is closed. The log file will be removed. 
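A minimal, illustrative usage sketch (not part of the patch) of the OperationLog lifecycle described above: the operation registers the log on the executing thread, the diverted log4j output is appended, the fetch path reads it back line by line, and close() removes the backing file. It assumes a local temp file as a stand-in for the per-session operation log directory:

    import java.io.File;
    import java.util.List;
    import org.apache.hive.service.cli.FetchOrientation;
    import org.apache.hive.service.cli.operation.OperationLog;

    public class OperationLogSketch {
      public static void main(String[] args) throws Exception {
        // Stand-in for <operation log session dir>/<operation handle id>.
        File logFile = File.createTempFile("operation", ".log");
        OperationLog opLog = new OperationLog("example-operation", logFile);

        OperationLog.setCurrentOperationLog(opLog);   // what beforeRun()/the background thread does
        opLog.writeOperationLog("INFO ql.Driver: example diverted log line\n"); // what LogDivertAppender does

        // What a FetchType.LOG fetch does: read back at most 10 lines from the start of the file.
        List<String> lines = opLog.readOperationLog(FetchOrientation.FETCH_FIRST, 10);
        System.out.println(lines);

        OperationLog.removeCurrentOperationLog();     // what afterRun() does
        opLog.close();                                // removes the backing file when the operation closes
      }
    }
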
+ */ + public void close() { + logFile.remove(); + } + + /** + * Wrapper for read/write the operation log file + */ + private class LogFile { + private File file; + private BufferedReader in; + private PrintStream out; + private volatile boolean isRemoved; + + LogFile(File file) throws FileNotFoundException { + this.file = file; + in = new BufferedReader(new InputStreamReader(new FileInputStream(file))); + out = new PrintStream(new FileOutputStream(file)); + isRemoved = false; + } + + synchronized void write(String msg) { + // write log to the file + out.print(msg); + } + + synchronized List read(FetchOrientation fetchOrientation, long maxRows) + throws HiveSQLException{ + // reset the BufferReader, if fetching from the beginning of the file + if (fetchOrientation.equals(FetchOrientation.FETCH_FIRST)) { + resetIn(); + } + + return readResults(maxRows); + } + + void remove() { + try { + FileUtils.forceDelete(file); + isRemoved = true; + } catch (Exception e) { + LOG.error("Failed to remove corresponding log file of operation: " + operationName, e); + } + } + + private void resetIn() { + if (in != null) { + IOUtils.cleanup(LOG, in); + in = null; + } + } + + private List readResults(long nLines) throws HiveSQLException { + if (in == null) { + try { + in = new BufferedReader(new InputStreamReader(new FileInputStream(file))); + } catch (FileNotFoundException e) { + if (isRemoved) { + throw new HiveSQLException("The operation has been closed and its log file " + + file.getAbsolutePath() + " has been removed.", e); + } else { + throw new HiveSQLException("Operation Log file " + file.getAbsolutePath() + + " is not found.", e); + } + } + } + + List logs = new ArrayList(); + String line = ""; + // if nLines <= 0, read all lines in log file. + for (int i = 0; i < nLines || nLines <= 0; i++) { + try { + line = in.readLine(); + if (line == null) { + break; + } else { + logs.add(line); + } + } catch (IOException e) { + if (isRemoved) { + throw new HiveSQLException("The operation has been closed and its log file " + + file.getAbsolutePath() + " has been removed.", e); + } else { + throw new HiveSQLException("Reading operation log file encountered an exception: ", e); + } + } + } + return logs; + } + } +} diff --git a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java index 21c33bc..2867301 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java +++ b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java @@ -18,6 +18,7 @@ package org.apache.hive.service.cli.operation; +import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -25,22 +26,19 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Schema; import org.apache.hive.service.AbstractService; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationHandle; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationStatus; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.TableSchema; +import org.apache.hive.service.cli.*; import org.apache.hive.service.cli.session.HiveSession; +import org.apache.log4j.*; 
/** * OperationManager. * */ public class OperationManager extends AbstractService { - + private static final String DEFAULT_LAYOUT_PATTERN = "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n"; private final Log LOG = LogFactory.getLog(OperationManager.class.getName()); private HiveConf hiveConf; @@ -54,7 +52,11 @@ public OperationManager() { @Override public synchronized void init(HiveConf hiveConf) { this.hiveConf = hiveConf; - + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) { + initOperationLogCapture(); + } else { + LOG.debug("Operation level logging is turned off"); + } super.init(hiveConf); } @@ -70,6 +72,30 @@ public synchronized void stop() { super.stop(); } + private void initOperationLogCapture() { + // There should be a ConsoleAppender. Copy its Layout. + Logger root = Logger.getRootLogger(); + Layout layout = null; + + Enumeration appenders = root.getAllAppenders(); + while (appenders.hasMoreElements()) { + Appender ap = (Appender) appenders.nextElement(); + if (ap.getClass().equals(ConsoleAppender.class)) { + layout = ap.getLayout(); + break; + } + } + + if (layout == null) { + layout = new PatternLayout(DEFAULT_LAYOUT_PATTERN); + LOG.info("Cannot find a Layout from a ConsoleAppender. Using default Layout pattern."); + } + + // Register another Appender (with the same layout) that talks to us. + Appender ap = new LogDivertAppender(layout, this); + root.addAppender(ap); + } + public ExecuteStatementOperation newExecuteStatementOperation(HiveSession parentSession, String statement, Map confOverlay, boolean runAsync) throws HiveSQLException { @@ -191,4 +217,39 @@ public RowSet getOperationNextRowSet(OperationHandle opHandle, throws HiveSQLException { return getOperation(opHandle).getNextRowSet(orientation, maxRows); } + + public RowSet getOperationLogRowSet(OperationHandle opHandle, + FetchOrientation orientation, long maxRows) + throws HiveSQLException { + // get the OperationLog object from the operation + OperationLog operationLog = getOperation(opHandle).getOperationLog(); + if (operationLog == null) { + throw new HiveSQLException("Couldn't find log associated with operation handle: " + opHandle); + } + + // read logs + List logs = operationLog.readOperationLog(orientation, maxRows); + + // convert logs to RowSet + TableSchema tableSchema = new TableSchema(getLogSchema()); + RowSet rowSet = RowSetFactory.create(tableSchema, getOperation(opHandle).getProtocolVersion()); + for (String log : logs) { + rowSet.addRow(new String[] {log}); + } + + return rowSet; + } + + private Schema getLogSchema() { + Schema schema = new Schema(); + FieldSchema fieldSchema = new FieldSchema(); + fieldSchema.setName("operation_log"); + fieldSchema.setType("string"); + schema.addToFieldSchemas(fieldSchema); + return schema; + } + + public OperationLog getOperationLogByThread() { + return OperationLog.getCurrentOperationLog(); + } } diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java index de54ca1..8cabf7e 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java @@ -60,6 +60,7 @@ import org.apache.hive.service.cli.RowSetFactory; import org.apache.hive.service.cli.TableSchema; import org.apache.hive.service.cli.session.HiveSession; +import org.apache.hive.service.server.ThreadWithGarbageCleanup; /** * SQLOperation. 
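A hedged sketch (not part of the patch) of the client-facing side of getOperationLogRowSet() above: a fetchResults() call with FetchType.LOG returns rows that each carry one line of the diverted log in a single "operation_log" string column. Here `client` stands for any ICLIService implementation (embedded or thrift-backed), and the handle would come from executeStatementAsync():

    import org.apache.hive.service.cli.FetchOrientation;
    import org.apache.hive.service.cli.FetchType;
    import org.apache.hive.service.cli.HiveSQLException;
    import org.apache.hive.service.cli.ICLIService;
    import org.apache.hive.service.cli.OperationHandle;
    import org.apache.hive.service.cli.RowSet;

    public class OperationLogFetchSketch {
      /** Pulls the next batch of operation log lines for a running statement. */
      static RowSet fetchLogBatch(ICLIService client, OperationHandle handle) throws HiveSQLException {
        // FetchType.LOG routes the request to the operation log instead of the query's result set.
        return client.fetchResults(handle, FetchOrientation.FETCH_NEXT, 1000, FetchType.LOG);
      }
    }
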
@@ -134,7 +135,7 @@ public void prepare(HiveConf sqlOperationConf) throws HiveSQLException { } } - private void runInternal(HiveConf sqlOperationConf) throws HiveSQLException { + private void runQuery(HiveConf sqlOperationConf) throws HiveSQLException { try { // In Hive server mode, we are not able to retry in the FetchTask // case, when calling fetch queries since execute() has returned. @@ -164,50 +165,63 @@ private void runInternal(HiveConf sqlOperationConf) throws HiveSQLException { } @Override - public void run() throws HiveSQLException { + public void runInternal() throws HiveSQLException { setState(OperationState.PENDING); final HiveConf opConfig = getConfigForOperation(); prepare(opConfig); if (!shouldRunAsync()) { - runInternal(opConfig); + runQuery(opConfig); } else { + // We'll pass ThreadLocals in the background thread from the foreground (handler) thread final SessionState parentSessionState = SessionState.get(); - // current Hive object needs to be set in aysnc thread in case of remote metastore. - // The metastore client in Hive is associated with right user - final Hive sessionHive = getCurrentHive(); - // current UGI will get used by metastore when metsatore is in embedded mode - // so this needs to get passed to the new async thread + // ThreadLocal Hive object needs to be set in background thread. + // The metastore client in Hive is associated with right user. + final Hive parentHive = getSessionHive(); + // Current UGI will get used by metastore when metsatore is in embedded mode + // So this needs to get passed to the new background thread final UserGroupInformation currentUGI = getCurrentUGI(opConfig); - // Runnable impl to call runInternal asynchronously, // from a different thread Runnable backgroundOperation = new Runnable() { - @Override public void run() { PrivilegedExceptionAction doAsAction = new PrivilegedExceptionAction() { @Override public Object run() throws HiveSQLException { - - // Storing the current Hive object necessary when doAs is enabled - // User information is part of the metastore client member in Hive - Hive.set(sessionHive); + Hive.set(parentHive); SessionState.setCurrentSessionState(parentSessionState); + // Set current OperationLog in this async thread for keeping on saving query log. + registerCurrentOperationLog(); try { - runInternal(opConfig); + runQuery(opConfig); } catch (HiveSQLException e) { setOperationException(e); LOG.error("Error running hive query: ", e); + } finally { + unregisterOperationLog(); } return null; } }; + try { ShimLoader.getHadoopShims().doAs(currentUGI, doAsAction); } catch (Exception e) { setOperationException(new HiveSQLException(e)); LOG.error("Error running hive query as user : " + currentUGI.getShortUserName(), e); } + finally { + /** + * We'll cache the ThreadLocal RawStore object for this background thread for an orderly cleanup + * when this thread is garbage collected later. 
+ * @see org.apache.hive.service.server.ThreadWithGarbageCleanup#finalize() + */ + if (ThreadWithGarbageCleanup.currentThread() instanceof ThreadWithGarbageCleanup) { + ThreadWithGarbageCleanup currentThread = + (ThreadWithGarbageCleanup) ThreadWithGarbageCleanup.currentThread(); + currentThread.cacheThreadLocalRawStore(); + } + } } }; try { @@ -223,6 +237,12 @@ public Object run() throws HiveSQLException { } } + /** + * Returns the current UGI on the stack + * @param opConfig + * @return UserGroupInformation + * @throws HiveSQLException + */ private UserGroupInformation getCurrentUGI(HiveConf opConfig) throws HiveSQLException { try { return ShimLoader.getHadoopShims().getUGIForConf(opConfig); @@ -231,11 +251,28 @@ private UserGroupInformation getCurrentUGI(HiveConf opConfig) throws HiveSQLExce } } - private Hive getCurrentHive() throws HiveSQLException { + /** + * Returns the ThreadLocal Hive for the current thread + * @return Hive + * @throws HiveSQLException + */ + private Hive getSessionHive() throws HiveSQLException { try { return Hive.get(); } catch (HiveException e) { - throw new HiveSQLException("Failed to get current Hive object", e); + throw new HiveSQLException("Failed to get ThreadLocal Hive object", e); + } + } + + private void registerCurrentOperationLog() { + if (isOperationLogEnabled) { + if (operationLog == null) { + LOG.warn("Failed to get current OperationLog object of Operation: " + + getHandle().getHandleIdentifier()); + isOperationLogEnabled = false; + return; + } + OperationLog.setCurrentOperationLog(operationLog); } } @@ -267,6 +304,7 @@ public void cancel() throws HiveSQLException { @Override public void close() throws HiveSQLException { cleanup(OperationState.CLOSED); + cleanupOperationLog(); } @Override diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSession.java b/service/src/java/org/apache/hive/service/cli/session/HiveSession.java index 9785e95..270e4a6 100644 --- a/service/src/java/org/apache/hive/service/cli/session/HiveSession.java +++ b/service/src/java/org/apache/hive/service/cli/session/HiveSession.java @@ -23,13 +23,7 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.GetInfoType; -import org.apache.hive.service.cli.GetInfoValue; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationHandle; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.TableSchema; +import org.apache.hive.service.cli.*; public interface HiveSession extends HiveSessionBase { @@ -144,10 +138,8 @@ public OperationHandle getFunctions(String catalogName, String schemaName, public TableSchema getResultSetMetadata(OperationHandle opHandle) throws HiveSQLException; - public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows) - throws HiveSQLException; - - public RowSet fetchResults(OperationHandle opHandle) throws HiveSQLException; + public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, + long maxRows, FetchType fetchType) throws HiveSQLException; public String getDelegationToken(HiveAuthFactory authFactory, String owner, String renewer) throws HiveSQLException; diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java index eee1cc6..84e1c7e 100644 --- 
a/service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java +++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java @@ -24,6 +24,7 @@ import org.apache.hive.service.cli.operation.OperationManager; import org.apache.hive.service.cli.thrift.TProtocolVersion; +import java.io.File; import java.util.Map; /** @@ -38,40 +39,57 @@ * Set the session manager for the session * @param sessionManager */ - public void setSessionManager(SessionManager sessionManager); + void setSessionManager(SessionManager sessionManager); /** * Get the session manager for the session */ - public SessionManager getSessionManager(); + SessionManager getSessionManager(); /** * Set operation manager for the session * @param operationManager */ - public void setOperationManager(OperationManager operationManager); + void setOperationManager(OperationManager operationManager); /** * Initialize the session * @param sessionConfMap */ - public void initialize(Map sessionConfMap) throws Exception; + void initialize(Map sessionConfMap) throws Exception; - public SessionHandle getSessionHandle(); + /** + * Check whether operation logging is enabled and session dir is created successfully + */ + boolean isOperationLogEnabled(); + + /** + * Get the session dir, which is the parent dir of operation logs + * @return a file representing the parent directory of operation logs + */ + File getOperationLogSessionDir(); + + /** + * Set the session dir, which is the parent dir of operation logs + * @param operationLogRootDir the parent dir of the session dir + */ + void setOperationLogSessionDir(File operationLogRootDir); + + SessionHandle getSessionHandle(); - public String getUsername(); + String getUsername(); - public String getPassword(); + String getPassword(); - public HiveConf getHiveConf(); + HiveConf getHiveConf(); - public SessionState getSessionState(); + SessionState getSessionState(); - public String getUserName(); + String getUserName(); - public void setUserName(String userName); + void setUserName(String userName); - public String getIpAddress(); + String getIpAddress(); - public void setIpAddress(String ipAddress); + void setIpAddress(String ipAddress); } diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java index bc0a02c..4e5f595 100644 --- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java +++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java @@ -28,6 +28,7 @@ import java.util.Map; import java.util.Set; +import org.apache.commons.io.FileUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.common.cli.HiveFileProcessor; @@ -44,14 +45,7 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.common.util.HiveVersionInfo; import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.GetInfoType; -import org.apache.hive.service.cli.GetInfoValue; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationHandle; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.SessionHandle; -import org.apache.hive.service.cli.TableSchema; +import org.apache.hive.service.cli.*; import org.apache.hive.service.cli.operation.ExecuteStatementOperation; import 
org.apache.hive.service.cli.operation.GetCatalogsOperation; import org.apache.hive.service.cli.operation.GetColumnsOperation; @@ -62,6 +56,7 @@ import org.apache.hive.service.cli.operation.MetadataOperation; import org.apache.hive.service.cli.operation.OperationManager; import org.apache.hive.service.cli.thrift.TProtocolVersion; +import org.apache.hive.service.server.ThreadWithGarbageCleanup; /** * HiveSession @@ -86,6 +81,8 @@ private OperationManager operationManager; private IMetaStoreClient metastoreClient = null; private final Set opHandleSet = new HashSet(); + private boolean isOperationLogEnabled; + private File sessionLogDir; public HiveSessionImpl(TProtocolVersion protocol, String username, String password, HiveConf serverhiveConf, String ipAddress) { @@ -95,14 +92,19 @@ public HiveSessionImpl(TProtocolVersion protocol, String username, String passwo this.hiveConf = new HiveConf(serverhiveConf); this.ipAddress = ipAddress; - // set an explicit session name to control the download directory name + // Set an explicit session name to control the download directory name hiveConf.set(ConfVars.HIVESESSIONID.varname, sessionHandle.getHandleIdentifier().toString()); - // use thrift transportable formatter + // Use thrift transportable formatter hiveConf.set(ListSinkOperator.OUTPUT_FORMATTER, FetchFormatter.ThriftFormatter.class.getName()); hiveConf.setInt(ListSinkOperator.OUTPUT_PROTOCOL, protocol.getValue()); + /** + * Create a new SessionState object that will be associated with this HiveServer2 session. + * When the server executes multiple queries in the same session, + * this SessionState object is reused across multiple queries. + */ sessionState = new SessionState(hiveConf, username); sessionState.setUserIpAddress(ipAddress); sessionState.setIsHiveServerQuery(true); @@ -111,11 +113,9 @@ public HiveSessionImpl(TProtocolVersion protocol, String username, String passwo @Override public void initialize(Map sessionConfMap) throws Exception { - //process global init file: .hiverc + // Process global init file: .hiverc processGlobalInitFile(); - SessionState.setCurrentSessionState(sessionState); - - //set conf properties specified by user from client side + // Set conf properties specified by user from client side if (sessionConfMap != null) { configureSession(sessionConfMap); } @@ -169,6 +169,7 @@ private void processGlobalInitFile() { } private void configureSession(Map sessionConfMap) throws Exception { + SessionState.setCurrentSessionState(sessionState); for (Map.Entry entry : sessionConfMap.entrySet()) { String key = entry.getKey(); if (key.startsWith("set:")) { @@ -182,6 +183,34 @@ private void configureSession(Map sessionConfMap) throws Excepti } @Override + public void setOperationLogSessionDir(File operationLogRootDir) { + sessionLogDir = new File(operationLogRootDir, sessionHandle.getHandleIdentifier().toString()); + isOperationLogEnabled = true; + + if (!sessionLogDir.exists()) { + if (!sessionLogDir.mkdir()) { + LOG.warn("Unable to create operation log session directory: " + + sessionLogDir.getAbsolutePath()); + isOperationLogEnabled = false; + } + } + + if (isOperationLogEnabled) { + LOG.info("Operation log session directory is created: " + sessionLogDir.getAbsolutePath()); + } + } + + @Override + public boolean isOperationLogEnabled() { + return isOperationLogEnabled; + } + + @Override + public File getOperationLogSessionDir() { + return sessionLogDir; + } + + @Override public TProtocolVersion getProtocolVersion() { return sessionHandle.getProtocolVersion(); } @@ -211,14 
+240,26 @@ public void open() { } protected synchronized void acquire() throws HiveSQLException { - // need to make sure that the this connections session state is - // stored in the thread local for sessions. + // Need to make sure that the this HiveServer2's session's session state is + // stored in the thread local for the handler thread. SessionState.setCurrentSessionState(sessionState); } + /** + * 1. We'll remove the ThreadLocal SessionState as this thread might now serve + * other requests. + * 2. We'll cache the ThreadLocal RawStore object for this background thread for an orderly cleanup + * when this thread is garbage collected later. + * @see org.apache.hive.service.server.ThreadWithGarbageCleanup#finalize() + */ protected synchronized void release() { assert sessionState != null; SessionState.detachSession(); + if (ThreadWithGarbageCleanup.currentThread() instanceof ThreadWithGarbageCleanup) { + ThreadWithGarbageCleanup currentThread = + (ThreadWithGarbageCleanup) ThreadWithGarbageCleanup.currentThread(); + currentThread.cacheThreadLocalRawStore(); + } } @Override @@ -468,7 +509,7 @@ public void close() throws HiveSQLException { try { acquire(); /** - * For metadata operations like getTables(), getColumns() etc, + * For metadata operations like getTables(), getColumns() etc, * the session allocates a private metastore handler which should be * closed at the end of the session */ @@ -480,6 +521,9 @@ public void close() throws HiveSQLException { operationManager.closeOperation(opHandle); } opHandleSet.clear(); + // Cleanup session log directory. + cleanupSessionLogDir(); + HiveHistory hiveHist = sessionState.getHiveHistory(); if (null != hiveHist) { hiveHist.closeStream(); @@ -492,6 +536,16 @@ public void close() throws HiveSQLException { } } + private void cleanupSessionLogDir() { + if (isOperationLogEnabled) { + try { + FileUtils.forceDelete(sessionLogDir); + } catch (Exception e) { + LOG.error("Failed to cleanup session log dir: " + sessionHandle, e); + } + } + } + @Override public SessionState getSessionState() { return sessionState; @@ -539,22 +593,17 @@ public TableSchema getResultSetMetadata(OperationHandle opHandle) throws HiveSQL } @Override - public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows) - throws HiveSQLException { - acquire(); - try { - return sessionManager.getOperationManager() - .getOperationNextRowSet(opHandle, orientation, maxRows); - } finally { - release(); - } - } - - @Override - public RowSet fetchResults(OperationHandle opHandle) throws HiveSQLException { + public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, + long maxRows, FetchType fetchType) throws HiveSQLException { acquire(); try { - return sessionManager.getOperationManager().getOperationNextRowSet(opHandle); + if (fetchType == FetchType.QUERY_OUTPUT) { + return sessionManager.getOperationManager() + .getOperationNextRowSet(opHandle, orientation, maxRows); + } else { + return sessionManager.getOperationManager() + .getOperationLogRowSet(opHandle, orientation, maxRows); + } } finally { release(); } diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java index 39d2184..7668904 100644 --- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java +++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java @@ -48,6 +48,14 @@ public 
HiveSessionImplwithUGI(TProtocolVersion protocol, String username, String super(protocol, username, password, hiveConf, ipAddress); setSessionUGI(username); setDelegationToken(delegationToken); + + // create a new metastore connection for this particular user session + Hive.set(null); + try { + sessionHive = Hive.get(getHiveConf()); + } catch (HiveException e) { + throw new HiveSQLException("Failed to setup metastore connection", e); + } } // setup appropriate UGI for the session @@ -115,13 +123,6 @@ private void setDelegationToken(String delegationTokenStr) throws HiveSQLExcepti } catch (IOException e) { throw new HiveSQLException("Couldn't setup delegation token in the ugi", e); } - // create a new metastore connection using the delegation token - Hive.set(null); - try { - sessionHive = Hive.get(getHiveConf()); - } catch (HiveException e) { - throw new HiveSQLException("Failed to setup metastore connection", e); - } } } diff --git a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java index d573592..17c1c7b 100644 --- a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java +++ b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java @@ -18,6 +18,8 @@ package org.apache.hive.service.cli.session; +import java.io.File; +import java.io.IOException; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -26,6 +28,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import org.apache.commons.io.FileUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; @@ -38,6 +41,7 @@ import org.apache.hive.service.cli.SessionHandle; import org.apache.hive.service.cli.operation.OperationManager; import org.apache.hive.service.cli.thrift.TProtocolVersion; +import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup; /** * SessionManager. 
@@ -52,6 +56,8 @@ new ConcurrentHashMap(); private final OperationManager operationManager = new OperationManager(); private ThreadPoolExecutor backgroundOperationPool; + private boolean isOperationLogEnabled; + private File operationLogRootDir; public SessionManager() { super("SessionManager"); @@ -64,22 +70,31 @@ public synchronized void init(HiveConf hiveConf) { } catch (HiveException e) { throw new RuntimeException("Error applying authorization policy on hive configuration", e); } - this.hiveConf = hiveConf; + //Create operation log root directory, if operation logging is enabled + if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) { + initOperationLogRootDir(); + } + createBackgroundOperationPool(); + addService(operationManager); + super.init(hiveConf); + } + + private void createBackgroundOperationPool() { int backgroundPoolSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS); - LOG.info("HiveServer2: Async execution thread pool size: " + backgroundPoolSize); + LOG.info("HiveServer2: Background operation thread pool size: " + backgroundPoolSize); int backgroundPoolQueueSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE); - LOG.info("HiveServer2: Async execution wait queue size: " + backgroundPoolQueueSize); + LOG.info("HiveServer2: Background operation thread wait queue size: " + backgroundPoolQueueSize); int keepAliveTime = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME); - LOG.info("HiveServer2: Async execution thread keepalive time: " + keepAliveTime); + LOG.info("HiveServer2: Background operation thread keepalive time: " + keepAliveTime); // Create a thread pool with #backgroundPoolSize threads // Threads terminate when they are idle for more than the keepAliveTime - // An bounded blocking queue is used to queue incoming operations, if #operations > backgroundPoolSize + // A bounded blocking queue is used to queue incoming operations, if #operations > backgroundPoolSize + String threadPoolName = "HiveServer2-Background-Pool"; backgroundOperationPool = new ThreadPoolExecutor(backgroundPoolSize, backgroundPoolSize, - keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue(backgroundPoolQueueSize)); + keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue(backgroundPoolQueueSize), + new ThreadFactoryWithGarbageCleanup(threadPoolName)); backgroundOperationPool.allowCoreThreadTimeOut(true); - addService(operationManager); - super.init(hiveConf); } private void applyAuthorizationConfigPolicy(HiveConf newHiveConf) throws HiveException { @@ -91,6 +106,36 @@ private void applyAuthorizationConfigPolicy(HiveConf newHiveConf) throws HiveExc ss.applyAuthorizationPolicy(); } + private void initOperationLogRootDir() { + operationLogRootDir = new File( + hiveConf.getVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION)); + isOperationLogEnabled = true; + + if (operationLogRootDir.exists() && !operationLogRootDir.isDirectory()) { + LOG.warn("The operation log root directory exists, but it is not a directory: " + + operationLogRootDir.getAbsolutePath()); + isOperationLogEnabled = false; + } + + if (!operationLogRootDir.exists()) { + if (!operationLogRootDir.mkdirs()) { + LOG.warn("Unable to create operation log root directory: " + + operationLogRootDir.getAbsolutePath()); + isOperationLogEnabled = false; + } + } + + if (isOperationLogEnabled) { + LOG.info("Operation log root directory is created: " + operationLogRootDir.getAbsolutePath()); + try { + FileUtils.forceDeleteOnExit(operationLogRootDir); + } catch 
(IOException e) { + LOG.warn("Failed to schedule cleanup HS2 operation logging root dir: " + + operationLogRootDir.getAbsolutePath(), e); + } + } + } + @Override public synchronized void start() { super.start(); @@ -109,6 +154,18 @@ public synchronized void stop() { " seconds has been exceeded. RUNNING background operations will be shut down", e); } } + cleanupLoggingRootDir(); + } + + private void cleanupLoggingRootDir() { + if (isOperationLogEnabled) { + try { + FileUtils.forceDelete(operationLogRootDir); + } catch (Exception e) { + LOG.warn("Failed to cleanup root dir of HS2 logging: " + operationLogRootDir + .getAbsolutePath(), e); + } + } } public SessionHandle openSession(TProtocolVersion protocol, String username, String password, String ipAddress, @@ -132,6 +189,9 @@ public SessionHandle openSession(TProtocolVersion protocol, String username, Str session.setOperationManager(operationManager); try { session.initialize(sessionConf); + if (isOperationLogEnabled) { + session.setOperationLogSessionDir(operationLogRootDir); + } session.open(); } catch (Exception e) { throw new HiveSQLException("Failed to open new session", e); diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java index 37b05fc..e5ce72f 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java @@ -19,12 +19,17 @@ package org.apache.hive.service.cli.thrift; import java.net.InetSocketAddress; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hive.service.auth.HiveAuthFactory; import org.apache.hive.service.cli.CLIService; +import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup; import org.apache.thrift.TProcessorFactory; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.server.TThreadPoolServer; @@ -65,6 +70,11 @@ public void run() { minWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS); maxWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS); + workerKeepAliveTime = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME); + String threadPoolName = "HiveServer2-Handler-Pool"; + ExecutorService executorService = new ThreadPoolExecutor(minWorkerThreads, maxWorkerThreads, + workerKeepAliveTime, TimeUnit.SECONDS, new SynchronousQueue(), + new ThreadFactoryWithGarbageCleanup(threadPoolName)); TServerSocket serverSocket = null; if (!hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL)) { @@ -84,8 +94,7 @@ public void run() { .processorFactory(processorFactory) .transportFactory(transportFactory) .protocolFactory(new TBinaryProtocol.Factory()) - .minWorkerThreads(minWorkerThreads) - .maxWorkerThreads(maxWorkerThreads); + .executorService(executorService); server = new TThreadPoolServer(sargs); diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java index be2eb01..86ed4b4 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java +++ 
b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java @@ -29,20 +29,10 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.service.AbstractService; import org.apache.hive.service.auth.HiveAuthFactory; import org.apache.hive.service.auth.TSetIpAddressProcessor; -import org.apache.hive.service.cli.CLIService; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.GetInfoType; -import org.apache.hive.service.cli.GetInfoValue; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationHandle; -import org.apache.hive.service.cli.OperationStatus; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.SessionHandle; -import org.apache.hive.service.cli.TableSchema; +import org.apache.hive.service.cli.*; import org.apache.hive.service.cli.session.SessionManager; import org.apache.thrift.TException; import org.apache.thrift.server.TServer; @@ -71,6 +61,7 @@ protected int minWorkerThreads; protected int maxWorkerThreads; + protected int workerKeepAliveTime; protected static HiveAuthFactory hiveAuthFactory; @@ -242,7 +233,9 @@ private String getUserName(TOpenSessionReq req) throws HiveSQLException { if (userName == null) { userName = req.getUsername(); } - return getProxyUser(userName, req.getConfiguration(), getIpAddress()); + String effectiveClientUser = getProxyUser(userName, req.getConfiguration(), getIpAddress()); + LOG.debug("Client's username: " + effectiveClientUser); + return effectiveClientUser; } /** @@ -532,7 +525,8 @@ public TFetchResultsResp FetchResults(TFetchResultsReq req) throws TException { RowSet rowSet = cliService.fetchResults( new OperationHandle(req.getOperationHandle()), FetchOrientation.getFetchOrientation(req.getOrientation()), - req.getMaxRows()); + req.getMaxRows(), + FetchType.getFetchType(req.getFetchType())); resp.setResults(rowSet.toTRowSet()); resp.setHasMoreRows(false); resp.setStatus(OK_STATUS); diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java index e3384d3..1af4539 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java @@ -22,18 +22,7 @@ import java.util.Map; import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.cli.CLIServiceClient; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.GetInfoType; -import org.apache.hive.service.cli.GetInfoValue; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationHandle; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationStatus; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.RowSetFactory; -import org.apache.hive.service.cli.SessionHandle; -import org.apache.hive.service.cli.TableSchema; +import org.apache.hive.service.cli.*; import org.apache.thrift.TException; /** @@ -377,17 +366,15 @@ public TableSchema getResultSetMetadata(OperationHandle opHandle) } } - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#fetchResults(org.apache.hive.service.cli.OperationHandle, 
org.apache.hive.service.cli.FetchOrientation, long) - */ @Override - public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows) - throws HiveSQLException { + public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows, + FetchType fetchType) throws HiveSQLException { try { TFetchResultsReq req = new TFetchResultsReq(); req.setOperationHandle(opHandle.toTOperationHandle()); req.setOrientation(orientation.toTFetchOrientation()); req.setMaxRows(maxRows); + req.setFetchType(fetchType.toTFetchType()); TFetchResultsResp resp = cliService.FetchResults(req); checkStatus(resp.getStatus()); return RowSetFactory.create(resp.getResults(), opHandle.getProtocolVersion()); @@ -404,7 +391,7 @@ public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientatio @Override public RowSet fetchResults(OperationHandle opHandle) throws HiveSQLException { // TODO: set the correct default fetch size - return fetchResults(opHandle, FetchOrientation.FETCH_NEXT, 10000); + return fetchResults(opHandle, FetchOrientation.FETCH_NEXT, 10000, FetchType.QUERY_OUTPUT); } @Override diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java index c380b69..21d1563 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java @@ -18,6 +18,11 @@ package org.apache.hive.service.cli.thrift; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.shims.ShimLoader; @@ -26,6 +31,7 @@ import org.apache.hive.service.auth.HiveAuthFactory; import org.apache.hive.service.auth.HiveAuthFactory.AuthTypes; import org.apache.hive.service.cli.CLIService; +import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup; import org.apache.thrift.TProcessor; import org.apache.thrift.TProcessorFactory; import org.apache.thrift.protocol.TBinaryProtocol; @@ -36,7 +42,7 @@ import org.eclipse.jetty.servlet.ServletContextHandler; import org.eclipse.jetty.servlet.ServletHolder; import org.eclipse.jetty.util.ssl.SslContextFactory; -import org.eclipse.jetty.util.thread.QueuedThreadPool; +import org.eclipse.jetty.util.thread.ExecutorThreadPool; public class ThriftHttpCLIService extends ThriftCLIService { @@ -63,13 +69,17 @@ public void run() { minWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_MIN_WORKER_THREADS); maxWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_MAX_WORKER_THREADS); + workerKeepAliveTime = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME); String httpPath = getHttpPath(hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH)); httpServer = new org.eclipse.jetty.server.Server(); - QueuedThreadPool threadPool = new QueuedThreadPool(); - threadPool.setMinThreads(minWorkerThreads); - threadPool.setMaxThreads(maxWorkerThreads); + String threadPoolName = "HiveServer2-HttpHandler-Pool"; + ExecutorService executorService = new ThreadPoolExecutor(minWorkerThreads, maxWorkerThreads, + workerKeepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue(), + new ThreadFactoryWithGarbageCleanup(threadPoolName)); + + 
ExecutorThreadPool threadPool = new ExecutorThreadPool(executorService); httpServer.setThreadPool(threadPool); SelectChannelConnector connector = new SelectChannelConnector();; diff --git a/service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java b/service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java new file mode 100644 index 0000000..ec19abc --- /dev/null +++ b/service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package org.apache.hive.service.server; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ThreadFactory; + +import org.apache.hadoop.hive.metastore.RawStore; + +/** + * A ThreadFactory for constructing new HiveServer2 threads that lets you plug + * in custom cleanup code to be called before this thread is GC-ed. + * Currently cleans up the following: + * 1. ThreadLocal RawStore object: + * In case of an embedded metastore, HiveServer2 threads (foreground & background) + * end up caching a ThreadLocal RawStore object. The ThreadLocal RawStore object has + * an instance of PersistenceManagerFactory & PersistenceManager. + * The PersistenceManagerFactory keeps a cache of PersistenceManager objects, + * which are only removed when PersistenceManager#close method is called. + * HiveServer2 uses ExecutorService for managing thread pools for foreground & background threads. + * ExecutorService unfortunately does not provide any hooks to be called, + * when a thread from the pool is terminated. + * As a solution, we're using this ThreadFactory to keep a cache of RawStore objects per thread. + * And we are doing clean shutdown in the finalizer for each thread. 
+ */ +public class ThreadFactoryWithGarbageCleanup implements ThreadFactory { + + private static Map threadRawStoreMap = new HashMap(); + + private final String namePrefix; + + public ThreadFactoryWithGarbageCleanup(String threadPoolName) { + namePrefix = threadPoolName; + } + + @Override + public Thread newThread(Runnable runnable) { + Thread newThread = new ThreadWithGarbageCleanup(runnable); + newThread.setName(namePrefix + ": Thread-" + newThread.getId()); + return newThread; + } + + public static Map getThreadRawStoreMap() { + return threadRawStoreMap; + } +} diff --git a/service/src/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java b/service/src/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java new file mode 100644 index 0000000..8ee9810 --- /dev/null +++ b/service/src/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package org.apache.hive.service.server; + +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.metastore.HiveMetaStore; +import org.apache.hadoop.hive.metastore.RawStore; + +/** + * A HiveServer2 thread used to construct new server threads. + * In particular, this thread ensures an orderly cleanup, + * when killed by its corresponding ExecutorService. + */ +public class ThreadWithGarbageCleanup extends Thread { + private static final Log LOG = LogFactory.getLog(ThreadWithGarbageCleanup.class); + + Map threadRawStoreMap = + ThreadFactoryWithGarbageCleanup.getThreadRawStoreMap(); + + public ThreadWithGarbageCleanup(Runnable runnable) { + super(runnable); + } + + /** + * Add any Thread specific garbage cleanup code here. + * Currently, it shuts down the RawStore object for this thread if it is not null. + */ + @Override + public void finalize() throws Throwable { + cleanRawStore(); + super.finalize(); + } + + private void cleanRawStore() { + Long threadId = this.getId(); + RawStore threadLocalRawStore = threadRawStoreMap.get(threadId); + if (threadLocalRawStore != null) { + LOG.debug("RawStore: " + threadLocalRawStore + ", for the thread: " + + this.getName() + " will be closed now."); + threadLocalRawStore.shutdown(); + threadRawStoreMap.remove(threadId); + } + } + + /** + * Cache the ThreadLocal RawStore object. Called from the corresponding thread. 
+ */ + public void cacheThreadLocalRawStore() { + Long threadId = this.getId(); + RawStore threadLocalRawStore = HiveMetaStore.HMSHandler.getRawStore(); + if (threadLocalRawStore != null && !threadRawStoreMap.containsKey(threadId)) { + LOG.debug("Adding RawStore: " + threadLocalRawStore + ", for the thread: " + + this.getName() + " to threadRawStoreMap for future cleanup."); + threadRawStoreMap.put(threadId, threadLocalRawStore); + } + } +} diff --git a/service/src/test/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java b/service/src/test/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java new file mode 100644 index 0000000..4e63a30 --- /dev/null +++ b/service/src/test/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java @@ -0,0 +1,253 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hive.service.cli.operation; + +import org.junit.Assert; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hive.service.cli.*; +import org.apache.hive.service.cli.thrift.EmbeddedThriftBinaryCLIService; +import org.apache.hive.service.cli.thrift.ThriftCLIServiceClient; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; + +/** + * TestOperationLoggingAPI + * Test the FetchResults of TFetchType.LOG in thrift level. + */ +public class TestOperationLoggingAPI { + private HiveConf hiveConf = new HiveConf(); + private String tableName = "testOperationLoggingAPI_table"; + private File dataFile; + private ThriftCLIServiceClient client; + private SessionHandle sessionHandle; + private String sql = "select * from " + tableName; + private String[] expectedLogs = { + "Parsing command", + "Parse Completed", + "Starting Semantic Analysis", + "Semantic Analysis Completed", + "Starting command" + }; + + /** + * Start embedded mode, open a session, and create a table for cases usage + * @throws Exception + */ + @Before + public void setUp() throws Exception { + dataFile = new File(hiveConf.get("test.data.files"), "kv1.txt"); + EmbeddedThriftBinaryCLIService service = new EmbeddedThriftBinaryCLIService(); + service.init(hiveConf); + client = new ThriftCLIServiceClient(service); + sessionHandle = setupSession(); + } + + @After + public void tearDown() throws Exception { + // Cleanup + String queryString = "DROP TABLE " + tableName; + client.executeStatement(sessionHandle, queryString, null); + + client.closeSession(sessionHandle); + } + + @Test + public void testFetchResultsOfLog() throws Exception { + // verify whether the sql operation log is generated and fetch correctly. 
+ OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null); + RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000, + FetchType.LOG); + verifyFetchedLog(rowSetLog); + } + + @Test + public void testFetchResultsOfLogAsync() throws Exception { + // verify whether the sql operation log is generated and fetch correctly in async mode. + OperationHandle operationHandle = client.executeStatementAsync(sessionHandle, sql, null); + + // Poll on the operation status till the query is completed + boolean isQueryRunning = true; + long pollTimeout = System.currentTimeMillis() + 100000; + OperationStatus opStatus; + OperationState state = null; + RowSet rowSetAccumulated = null; + StringBuilder logs = new StringBuilder(); + + while (isQueryRunning) { + // Break if polling times out + if (System.currentTimeMillis() > pollTimeout) { + break; + } + opStatus = client.getOperationStatus(operationHandle); + Assert.assertNotNull(opStatus); + state = opStatus.getState(); + + rowSetAccumulated = client.fetchResults(operationHandle, FetchOrientation.FETCH_NEXT, 1000, + FetchType.LOG); + for (Object[] row : rowSetAccumulated) { + logs.append(row[0]); + } + + if (state == OperationState.CANCELED || + state == OperationState.CLOSED || + state == OperationState.FINISHED || + state == OperationState.ERROR) { + isQueryRunning = false; + } + Thread.sleep(10); + } + // The sql should be completed now. + Assert.assertEquals("Query should be finished", OperationState.FINISHED, state); + + // Verify the accumulated logs + verifyFetchedLog(logs.toString()); + + // Verify the fetched logs from the beginning of the log file + RowSet rowSet = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000, + FetchType.LOG); + verifyFetchedLog(rowSet); + } + + @Test + public void testFetchResultsOfLogWithOrientation() throws Exception { + // (FETCH_FIRST) execute a sql, and fetch its sql operation log as expected value + OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null); + RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000, + FetchType.LOG); + int expectedLogLength = rowSetLog.numRows(); + + // (FETCH_NEXT) execute the same sql again, + // and fetch the sql operation log with FETCH_NEXT orientation + OperationHandle operationHandleWithOrientation = client.executeStatement(sessionHandle, sql, + null); + RowSet rowSetLogWithOrientation; + int logLength = 0; + int maxRows = calculateProperMaxRows(expectedLogLength); + do { + rowSetLogWithOrientation = client.fetchResults(operationHandleWithOrientation, + FetchOrientation.FETCH_NEXT, maxRows, FetchType.LOG); + logLength += rowSetLogWithOrientation.numRows(); + } while (rowSetLogWithOrientation.numRows() == maxRows); + Assert.assertEquals(expectedLogLength, logLength); + + // (FETCH_FIRST) fetch again from the same operation handle with FETCH_FIRST orientation + rowSetLogWithOrientation = client.fetchResults(operationHandleWithOrientation, + FetchOrientation.FETCH_FIRST, 1000, FetchType.LOG); + verifyFetchedLog(rowSetLogWithOrientation); + } + + @Test + public void testFetchResultsOfLogCleanup() throws Exception { + // Verify cleanup functionality. + // Open a new session, since this case needs to close the session in the end. 
+ SessionHandle sessionHandleCleanup = setupSession(); + + // prepare + OperationHandle operationHandle = client.executeStatement(sessionHandleCleanup, sql, null); + RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000, + FetchType.LOG); + verifyFetchedLog(rowSetLog); + + File sessionLogDir = new File( + hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION) + + File.separator + sessionHandleCleanup.getHandleIdentifier()); + File operationLogFile = new File(sessionLogDir, operationHandle.getHandleIdentifier().toString()); + + // check whether exception is thrown when fetching log from a closed operation. + client.closeOperation(operationHandle); + try { + client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000, FetchType.LOG); + Assert.fail("Fetch should fail"); + } catch (HiveSQLException e) { + Assert.assertTrue(e.getMessage().contains("Invalid OperationHandle:")); + } + + // check whether operation log file is deleted. + if (operationLogFile.exists()) { + Assert.fail("Operation log file should be deleted."); + } + + // check whether session log dir is deleted after session is closed. + client.closeSession(sessionHandleCleanup); + if (sessionLogDir.exists()) { + Assert.fail("Session log dir should be deleted."); + } + } + + private SessionHandle setupSession() throws Exception { + // Open a session + SessionHandle sessionHandle = client.openSession(null, null, null); + + // Change lock manager to embedded mode + String queryString = "SET hive.lock.manager=" + + "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager"; + client.executeStatement(sessionHandle, queryString, null); + + // Drop the table if it exists + queryString = "DROP TABLE IF EXISTS " + tableName; + client.executeStatement(sessionHandle, queryString, null); + + // Create a test table + queryString = "create table " + tableName + " (key int, value string)"; + client.executeStatement(sessionHandle, queryString, null); + + // Load data + queryString = "load data local inpath '" + dataFile + "' into table " + tableName; + client.executeStatement(sessionHandle, queryString, null); + + // Precondition check: verify whether the table is created and data is fetched correctly. + OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null); + RowSet rowSetResult = client.fetchResults(operationHandle); + Assert.assertEquals(500, rowSetResult.numRows()); + Assert.assertEquals(238, rowSetResult.iterator().next()[0]); + Assert.assertEquals("val_238", rowSetResult.iterator().next()[1]); + + return sessionHandle; + } + + // Since the log length of the sql operation may vary during HIVE dev, calculate a proper maxRows. + private int calculateProperMaxRows(int len) { + if (len < 10) { + return 1; + } else if (len < 100) { + return 10; + } else { + return 100; + } + } + + private void verifyFetchedLog(RowSet rowSet) { + StringBuilder stringBuilder = new StringBuilder(); + + for (Object[] row : rowSet) { + stringBuilder.append(row[0]); + } + + String logs = stringBuilder.toString(); + verifyFetchedLog(logs); + } + + private void verifyFetchedLog(String logs) { + for (String log : expectedLogs) { + Assert.assertTrue(logs.contains(log)); + } + } +}
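End to end, the client-facing flow the tests above exercise looks roughly like the following sketch against ThriftCLIServiceClient, with a session already open; the statement text, sleep interval and fetch sizes are illustrative only, and exception handling is omitted:

  // Run the statement asynchronously so the log can be streamed while it executes.
  OperationHandle handle = client.executeStatementAsync(sessionHandle, "SELECT * FROM some_table", null);

  OperationState state;
  do {
    // Each FETCH_NEXT call with FetchType.LOG returns only the log lines written
    // since the previous call, so the loop accumulates the log incrementally.
    RowSet logRows = client.fetchResults(handle, FetchOrientation.FETCH_NEXT, 1000, FetchType.LOG);
    for (Object[] row : logRows) {
      System.out.println(row[0]);
    }
    Thread.sleep(100);
    state = client.getOperationStatus(handle).getState();
  } while (state != OperationState.FINISHED && state != OperationState.CANCELED
      && state != OperationState.ERROR && state != OperationState.CLOSED);

  // FETCH_FIRST rewinds to the beginning of the operation log at any time,
  // which is what testFetchResultsOfLogAsync does for its final verification.
  RowSet fullLog = client.fetchResults(handle, FetchOrientation.FETCH_FIRST, 1000, FetchType.LOG);

  // The query results themselves are still fetched with FetchType.QUERY_OUTPUT.
  RowSet results = client.fetchResults(handle, FetchOrientation.FETCH_NEXT, 1000, FetchType.QUERY_OUTPUT);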