diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java index 3f87907..91f67b7 100644 --- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java @@ -53,17 +53,19 @@ public final class FileUtils { private static final Log LOG = LogFactory.getLog(FileUtils.class.getName()); - /** - * Accept all paths. - */ - private static class AcceptAllPathFilter implements PathFilter { - @Override - public boolean accept(Path path) { - return true; + public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() { + public boolean accept(Path p) { + String name = p.getName(); + return !name.startsWith("_") && !name.startsWith("."); } - } + }; - private static final PathFilter allPathFilter = new AcceptAllPathFilter(); + public static final PathFilter STAGING_DIR_PATH_FILTER = new PathFilter() { + public boolean accept(Path p) { + String name = p.getName(); + return !name.startsWith("."); + } + }; /** * Variant of Path.makeQualified that qualifies the input path against the default file system @@ -319,14 +321,7 @@ public static void listStatusRecursively(FileSystem fs, FileStatus fileStatus, List results) throws IOException { if (fileStatus.isDir()) { - for (FileStatus stat : fs.listStatus(fileStatus.getPath(), new PathFilter() { - - @Override - public boolean accept(Path p) { - String name = p.getName(); - return !name.startsWith("_") && !name.startsWith("."); - } - })) { + for (FileStatus stat : fs.listStatus(fileStatus.getPath(), HIDDEN_FILES_PATH_FILTER)) { listStatusRecursively(fs, stat, results); } } else { @@ -366,7 +361,6 @@ public static Path getPathOrParentThatExists(FileSystem fs, Path path) throws IO * check will be performed within a doAs() block to use the access privileges * of this user. In this case the user must be configured to impersonate other * users, otherwise this check will fail with error. - * @param groups List of groups for the user * @throws IOException * @throws AccessControlException * @throws InterruptedException @@ -547,10 +541,25 @@ public static boolean copy(FileSystem srcFS, Path src, boolean deleteSource, boolean overwrite, HiveConf conf) throws IOException { - boolean copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf); + + HadoopShims shims = ShimLoader.getHadoopShims(); + boolean copied; + + /* Run distcp if source file/dir is too big */ + if (srcFS.getUri().getScheme().equals("hdfs") && + srcFS.getFileStatus(src).getLen() > conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE)) { + LOG.info("Source is " + srcFS.getFileStatus(src).getLen() + " bytes. 
(MAX: " + conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE) + ")"); + LOG.info("Launch distributed copy (distcp) job."); + copied = shims.runDistCp(src, dst, conf); + if (copied && deleteSource) { + srcFS.delete(src, true); + } + } else { + copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf); + } + boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS); if (copied && inheritPerms) { - HadoopShims shims = ShimLoader.getHadoopShims(); HdfsFileStatus fullFileStatus = shims.getFullFileStatus(conf, dstFS, dst); try { shims.setFullFileStatus(conf, fullFileStatus, dstFS, dst); @@ -571,7 +580,7 @@ public static boolean copy(FileSystem srcFS, Path src, * @throws IOException */ public static boolean trashFilesUnderDir(FileSystem fs, Path f, Configuration conf) throws FileNotFoundException, IOException { - FileStatus[] statuses = fs.listStatus(f, allPathFilter); + FileStatus[] statuses = fs.listStatus(f, HIDDEN_FILES_PATH_FILTER); boolean result = true; for (FileStatus status : statuses) { result = result & moveToTrash(fs, status.getPath(), conf); @@ -603,6 +612,25 @@ public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf) thr return result; } + /** + * Check if first path is a subdirectory of second path. + * Both paths must belong to the same filesystem. + * + * @param p1 first path + * @param p2 second path + * @param fs FileSystem, both paths must belong to the same filesystem + * @return + */ + public static boolean isSubDir(Path p1, Path p2, FileSystem fs) { + String path1 = fs.makeQualified(p1).toString(); + String path2 = fs.makeQualified(p2).toString(); + if (path1.startsWith(path2)) { + return true; + } + + return false; + } + public static boolean renameWithPerms(FileSystem fs, Path sourcePath, Path destPath, boolean inheritPerms, Configuration conf) throws IOException { diff --git a/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java b/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java index 2d66c3b..9193f80 100644 --- a/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java @@ -70,7 +70,7 @@ sb.append(Path.SEPARATOR).append("*"); } Path pathPattern = new Path(path, sb.toString()); - return fs.globStatus(pathPattern); + return fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER); } } diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 25cccd7..5e00575 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -215,6 +215,10 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { PLAN_SERIALIZATION("hive.plan.serialization.format", "kryo", "Query plan format serialization between client and task nodes. \n" + "Two supported values are : kryo and javaXML. Kryo is default."), + STAGINGDIR("hive.exec.stagingdir", ".hive-staging", + "Directory name that will be created inside table locations in order to support HDFS encryption. " + + "This is replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " + + "In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."), SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive", "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. 
" + "For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/ is created, " + @@ -742,6 +746,10 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { "cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" + "assumption that the original group by will reduce the data size."), + // Max filesize used to do a single copy (after that, distcp is used) + HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/, + "Maximum file size (in Mb) that Hive uses to do single HDFS copies between directories." + + "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."), // for hive udtf operator HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false, diff --git a/data/scripts/q_test_cleanup_for_encryption.sql b/data/scripts/q_test_cleanup_for_encryption.sql new file mode 100644 index 0000000..08264ee --- /dev/null +++ b/data/scripts/q_test_cleanup_for_encryption.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS src; diff --git a/data/scripts/q_test_init_for_encryption.sql b/data/scripts/q_test_init_for_encryption.sql new file mode 100644 index 0000000..1822ebb --- /dev/null +++ b/data/scripts/q_test_init_for_encryption.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS src; + +CREATE TABLE src(key STRING COMMENT 'default', value STRING COMMENT 'default') STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/kv1.txt" OVERWRITE INTO TABLE src; diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java index 08572a0..da376d8 100644 --- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java +++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.shims.ShimLoader; @@ -149,7 +150,7 @@ public void close(boolean abort) throws IOException { fs.mkdirs(columnFamilyPath); Path srcDir = outputdir; for (;;) { - FileStatus [] files = fs.listStatus(srcDir); + FileStatus [] files = fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER); if ((files == null) || (files.length == 0)) { throw new IOException("No family directories found in " + srcDir); } @@ -161,7 +162,7 @@ public void close(boolean abort) throws IOException { break; } } - for (FileStatus regionFile : fs.listStatus(srcDir)) { + for (FileStatus regionFile : fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER)) { fs.rename( regionFile.getPath(), new Path( diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java index cc90129..03fe53d 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java @@ -696,7 +696,7 @@ private void discoverPartitions(JobContext context) throws IOException { // LOG.info("Searching for "+dynPathSpec); Path pathPattern = new Path(dynPathSpec); - 
FileStatus[] status = fs.globStatus(pathPattern); + FileStatus[] status = fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER); partitionsDiscoveredByPath = new LinkedHashMap>(); contextDiscoveredByPath = new LinkedHashMap(); diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java index 1a7595f..1cd5306 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java @@ -20,9 +20,7 @@ package org.apache.hive.hcatalog.mapreduce; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java index 8c87059..8b63e4d 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java @@ -205,168 +205,217 @@ public void testCreateTable() throws Exception { @Test - public void testStaticPartition() throws Exception { - String tableName = "staticpart"; - CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)"); - Assert.assertEquals(0,ret.getResponseCode()); + public void testInsertNonPartTable() throws Exception { + //case 1 is non-partitioned table. + String tableName = "nonpart"; + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)"); + Assert.assertEquals(0, ret.getResponseCode()); + + String tableLoc = warehouseDir + "/" + tableName; assertExistence(warehouseDir + "/" + tableName); + + //case1A: insert into non-partitioned table. 
setPermission(warehouseDir + "/" + tableName); + ret = driver.run("insert into table " + tableName + " select key,value from mysrc"); + Assert.assertEquals(0, ret.getResponseCode()); - ret = driver.run("insert into table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'"); - Assert.assertEquals(0,ret.getResponseCode()); + verifyPermission(warehouseDir + "/" + tableName); + Assert.assertTrue(listStatus(tableLoc).size() > 0); + for (String child : listStatus(tableLoc)) { + verifyPermission(child); + } - verifyPermission(warehouseDir + "/" + tableName + "/part1=1"); - verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1"); + //case1B: insert overwrite non-partitioned-table + setPermission(warehouseDir + "/" + tableName, 1); + ret = driver.run("insert overwrite table " + tableName + " select key,value from mysrc"); + Assert.assertEquals(0, ret.getResponseCode()); - Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0); - for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) { - verifyPermission(child); + verifyPermission(warehouseDir + "/" + tableName, 1); + Assert.assertTrue(listStatus(tableLoc).size() > 0); + for (String child : listStatus(tableLoc)) { + verifyPermission(child, 1); } } @Test - public void testAlterPartition() throws Exception { - String tableName = "alterpart"; - CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)"); - Assert.assertEquals(0,ret.getResponseCode()); + public void testInsertStaticSinglePartition() throws Exception { + String tableName = "singlestaticpart"; + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)"); + Assert.assertEquals(0, ret.getResponseCode()); assertExistence(warehouseDir + "/" + tableName); setPermission(warehouseDir + "/" + tableName); - ret = driver.run("insert into table " + tableName + " partition(part1='1',part2='1',part3='1') select key,value from mysrc"); - Assert.assertEquals(0,ret.getResponseCode()); + //insert into test + ret = driver.run("insert into table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'"); + Assert.assertEquals(0, ret.getResponseCode()); - assertExistence(warehouseDir + "/" + tableName); - setPermission(warehouseDir + "/" + tableName, 1); + verifyPermission(warehouseDir + "/" + tableName); + verifyPermission(warehouseDir + "/" + tableName + "/part1=1"); - //alter partition - ret = driver.run("alter table " + tableName + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')"); - Assert.assertEquals(0,ret.getResponseCode()); + Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0); + for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) { + verifyPermission(child); + } - verifyPermission(warehouseDir + "/" + tableName + "/part1=2", 1); - verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2", 1); - verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2", 1); + //insert overwrite test + setPermission(warehouseDir + "/" + tableName, 1); + ret = driver.run("insert overwrite table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'"); + Assert.assertEquals(0, ret.getResponseCode()); - 
Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0); - for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2")) { + verifyPermission(warehouseDir + "/" + tableName, 1); + verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1); + + Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0); + for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) { verifyPermission(child, 1); } } - @Test - public void testDynamicPartitions() throws Exception { - String tableName = "dynamicpart"; - + public void testInsertStaticDualPartition() throws Exception { + String tableName = "dualstaticpart"; CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)"); - Assert.assertEquals(0,ret.getResponseCode()); + Assert.assertEquals(0, ret.getResponseCode()); assertExistence(warehouseDir + "/" + tableName); setPermission(warehouseDir + "/" + tableName); - ret = driver.run("insert into table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc"); - Assert.assertEquals(0,ret.getResponseCode()); + //insert into test + ret = driver.run("insert into table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'"); + Assert.assertEquals(0, ret.getResponseCode()); + verifyPermission(warehouseDir + "/" + tableName); verifyPermission(warehouseDir + "/" + tableName + "/part1=1"); verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1"); - verifyPermission(warehouseDir + "/" + tableName + "/part1=2"); - verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2"); - Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0); for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) { verifyPermission(child); } - Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2").size() > 0); - for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2")) { - verifyPermission(child); + //insert overwrite test + setPermission(warehouseDir + "/" + tableName, 1); + ret = driver.run("insert overwrite table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'"); + Assert.assertEquals(0, ret.getResponseCode()); + + verifyPermission(warehouseDir + "/" + tableName, 1); + verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1); + verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 1); + + Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0); + for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) { + verifyPermission(child, 1); } } @Test - public void testExternalTable() throws Exception { - String tableName = "externaltable"; + public void testInsertDualDynamicPartitions() throws Exception { + String tableName = "dualdynamicpart"; - String myLocation = warehouseDir + "/myfolder"; - FileSystem fs = FileSystem.get(new URI(myLocation), conf); - fs.mkdirs(new Path(myLocation)); - setPermission(myLocation); + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)"); + Assert.assertEquals(0, ret.getResponseCode()); + assertExistence(warehouseDir + "/" + tableName); - 
CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) LOCATION '" + myLocation + "'"); - Assert.assertEquals(0,ret.getResponseCode()); + //Insert into test, with permission set 0. + setPermission(warehouseDir + "/" + tableName, 0); + ret = driver.run("insert into table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc"); + Assert.assertEquals(0, ret.getResponseCode()); - ret = driver.run("insert into table " + tableName + " select key,value from mysrc"); - Assert.assertEquals(0,ret.getResponseCode()); + verifyDualPartitionTable(warehouseDir + "/" + tableName, 0); - Assert.assertTrue(listStatus(myLocation).size() > 0); - for (String child : listStatus(myLocation)) { - verifyPermission(child); - } + //Insert overwrite test, with permission set 1. + setPermission(warehouseDir + "/" + tableName, 1); + ret = driver.run("insert overwrite table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc"); + Assert.assertEquals(0, ret.getResponseCode()); + + verifyDualPartitionTable(warehouseDir + "/" + tableName, 1); } @Test - public void testInsert() throws Exception { - //case 1 is non-partitioned table. - String tableName = "insert"; + public void testInsertSingleDynamicPartition() throws Exception { + String tableName = "singledynamicpart"; - CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)"); + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)"); Assert.assertEquals(0,ret.getResponseCode()); - String tableLoc = warehouseDir + "/" + tableName; - assertExistence(warehouseDir + "/" + tableName); + assertExistence(tableLoc); - //case1A: insert into non-partitioned table. - setPermission(warehouseDir + "/" + tableName); - ret = driver.run("insert into table " + tableName + " select key,value from mysrc"); + //Insert into test, with permission set 0. + setPermission(tableLoc, 0); + ret = driver.run("insert into table " + tableName + " partition (part1) select key,value,part1 from mysrc"); Assert.assertEquals(0,ret.getResponseCode()); + verifySinglePartition(tableLoc, 0); - Assert.assertTrue(listStatus(tableLoc).size() > 0); - for (String child : listStatus(tableLoc)) { - verifyPermission(child); - } - - //case1B: insert overwrite non-partitioned-table - setPermission(warehouseDir + "/" + tableName, 1); - ret = driver.run("insert overwrite table " + tableName + " select key,value from mysrc"); + //Insert overwrite test, with permission set 1. + setPermission(tableLoc, 1); + ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc"); Assert.assertEquals(0,ret.getResponseCode()); + verifySinglePartition(tableLoc, 1); - Assert.assertTrue(listStatus(tableLoc).size() > 0); - for (String child : listStatus(tableLoc)) { - verifyPermission(child, 1); - } + //delete and re-insert using insert overwrite. There's different code paths insert vs insert overwrite for new tables. + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0, ret.getResponseCode()); + ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)"); + Assert.assertEquals(0, ret.getResponseCode()); - //case 2 is partitioned table. 
- tableName = "insertpartition"; + assertExistence(warehouseDir + "/" + tableName); + setPermission(warehouseDir + "/" + tableName); - ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)"); + ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc"); + Assert.assertEquals(0, ret.getResponseCode()); + + verifySinglePartition(tableLoc, 0); + } + + @Test + public void testAlterPartition() throws Exception { + String tableName = "alterpart"; + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)"); Assert.assertEquals(0,ret.getResponseCode()); - ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc"); + assertExistence(warehouseDir + "/" + tableName); + setPermission(warehouseDir + "/" + tableName); + + ret = driver.run("insert into table " + tableName + " partition(part1='1',part2='1',part3='1') select key,value from mysrc"); Assert.assertEquals(0,ret.getResponseCode()); - String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1"; - assertExistence(partLoc); + assertExistence(warehouseDir + "/" + tableName); + setPermission(warehouseDir + "/" + tableName, 1); - //case 2A: insert into partitioned table. - setPermission(partLoc); - ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc"); + //alter partition + ret = driver.run("alter table " + tableName + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')"); Assert.assertEquals(0,ret.getResponseCode()); - Assert.assertTrue(listStatus(partLoc).size() > 0); - for (String child : listStatus(partLoc)) { - verifyPermission(child); + verifyPermission(warehouseDir + "/" + tableName + "/part1=2", 1); + verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2", 1); + verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2", 1); + + Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0); + for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2")) { + verifyPermission(child, 1); } + } + + @Test + public void testExternalTable() throws Exception { + String tableName = "externaltable"; + + String myLocation = warehouseDir + "/myfolder"; + FileSystem fs = FileSystem.get(new URI(myLocation), conf); + fs.mkdirs(new Path(myLocation)); + setPermission(myLocation); - //case 2B: insert into non-partitioned table. - setPermission(partLoc, 1); - ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc"); + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) LOCATION '" + myLocation + "'"); Assert.assertEquals(0,ret.getResponseCode()); - Assert.assertTrue(listStatus(tableLoc).size() > 0); - for (String child : listStatus(partLoc)) { - verifyPermission(child, 1); + ret = driver.run("insert into table " + tableName + " select key,value from mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(myLocation).size() > 0); + for (String child : listStatus(myLocation)) { + verifyPermission(child); } } @@ -422,7 +471,7 @@ public void testLoadLocal() throws Exception { } //case 2B: insert data overwrite into non-partitioned table. 
- setPermission(partLoc, 1); + setPermission(tableLoc, 1); ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')"); Assert.assertEquals(0,ret.getResponseCode()); @@ -487,7 +536,7 @@ public void testLoad() throws Exception { } //case 2B: insert data overwrite into non-partitioned table. - setPermission(partLoc, 1); + setPermission(tableLoc, 1); fs.copyFromLocalFile(dataFilePath, new Path(location)); ret = driver.run("LOAD DATA INPATH '" + location + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')"); Assert.assertEquals(0,ret.getResponseCode()); @@ -596,6 +645,40 @@ public void testExim() throws Exception { } } + private void verifySinglePartition(String tableLoc, int index) throws Exception { + verifyPermission(tableLoc + "/part1=1", index); + verifyPermission(tableLoc + "/part1=2", index); + + Assert.assertTrue(listStatus(tableLoc + "/part1=1").size() > 0); + for (String child : listStatus(tableLoc + "/part1=1")) { + verifyPermission(child, index); + } + + Assert.assertTrue(listStatus(tableLoc + "/part1=2").size() > 0); + for (String child : listStatus(tableLoc + "/part1=2")) { + verifyPermission(child, index); + } + } + + private void verifyDualPartitionTable(String baseTablePath, int index) throws Exception { + verifyPermission(baseTablePath, index); + verifyPermission(baseTablePath + "/part1=1", index); + verifyPermission(baseTablePath + "/part1=1/part2=1", index); + + verifyPermission(baseTablePath + "/part1=2", index); + verifyPermission(baseTablePath + "/part1=2/part2=2", index); + + Assert.assertTrue(listStatus(baseTablePath + "/part1=1/part2=1").size() > 0); + for (String child : listStatus(baseTablePath + "/part1=1/part2=1")) { + verifyPermission(child, index); + } + + Assert.assertTrue(listStatus(baseTablePath + "/part1=2/part2=2").size() > 0); + for (String child : listStatus(baseTablePath + "/part1=2/part2=2")) { + verifyPermission(child, index); + } + } + private void assertExistence(String locn) throws Exception { Assert.assertTrue(fs.exists(new Path(locn))); } diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml index a6a7547..14d7c8f 100644 --- a/itests/qtest/pom.xml +++ b/itests/qtest/pom.xml @@ -435,7 +435,7 @@ templatePath="${basedir}/${hive.path.to.root}/ql/src/test/templates/" template="TestCliDriver.vm" queryDirectory="${basedir}/${hive.path.to.root}/ql/src/test/queries/clientpositive/" queryFile="${qfile}" - excludeQueryFile="${minimr.query.files},${minitez.query.files}" + excludeQueryFile="${minimr.query.files},${minitez.query.files},${encrypted.query.files}" queryFileRegex="${qfile_regex}" clusterMode="${clustermode}" runDisabled="${run_disabled}" @@ -516,6 +516,24 @@ hadoopVersion="${active.hadoop.version}" initScript="q_test_init.sql" cleanupScript="q_test_cleanup.sql"/> + + diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties index 7a90c14..43c2d56 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -298,6 +298,14 @@ minitez.query.files=bucket_map_join_tez1.q,\ vectorized_dynamic_partition_pruning.q,\ tez_multi_union.q +encrypted.query.files=encryption_join_unencrypted_tbl.q,\ + encryption_insert_partition_static.q,\ + encryption_insert_partition_dynamic.q,\ + encryption_join_with_different_encryption_keys.q,\ + encryption_select_read_only_encrypted_tbl.q,\ + 
encryption_select_read_only_unencrypted_tbl.q,\ + encryption_load_data_to_encrypted_tables.q + beeline.positive.exclude=add_part_exist.q,\ alter1.q,\ alter2.q,\ diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index c01b727..842d150 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -38,8 +38,9 @@ import java.io.PrintStream; import java.io.Serializable; import java.io.StringWriter; -import java.lang.System; +import java.lang.RuntimeException; import java.net.URL; +import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -55,6 +56,7 @@ import junit.framework.Assert; +import org.apache.commons.lang.StringUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; @@ -86,6 +88,10 @@ import org.apache.hadoop.hive.ql.parse.ParseException; import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.processors.CommandProcessor; +import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory; +import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.processors.HiveCommand; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.HadoopShims; import org.apache.hadoop.hive.shims.ShimLoader; @@ -105,10 +111,16 @@ public class QTestUtil { public static final String UTF_8 = "UTF-8"; + + // security property names + private static final String SECURITY_KEY_PROVIDER_URI_NAME = "dfs.encryption.key.provider.uri"; + private static final String CRLF = System.getProperty("line.separator"); + private static final Log LOG = LogFactory.getLog("QTestUtil"); private static final String QTEST_LEAVE_FILES = "QTEST_LEAVE_FILES"; private final String defaultInitScript = "q_test_init.sql"; private final String defaultCleanupScript = "q_test_cleanup.sql"; + private final String[] testOnlyCommands = new String[]{"crypto"}; private String testWarehouse; private final String testFiles; @@ -133,6 +145,8 @@ private CliDriver cliDriver; private HadoopShims.MiniMrShim mr = null; private HadoopShims.MiniDFSShim dfs = null; + private HadoopShims.HdfsEncryptionShim hes = null; + private boolean miniMr = false; private String hadoopVer = null; private QTestSetup setup = null; private boolean isSessionStateStarted = false; @@ -257,7 +271,7 @@ public void initConf() throws Exception { // Plug verifying metastore in for testing. 
conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, - "org.apache.hadoop.hive.metastore.VerifyingObjectStore"); + "org.apache.hadoop.hive.metastore.VerifyingObjectStore"); if (mr != null) { assert dfs != null; @@ -284,6 +298,7 @@ public void initConf() throws Exception { mr, tez, spark, + encrypted, none; public static MiniClusterType valueForString(String type) { @@ -293,6 +308,8 @@ public static MiniClusterType valueForString(String type) { return tez; } else if (type.equals("spark")) { return spark; + } else if (type.equals("encrypted")) { + return encrypted; } else { return none; } @@ -305,6 +322,15 @@ public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, Stri this(outDir, logDir, clusterType, null, hadoopVer, initScript, cleanupScript); } + private String getKeyProviderURI() { + // Use the target directory if it is not specified + String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root")); + String keyDir = HIVE_ROOT + "ql/target/"; + + // put the jks file in the current test path only for test purpose + return "jceks://file" + new Path(keyDir, "test.jks").toUri(); + } + public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, String confDir, String hadoopVer, String initScript, String cleanupScript) throws Exception { @@ -329,8 +355,25 @@ public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, int numberOfDataNodes = 4; if (clusterType != MiniClusterType.none && clusterType != MiniClusterType.spark) { - dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null); - FileSystem fs = dfs.getFileSystem(); + FileSystem fs = null; + + if (clusterType == MiniClusterType.encrypted) { + // Set the security key provider so that the MiniDFS cluster is initialized + // with encryption + conf.set(SECURITY_KEY_PROVIDER_URI_NAME, getKeyProviderURI()); + + dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null); + fs = dfs.getFileSystem(); + + // set up the java key provider for encrypted hdfs cluster + hes = shims.createHdfsEncryptionShim(fs, conf); + + LOG.info("key provider is initialized"); + } else { + dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null); + fs = dfs.getFileSystem(); + } + String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString()); if (clusterType == MiniClusterType.tez) { mr = shims.getMiniTezCluster(conf, 4, uriString, 1); @@ -346,7 +389,6 @@ public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, if (dataDir == null) { dataDir = new File(".").getAbsolutePath() + "/data/files"; } - testFiles = dataDir; // Use the current directory if it is not specified @@ -371,7 +413,10 @@ public QTestUtil(String outDir, String logDir, MiniClusterType clusterType, } public void shutdown() throws Exception { - cleanUp(); + if (System.getenv(QTEST_LEAVE_FILES) == null) { + cleanUp(); + } + setup.tearDown(); if (mr != null) { mr.shutdown(); @@ -578,6 +623,19 @@ public void clearPostTestEffects() throws Exception { setup.postTest(conf); } + public void clearKeysCreatedInTests() { + if (hes == null) { + return; + } + try { + for (String keyAlias : hes.getKeys()) { + hes.deleteKey(keyAlias); + } + } catch (IOException e) { + LOG.error("Fail to clean the keys created in test due to the error", e); + } + } + /** * Clear out any side effects of running tests */ @@ -648,12 +706,17 @@ public void clearTestSideEffects() throws Exception { } clearTablesCreatedDuringTests(); + clearKeysCreatedInTests(); + + if (clusterType != MiniClusterType.encrypted) { + // allocate 
and initialize a new conf since a test can + // modify conf by using 'set' commands + conf = new HiveConf (Driver.class); + initConf(); + // renew the metastore since the cluster type is unencrypted + db = Hive.get(conf); // propagate new conf to meta store + } - // allocate and initialize a new conf since a test can - // modify conf by using 'set' commands - conf = new HiveConf (Driver.class); - initConf(); - db = Hive.get(conf); // propagate new conf to meta store setup.preTest(conf); } @@ -666,6 +729,7 @@ public void cleanUp() throws Exception { } clearTablesCreatedDuringTests(); + clearKeysCreatedInTests(); SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", true); @@ -866,23 +930,131 @@ public int execute(String tname) { try { return drv.run(qMap.get(tname)).getResponseCode(); } catch (CommandNeedRetryException e) { - // TODO Auto-generated catch block + LOG.error("driver failed to run the command: " + tname + " due to the exception: ", e); e.printStackTrace(); return -1; } } - private static final String CRLF = System.getProperty("line.separator"); public int executeClient(String tname1, String tname2) { - String commands = getCommands(tname1) + CRLF + getCommands(tname2); - return cliDriver.processLine(commands); + String commands = getCommand(tname1) + CRLF + getCommand(tname2); + return executeClientInternal(commands); } public int executeClient(String tname) { - return cliDriver.processLine(getCommands(tname)); + return executeClientInternal(getCommand(tname)); } - private String getCommands(String tname) { + private int executeClientInternal(String commands) { + String [] cmds = commands.split(";"); + int rc = 0; + + String command = ""; + for (String oneCmd : cmds) { + if (StringUtils.endsWith(oneCmd, "\\")) { + command += StringUtils.chop(oneCmd) + "\\;"; + continue; + } else { + if (isHiveCommand(oneCmd)) { + command = oneCmd; + } else { + command += oneCmd; + } + } + if (StringUtils.isBlank(command)) { + continue; + } + + if (isCommandUsedForTesting(command)) { + rc = executeTestCommand(command); + } else { + rc = cliDriver.processLine(command); + } + + if (rc != 0) { + break; + } + command = ""; + } + + return rc; + } + + private boolean isHiveCommand(String command) { + String[] cmd = command.trim().split("\\s+"); + if (HiveCommand.find(cmd) != null) { + return true; + } else if (HiveCommand.find(cmd, HiveCommand.ONLY_FOR_TESTING) != null) { + return true; + } else { + return false; + } + } + + private int executeTestCommand(final String command) { + String commandName = command.trim().split("\\s+")[0]; + String commandArgs = command.trim().substring(commandName.length()); + + if (commandArgs.endsWith(";")) { + commandArgs = StringUtils.chop(commandArgs); + } + + //replace ${hiveconf:hive.metastore.warehouse.dir} with actual dir if existed. 
+ //we only want the absolute path, so remove the header, such as hdfs://localhost:57145 + String wareHouseDir = SessionState.get().getConf().getVar(ConfVars.METASTOREWAREHOUSE) + .replaceAll("^[a-zA-Z]+://.*?:\\d+", ""); + commandArgs = commandArgs.replaceAll("\\$\\{hiveconf:hive\\.metastore\\.warehouse\\.dir\\}", + wareHouseDir); + + enableTestOnlyCmd(SessionState.get().getConf()); + + try { + CommandProcessor proc = getTestCommand(commandName); + if (proc != null) { + CommandProcessorResponse response = proc.run(commandArgs.trim()); + + int rc = response.getResponseCode(); + if (rc != 0) { + SessionState.get().out.println(response); + } + + return rc; + } else { + throw new RuntimeException("Could not get CommandProcessor for command: " + commandName); + } + } catch (Exception e) { + throw new RuntimeException("Could not execute test command: " + e.getMessage()); + } + } + + private CommandProcessor getTestCommand(final String commandName) throws SQLException { + HiveCommand testCommand = HiveCommand.find(new String[]{commandName}, HiveCommand.ONLY_FOR_TESTING); + + if (testCommand == null) { + return null; + } + + return CommandProcessorFactory + .getForHiveCommandInternal(new String[]{commandName}, SessionState.get().getConf(), + testCommand.isOnlyForTesting()); + } + + private void enableTestOnlyCmd(HiveConf conf){ + StringBuilder securityCMDs = new StringBuilder(conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST)); + for(String c : testOnlyCommands){ + securityCMDs.append(","); + securityCMDs.append(c); + } + conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), securityCMDs.toString()); + } + + private boolean isCommandUsedForTesting(final String command) { + String commandName = command.trim().split("\\s+")[0]; + HiveCommand testCommand = HiveCommand.find(new String[]{commandName}, HiveCommand.ONLY_FOR_TESTING); + return testCommand != null; + } + + private String getCommand(String tname) { String commands = qMap.get(tname); StringBuilder newCommands = new StringBuilder(commands.length()); int lastMatchEnd = 0; @@ -897,6 +1069,11 @@ private String getCommands(String tname) { return commands; } + private boolean isComment(final String line) { + String lineTrimmed = line.trim(); + return lineTrimmed.startsWith("#") || lineTrimmed.startsWith("--"); + } + public boolean shouldBeSkipped(String tname) { return qSkipSet.contains(tname); } @@ -921,7 +1098,7 @@ public void convertSequenceFileToTextFile() throws Exception { // Move all data from dest4_sequencefile to dest4 drv - .run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*"); + .run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*"); // Drop dest4_sequencefile db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "dest4_sequencefile", @@ -1173,6 +1350,7 @@ private static String getElementValue(String line, String name) { private void maskPatterns(Pattern[] patterns, String fname) throws Exception { String maskPattern = "#### A masked pattern was here ####"; + String partialMaskPattern = "#### A PARTIAL masked pattern was here ####"; String line; BufferedReader in; @@ -1186,9 +1364,24 @@ private void maskPatterns(Pattern[] patterns, String fname) throws Exception { out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8")); boolean lastWasMasked = false; + boolean partialMaskWasMatched = false; + Matcher matcher; while (null != (line = in.readLine())) { - for (Pattern pattern : patterns) { - line = 
pattern.matcher(line).replaceAll(maskPattern); + if (clusterType == MiniClusterType.encrypted) { + for (Pattern pattern : partialReservedPlanMask) { + matcher = pattern.matcher(line); + if (matcher.find()) { + line = partialMaskPattern + " " + matcher.group(0); + partialMaskWasMatched = true; + break; + } + } + } + + if (!partialMaskWasMatched) { + for (Pattern pattern : patterns) { + line = pattern.matcher(line).replaceAll(maskPattern); + } } if (line.equals(maskPattern)) { @@ -1202,6 +1395,7 @@ private void maskPatterns(Pattern[] patterns, String fname) throws Exception { out.write(line); out.write("\n"); lastWasMasked = false; + partialMaskWasMatched = false; } } @@ -1241,7 +1435,13 @@ private void maskPatterns(Pattern[] patterns, String fname) throws Exception { ".*DagName:.*", ".*Input:.*/data/files/.*", ".*Output:.*/data/files/.*", - ".*total number of created files now is.*" + ".*total number of created files now is.*", + ".*.hive-staging.*" + }); + + private final Pattern[] partialReservedPlanMask = toPattern(new String[] { + "data/warehouse/(.*?/)+\\.hive-staging" // the directory might be db/table/partition + //TODO: add more expected test result here }); public int checkCliDriverResults(String tname) throws Exception { @@ -1640,8 +1840,10 @@ public static boolean queryListRunnerMultiThreaded(File[] qfiles, QTestUtil[] qt } public static void outputTestFailureHelpMessage() { - System.err.println("See ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, " - + "or check ./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific test cases logs."); + System.err.println( + "See ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, or check " + + "./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific " + + "test cases logs."); System.err.flush(); } diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java index c2005e4..78d561c 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java @@ -17,8 +17,10 @@ */ package org.apache.hadoop.hive.ql.hooks; +import java.util.Arrays; import java.io.IOException; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.session.SessionState; @@ -30,7 +32,7 @@ public void run(HookContext hookContext) throws IOException { for (WriteEntity output : hookContext.getOutputs()) { Path tableLocation = new Path(output.getTable().getDataLocation().toString()); FileSystem fs = tableLocation.getFileSystem(SessionState.get().getConf()); - assert(fs.listStatus(tableLocation).length == 0); + assert(fs.listStatus(tableLocation, FileUtils.HIDDEN_FILES_PATH_FILTER).length == 0); } } } diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java index 83438c7..9df36b6 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java @@ -184,7 +184,7 @@ public SkewedInfo(SkewedInfo other) { 
__this__skewedColValueLocationMaps.put(__this__skewedColValueLocationMaps_copy_key, __this__skewedColValueLocationMaps_copy_value); } - this.skewedColValueLocationMaps = __this__skewedColValueLocationMaps; + this.skewedColValueLocationMaps = __this__skewedColValueLocationMaps; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java index 5f70c5d..0f7da53 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java @@ -90,7 +90,7 @@ protected int tryCount = 0; private TokenRewriteStream tokenRewriteStream; - String executionId; + private String executionId; // List of Locks for this query protected List hiveLocks; @@ -112,6 +112,8 @@ private final Map> outputLockObjects = new HashMap>(); + private final String stagingDir; + public Context(Configuration conf) throws IOException { this(conf, generateExecutionId()); } @@ -129,6 +131,7 @@ public Context(Configuration conf, String executionId) { nonLocalScratchPath = new Path(SessionState.getHDFSSessionPath(conf), executionId); localScratchDir = new Path(SessionState.getLocalSessionPath(conf), executionId).toUri().getPath(); scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION); + stagingDir = HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR); } @@ -188,6 +191,65 @@ public String getCmd () { } /** + * Gets a temporary staging directory related to a path. + * If a path already contains a staging directory, then returns the current directory; otherwise + * create the directory if needed. + * + * @param inputPath URI of the temporary directory + * @param mkdir Create the directory if True. + * @return A temporary path. + */ + private Path getStagingDir(Path inputPath, boolean mkdir) { + final URI inputPathUri = inputPath.toUri(); + final String inputPathName = inputPathUri.getPath(); + final String fileSystem = inputPathUri.getScheme() + ":" + inputPathUri.getAuthority(); + final FileSystem fs; + + try { + fs = inputPath.getFileSystem(conf); + } catch (IOException e) { + throw new IllegalStateException("Error getting FileSystem for " + inputPath + ": "+ e, e); + } + + String stagingPathName; + if (inputPathName.indexOf(stagingDir) == -1) { + stagingPathName = new Path(inputPathName, stagingDir).toString(); + } else { + stagingPathName = inputPathName.substring(0, inputPathName.indexOf(stagingDir) + stagingDir.length()); + } + + final String key = fileSystem + "-" + stagingPathName + "-" + TaskRunner.getTaskRunnerID(); + + Path dir = fsScratchDirs.get(key); + if (dir == null) { + // Append task specific info to stagingPathName, instead of creating a sub-directory. + // This way we don't have to worry about deleting the stagingPathName separately at + // end of query execution. 
+ dir = fs.makeQualified(new Path(stagingPathName + "_" + this.executionId + "-" + TaskRunner.getTaskRunnerID())); + + LOG.debug("Created staging dir = " + dir + " for path = " + inputPath); + + if (mkdir) { + try { + if (!FileUtils.mkdir(fs, dir, true, conf)) { + throw new IllegalStateException("Cannot create staging directory '" + dir.toString() + "'"); + } + + if (isHDFSCleanup) { + fs.deleteOnExit(dir); + } + } catch (IOException e) { + throw new RuntimeException("Cannot create staging directory '" + dir.toString() + "': " + e.getMessage(), e); + } + } + + fsScratchDirs.put(key, dir); + } + + return dir; + } + + /** * Get a tmp directory on specified URI * * @param scheme Scheme of the target FS @@ -274,14 +336,13 @@ public Path getMRScratchDir() { } private Path getExternalScratchDir(URI extURI) { - return getScratchDir(extURI.getScheme(), extURI.getAuthority(), - !explain, nonLocalScratchPath.toUri().getPath()); + return getStagingDir(new Path(extURI.getScheme(), extURI.getAuthority(), extURI.getPath()), !explain); } /** * Remove any created scratch directories. */ - private void removeScratchDir() { + public void removeScratchDir() { for (Map.Entry entry : fsScratchDirs.entrySet()) { try { Path p = entry.getValue(); @@ -313,6 +374,10 @@ public boolean isMRTmpFileURI(String uriStr) { (uriStr.indexOf(MR_PREFIX) != -1); } + public Path getMRTmpPath(URI uri) { + return new Path(getStagingDir(new Path(uri), !explain), MR_PREFIX + nextPathId()); + } + /** * Get a path to store map-reduce intermediate data in. * @@ -333,10 +398,9 @@ public Path getLocalTmpPath() { } /** - * Get a path to store tmp data destined for external URI. + * Get a path to store tmp data destined for external Path. * - * @param extURI - * external URI to which the tmp data has to be eventually moved + * @param path external Path to which the tmp data has to be eventually moved * @return next available tmp path on the file system corresponding extURI */ public Path getExternalTmpPath(Path path) { @@ -357,9 +421,7 @@ public Path getExternalTmpPath(Path path) { * path within /tmp */ public Path getExtTmpPathRelTo(Path path) { - URI uri = path.toUri(); - return new Path (getScratchDir(uri.getScheme(), uri.getAuthority(), !explain, - uri.getPath() + Path.SEPARATOR + "_" + this.executionId), EXT_PREFIX + nextPathId()); + return new Path(getStagingDir(path, !explain), EXT_PREFIX + nextPathId()); } /** @@ -437,7 +499,7 @@ public DataInput getStream() { resFs = resDir.getFileSystem(conf); FileStatus status = resFs.getFileStatus(resDir); assert status.isDir(); - FileStatus[] resDirFS = resFs.globStatus(new Path(resDir + "/*")); + FileStatus[] resDirFS = resFs.globStatus(new Path(resDir + "/*"), FileUtils.HIDDEN_FILES_PATH_FILTER); resDirPaths = new Path[resDirFS.length]; int pos = 0; for (FileStatus resFS : resDirFS) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index 0ccab02..dbd6e60 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -637,10 +637,10 @@ private boolean needConversion(TableDesc tableDesc, List partDesc boolean recursive = HiveConf.getBoolVar(job, HiveConf.ConfVars.HADOOPMAPREDINPUTDIRRECURSIVE); // If this is in acid format always read it recursively regardless of what the jobconf says. 
if (!recursive && !AcidUtils.isAcid(p, job)) { - return fs.listStatus(p); + return fs.listStatus(p, FileUtils.HIDDEN_FILES_PATH_FILTER); } List results = new ArrayList(); - for (FileStatus stat : fs.listStatus(p)) { + for (FileStatus stat : fs.listStatus(p, FileUtils.HIDDEN_FILES_PATH_FILTER)) { FileUtils.listStatusRecursively(fs, stat, results); } return results.toArray(new FileStatus[results.size()]); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 22425be..05f801a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.MetaStoreUtils; @@ -58,6 +59,8 @@ import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.shims.HadoopShims; +import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.util.StringUtils; import java.io.IOException; @@ -99,7 +102,7 @@ private void moveFile(Path sourcePath, Path targetPath, boolean isDfsDir) if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_INSERT_INTO_MULTILEVEL_DIRS)) { deletePath = createTargetPath(targetPath, fs); } - if (!Hive.renameFile(conf, sourcePath, targetPath, fs, true, false)) { + if (!Hive.moveFile(conf, sourcePath, targetPath, fs, true, false)) { try { if (deletePath != null) { fs.delete(deletePath, true); @@ -158,8 +161,14 @@ private Path createTargetPath(Path targetPath, FileSystem fs) throws IOException actualPath = actualPath.getParent(); } fs.mkdirs(mkDirPath); + HadoopShims shims = ShimLoader.getHadoopShims(); if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS)) { - fs.setPermission(mkDirPath, fs.getFileStatus(actualPath).getPermission()); + try { + HadoopShims.HdfsFileStatus status = shims.getFullFileStatus(conf, fs, actualPath); + shims.setFullFileStatus(conf, status, fs, actualPath); + } catch (Exception e) { + LOG.warn("Error setting permissions or group of " + actualPath, e); + } } } return deletePath; @@ -259,7 +268,7 @@ public int execute(DriverContext driverContext) { dirs = srcFs.globStatus(tbd.getSourcePath()); files = new ArrayList(); for (int i = 0; (dirs != null && i < dirs.length); i++) { - files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath()))); + files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER))); // We only check one file, so exit the loop when we have at least // one. 
if (files.size() > 0) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 2e771ec..bf5bc35 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -94,6 +94,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.HiveInterruptCallback; import org.apache.hadoop.hive.common.HiveInterruptUtils; import org.apache.hadoop.hive.common.HiveStatsUtils; @@ -1801,7 +1802,7 @@ private static String replaceTaskIdFromFilename(String filename, String oldTaskI */ public static FileStatus[] listStatusIfExists(Path path, FileSystem fs) throws IOException { try { - return fs.listStatus(path); + return fs.listStatus(path, FileUtils.HIDDEN_FILES_PATH_FILTER); } catch (FileNotFoundException e) { // FS in hadoop 2.0 throws FNF instead of returning null return null; @@ -2637,7 +2638,7 @@ public static boolean isEmptyPath(JobConf job, Path dirPath) throws Exception { FileSystem inpFs = dirPath.getFileSystem(job); if (inpFs.exists(dirPath)) { - FileStatus[] fStats = inpFs.listStatus(dirPath); + FileStatus[] fStats = inpFs.listStatus(dirPath, FileUtils.HIDDEN_FILES_PATH_FILTER); if (fStats.length > 0) { return false; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java index 332ced8..6d87606 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable; @@ -98,7 +99,7 @@ public HiveIndexResult(List indexFiles, JobConf conf) throws IOException FileSystem fs = indexFilePath.getFileSystem(conf); FileStatus indexStat = fs.getFileStatus(indexFilePath); if (indexStat.isDir()) { - FileStatus[] fss = fs.listStatus(indexFilePath); + FileStatus[] fss = fs.listStatus(indexFilePath, FileUtils.HIDDEN_FILES_PATH_FILTER); for (FileStatus f : fss) { paths.add(f.getPath()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java index bd5734a..edcc3b6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java @@ -27,7 +27,6 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.plan.PartitionDesc; @@ -92,14 +91,7 @@ public RecordReader getRecordReader(InputSplit split, JobConf job, List errors = new ArrayList(); FileSystem fs = dir.getFileSystem(job); - FileStatus[] matches = fs.globStatus(dir, new PathFilter() { - - @Override - public boolean accept(Path p) { - String name = p.getName(); - return !name.startsWith("_") && !name.startsWith("."); - } - }); + 
FileStatus[] matches = fs.globStatus(dir, FileUtils.HIDDEN_FILES_PATH_FILTER); if (matches == null) { errors.add(new IOException("Input path does not exist: " + dir)); } else if (matches.length == 0) { @@ -113,7 +105,8 @@ public boolean accept(Path p) { if (!errors.isEmpty()) { throw new InvalidInputException(errors); } - LOG.info("Total input paths to process : " + result.size()); + LOG.debug("Matches for " + dir + ": " + result); + LOG.info("Total input paths to process : " + result.size() + " from dir " + dir); return result.toArray(new FileStatus[result.size()]); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java index 8759661..e5a9ac1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -355,7 +356,7 @@ public int hashCode() { while (dirs.peek() != null) { Path tstPath = dirs.remove(); - FileStatus[] fStatus = inpFs.listStatus(tstPath); + FileStatus[] fStatus = inpFs.listStatus(tstPath, FileUtils.HIDDEN_FILES_PATH_FILTER); for (int idx = 0; idx < fStatus.length; idx++) { if (fStatus[idx].isDir()) { dirs.offer(fStatus[idx].getPath()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java index 37e3879..feef854 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.PartitionDesc; @@ -58,7 +59,7 @@ public void rework(HiveConf job, MapredWork work) throws IOException { if (!fStatus.isDir()) { symlinks = new FileStatus[] { fStatus }; } else { - symlinks = fileSystem.listStatus(symlinkDir); + symlinks = fileSystem.listStatus(symlinkDir, FileUtils.HIDDEN_FILES_PATH_FILTER); } toRemovePaths.add(path); ArrayList aliases = pathToAliases.remove(path); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java index 07a789f..d618984 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java @@ -23,19 +23,15 @@ import java.io.IOException; import java.io.InputStreamReader; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil; -import org.apache.hadoop.hive.ql.plan.MapredWork; -import 
org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.FileInputFormat; @@ -192,7 +188,7 @@ private static void getTargetPathsFromSymlinksDirs( List targetPaths, List symlinkPaths) throws IOException { for (Path symlinkDir : symlinksDirs) { FileSystem fileSystem = symlinkDir.getFileSystem(conf); - FileStatus[] symlinks = fileSystem.listStatus(symlinkDir); + FileStatus[] symlinks = fileSystem.listStatus(symlinkDir, FileUtils.HIDDEN_FILES_PATH_FILTER); // Read paths from each symlink file. for (FileStatus symlink : symlinks) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 298451d..3fdcc53 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -29,6 +29,7 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -36,6 +37,7 @@ import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -48,6 +50,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.common.ObjectPair; @@ -1352,7 +1355,7 @@ public Partition loadPartition(Path loadPath, Table tbl, } if (replace) { - Hive.replaceFiles(loadPath, newPartPath, oldPartPath, getConf(), + Hive.replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(), isSrcLocal); } else { FileSystem fs = tbl.getDataLocation().getFileSystem(conf); @@ -1411,7 +1414,7 @@ private void walkDirTree(FileStatus fSta, FileSystem fSys, } /* dfs. 
*/ - FileStatus[] children = fSys.listStatus(fSta.getPath()); + FileStatus[] children = fSys.listStatus(fSta.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); if (children != null) { for (FileStatus child : children) { walkDirTree(child, fSys, skewedColValueLocationMaps, newPartPath, skewedInfo); @@ -2187,7 +2190,7 @@ public boolean grantRole(String roleName, String userName, boolean grantOption) throws HiveException { try { return getMSC().grant_role(roleName, userName, principalType, grantor, - grantorType, grantOption); + grantorType, grantOption); } catch (Exception e) { throw new HiveException(e); } @@ -2282,13 +2285,7 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectType objectType, for (FileStatus src : srcs) { FileStatus[] items; if (src.isDir()) { - items = srcFs.listStatus(src.getPath(), new PathFilter() { - @Override - public boolean accept(Path p) { - String name = p.getName(); - return !name.startsWith("_") && !name.startsWith("."); - } - }); + items = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); Arrays.sort(items); } else { items = new FileStatus[] {src}; @@ -2308,9 +2305,10 @@ public boolean accept(Path p) { } if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES) && + !HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR).equals(itemSource.getName()) && item.isDir()) { throw new HiveException("checkPaths: " + src.getPath() - + " has nested directory" + itemSource); + + " has nested directory " + itemSource); } // Strip off the file type, if any so we don't make: // 000000_0.gz -> 000000_0.gz_copy_1 @@ -2361,11 +2359,54 @@ private static boolean destExists(List> result, Path proposed) { return false; } + private static boolean isSubDir(Path srcf, Path destf, FileSystem fs, boolean isSrcLocal){ + if (srcf == null) { + LOG.debug("The source path is null for isSubDir method."); + return false; + } + + String fullF1 = getQualifiedPathWithoutSchemeAndAuthority(srcf, fs); + String fullF2 = getQualifiedPathWithoutSchemeAndAuthority(destf, fs); + + boolean isInTest = Boolean.valueOf(HiveConf.getBoolVar(fs.getConf(), ConfVars.HIVE_IN_TEST)); + // In the automation, the data warehouse is the local file system based. + LOG.debug("The source path is " + fullF1 + " and the destination path is " + fullF2); + if (isInTest) { + return fullF1.startsWith(fullF2); + } + + // schema is diff, return false + String schemaSrcf = srcf.toUri().getScheme(); + String schemaDestf = destf.toUri().getScheme(); + + // if the schemaDestf is null, it means the destination is not in the local file system + if (schemaDestf == null && isSrcLocal) { + LOG.debug("The source file is in the local while the dest not."); + return false; + } + + // If both schema information are provided, they should be the same. + if (schemaSrcf != null && schemaDestf != null && !schemaSrcf.equals(schemaDestf)) { + LOG.debug("The source path's schema is " + schemaSrcf + + " and the destination path's schema is " + schemaDestf + "."); + return false; + } + + LOG.debug("The source path is " + fullF1 + " and the destination path is " + fullF2); + return fullF1.startsWith(fullF2); + } + + private static String getQualifiedPathWithoutSchemeAndAuthority(Path srcf, FileSystem fs) { + Path currentWorkingDir = fs.getWorkingDirectory(); + Path path = srcf.makeQualified(srcf.toUri(), currentWorkingDir); + return Path.getPathWithoutSchemeAndAuthority(path).toString(); + } + //it is assumed that parent directory of the destf should already exist when this //method is called. 
when the replace value is true, this method works a little different //from mv command if the destf is a directory, it replaces the destf instead of moving under //the destf. in this case, the replaced destf still preserves the original destf's permission - public static boolean renameFile(HiveConf conf, Path srcf, Path destf, + public static boolean moveFile(HiveConf conf, Path srcf, Path destf, FileSystem fs, boolean replace, boolean isSrcLocal) throws HiveException { boolean success = false; @@ -2374,17 +2415,26 @@ public static boolean renameFile(HiveConf conf, Path srcf, Path destf, HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS); HadoopShims shims = ShimLoader.getHadoopShims(); HadoopShims.HdfsFileStatus destStatus = null; + HadoopShims.HdfsEncryptionShim hdfsEncryptionShim = SessionState.get().getHdfsEncryptionShim(); + // If source path is a subdirectory of the destination path: + // ex: INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300; + // where the staging directory is a subdirectory of the destination directory + // (1) Do not delete the dest dir before doing the move operation. + // (2) It is assumed that subdir and dir are in the same encryption zone. + // (3) Move individual files from src dir to dest dir. + boolean destIsSubDir = isSubDir(srcf, destf, fs, isSrcLocal); try { if (inheritPerms || replace) { try{ - destStatus = shims.getFullFileStatus(conf, fs, destf); + destStatus = shims.getFullFileStatus(conf, fs, destf.getParent()); //if destf is an existing directory: //if replace is true, delete followed by rename(mv) is equivalent to replace //if replace is false, rename (mv) actually move the src under dest dir //if destf is an existing file, rename is actually a replace, and do not need // to delete the file first - if (replace && destStatus.getFileStatus().isDir()) { + if (replace && !destIsSubDir) { + LOG.debug("The path " + destf.toString() + " is deleted"); fs.delete(destf, true); } } catch (FileNotFoundException ignore) { @@ -2396,14 +2446,39 @@ public static boolean renameFile(HiveConf conf, Path srcf, Path destf, } if (!isSrcLocal) { // For NOT local src file, rename the file - success = fs.rename(srcf, destf); + if (hdfsEncryptionShim != null && (hdfsEncryptionShim.isPathEncrypted(srcf) || hdfsEncryptionShim.isPathEncrypted(destf)) + && !hdfsEncryptionShim.arePathsOnSameEncryptionZone(srcf, destf)) + { + LOG.info("Copying source " + srcf + " to " + destf + " because HDFS encryption zones are different."); + success = FileUtils.copy(srcf.getFileSystem(conf), srcf, destf.getFileSystem(conf), destf, + true, // delete source + replace, // overwrite destination + conf); + } else { + if (destIsSubDir) { + FileStatus[] srcs = fs.listStatus(srcf, FileUtils.HIDDEN_FILES_PATH_FILTER); + for (FileStatus status : srcs) { + success = FileUtils.copy(srcf.getFileSystem(conf), status.getPath(), destf.getFileSystem(conf), destf, + true, // delete source + replace, // overwrite destination + conf); + + if (!success) { + throw new HiveException("Unable to move source " + status.getPath() + " to destination " + destf); + } + } + } else { + success = fs.rename(srcf, destf); + } + } } else { // For local src file, copy to hdfs fs.copyFromLocalFile(srcf, destf); success = true; } - LOG.info((replace ? "Replacing src:" : "Renaming src:") + srcf.toString() - + ";dest: " + destf.toString() + ";Status:" + success); + + LOG.info((replace ?
"Replacing src:" : "Renaming src: ") + srcf.toString() + + ", dest: " + destf.toString() + ", Status:" + success); } catch (IOException ioe) { throw new HiveException("Unable to move source " + srcf + " to destination " + destf, ioe); } @@ -2470,7 +2545,7 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf, try { for (List sdpairs : result) { for (Path[] sdpair : sdpairs) { - if (!renameFile(conf, sdpair[0], sdpair[1], fs, false, isSrcLocal)) { + if (!moveFile(conf, sdpair[0], sdpair[1], fs, false, isSrcLocal)) { throw new IOException("Cannot move " + sdpair[0] + " to " + sdpair[1]); } @@ -2563,6 +2638,7 @@ private static void moveAcidFiles(FileSystem fs, FileStatus[] stats, Path dst) * srcf, destf, and tmppath should resident in the same DFS, but the oldPath can be in a * different DFS. * + * @param tablePath path of the table. Used to identify permission inheritance. * @param srcf * Source directory to be renamed to tmppath. It should be a * leaf directory where the final data files reside. However it @@ -2570,13 +2646,15 @@ private static void moveAcidFiles(FileSystem fs, FileStatus[] stats, Path dst) * @param destf * The directory where the final data needs to go * @param oldPath - * The directory where the old data location, need to be cleaned up. + * The directory where the old data location, need to be cleaned up. Most of time, will be the same + * as destf, unless its across FileSystem boundaries. * @param isSrcLocal * If the source directory is LOCAL */ - static protected void replaceFiles(Path srcf, Path destf, Path oldPath, - HiveConf conf, boolean isSrcLocal) throws HiveException { + protected static void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, HiveConf conf, + boolean isSrcLocal) throws HiveException { try { + FileSystem destFs = destf.getFileSystem(conf); boolean inheritPerms = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS); @@ -2597,15 +2675,24 @@ static protected void replaceFiles(Path srcf, Path destf, Path oldPath, List> result = checkPaths(conf, destFs, srcs, srcFs, destf, true); + HadoopShims shims = ShimLoader.getHadoopShims(); if (oldPath != null) { try { FileSystem fs2 = oldPath.getFileSystem(conf); if (fs2.exists(oldPath)) { - FileUtils.trashFilesUnderDir(fs2, oldPath, conf); + // Do not delete oldPath if: + // - destf is subdir of oldPath + //if ( !(fs2.equals(destf.getFileSystem(conf)) && FileUtils.isSubDir(oldPath, destf, fs2))) + if (FileUtils.isSubDir(oldPath, destf, fs2)) { + FileUtils.trashFilesUnderDir(fs2, oldPath, conf); + } + if (inheritPerms) { + inheritFromTable(tablePath, destf, conf, destFs); + } } } catch (Exception e) { //swallow the exception - LOG.warn("Directory " + oldPath.toString() + " canot be removed:" + StringUtils.stringifyException(e)); + LOG.warn("Directory " + oldPath.toString() + " cannot be removed: " + e, e); } } @@ -2619,15 +2706,30 @@ static protected void replaceFiles(Path srcf, Path destf, Path oldPath, LOG.warn("Error creating directory " + destf.toString()); } if (inheritPerms && success) { - destFs.setPermission(destfp, destFs.getFileStatus(destfp.getParent()).getPermission()); + inheritFromTable(tablePath, destfp, conf, destFs); } } - boolean b = renameFile(conf, srcs[0].getPath(), destf, destFs, true, - isSrcLocal); - if (!b) { - throw new HiveException("Unable to move results from " + srcs[0].getPath() - + " to destination directory: " + destf); + // Copy/move each file under the source directory to avoid to delete the destination + // directory 
if it is the root of an HDFS encryption zone. + for (List sdpairs : result) { + for (Path[] sdpair : sdpairs) { + Path destParent = sdpair[1].getParent(); + FileSystem destParentFs = destParent.getFileSystem(conf); + if (!destParentFs.isDirectory(destParent)) { + boolean success = destFs.mkdirs(destParent); + if (!success) { + LOG.warn("Error creating directory " + destParent); + } + if (inheritPerms && success) { + inheritFromTable(tablePath, destParent, conf, destFs); + } + } + if (!moveFile(conf, sdpair[0], sdpair[1], destFs, true, isSrcLocal)) { + throw new IOException("Unable to move file/directory from " + sdpair[0] + + " to " + sdpair[1]); + } + } } } else { // srcf is a file or pattern containing wildcards if (!destFs.exists(destf)) { @@ -2636,13 +2738,13 @@ static protected void replaceFiles(Path srcf, Path destf, Path oldPath, LOG.warn("Error creating directory " + destf.toString()); } if (inheritPerms && success) { - destFs.setPermission(destf, destFs.getFileStatus(destf.getParent()).getPermission()); + inheritFromTable(tablePath, destf, conf, destFs); } } // srcs must be a list of files -- ensured by LoadSemanticAnalyzer for (List sdpairs : result) { for (Path[] sdpair : sdpairs) { - if (!renameFile(conf, sdpair[0], sdpair[1], destFs, true, + if (!moveFile(conf, sdpair[0], sdpair[1], destFs, true, isSrcLocal)) { throw new IOException("Error moving: " + sdpair[0] + " into: " + sdpair[1]); } @@ -2654,6 +2756,38 @@ static protected void replaceFiles(Path srcf, Path destf, Path oldPath, } } + /** + * This method sets all paths from tablePath to destf (including destf) to have same permission as tablePath. + * @param tablePath path of table + * @param destf path of table-subdir. + * @param conf + * @param fs + */ + private static void inheritFromTable(Path tablePath, Path destf, HiveConf conf, FileSystem fs) { + if (!FileUtils.isSubDir(destf, tablePath, fs)) { + //partition may not be under the parent. + return; + } + HadoopShims shims = ShimLoader.getHadoopShims(); + //Calculate all the paths from the table dir, to destf + //At end of this loop, currPath is table dir, and pathsToSet contain list of all those paths. 
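To make the walk below concrete (illustrative values, not taken from the patch): with tablePath = /warehouse/tbl and destf = /warehouse/tbl/ds=1/hr=2, the loop collects

    pathsToSet = [/warehouse/tbl/ds=1/hr=2, /warehouse/tbl/ds=1]

stops once currPath reaches /warehouse/tbl, and then applies that table directory's permissions and group to each collected path.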
+ Path currPath = destf; + List pathsToSet = new LinkedList(); + while (!currPath.equals(tablePath)) { + pathsToSet.add(currPath); + currPath = currPath.getParent(); + } + + try { + HadoopShims.HdfsFileStatus fullFileStatus = shims.getFullFileStatus(conf, fs, currPath); + for (Path pathToSet : pathsToSet) { + shims.setFullFileStatus(conf, fullFileStatus, fs, pathToSet); + } + } catch (Exception e) { + LOG.warn("Error setting permissions or group of " + destf, e); + } + } + public static boolean isHadoop1() { return ShimLoader.getMajorVersion().startsWith("0.20"); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java index 7803169..fa0abad 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; @@ -147,7 +148,7 @@ void findUnknownTables(String dbName, List tables, CheckResult result) for (Path dbPath : dbPaths) { FileSystem fs = dbPath.getFileSystem(conf); - FileStatus[] statuses = fs.listStatus(dbPath); + FileStatus[] statuses = fs.listStatus(dbPath, FileUtils.HIDDEN_FILES_PATH_FILTER); for (FileStatus status : statuses) { if (status.isDir() && !tableNames.contains(status.getPath().getName())) { @@ -362,7 +363,7 @@ private void getAllLeafDirs(Path basePath, Set allDirs) private void getAllLeafDirs(Path basePath, Set allDirs, FileSystem fs) throws IOException { - FileStatus[] statuses = fs.listStatus(basePath); + FileStatus[] statuses = fs.listStatus(basePath, FileUtils.HIDDEN_FILES_PATH_FILTER); boolean directoryFound=false; for (FileStatus status : statuses) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index bafaff8..f8007e1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.ProtectMode; import org.apache.hadoop.hive.metastore.Warehouse; @@ -336,7 +337,7 @@ public int getBucketCount() { * partition String pathPattern = this.partPath.toString() + "/*"; try { * FileSystem fs = FileSystem.get(this.table.getDataLocation(), * Hive.get().getConf()); FileStatus srcs[] = fs.globStatus(new - * Path(pathPattern)); numBuckets = srcs.length; } catch (Exception e) { + * Path(pathPattern), FileUtils.HIDDEN_FILES_PATH_FILTER); numBuckets = srcs.length; } catch (Exception e) { * throw new RuntimeException("Cannot get bucket count for table " + * this.table.getName(), e); } } return numBuckets; */ @@ -372,7 +373,7 @@ public void setBucketCount(int newBucketNum) { pathPattern = pathPattern + "/*"; } LOG.info("Path pattern = " + pathPattern); - FileStatus srcs[] = fs.globStatus(new Path(pathPattern)); + FileStatus srcs[] = fs.globStatus(new Path(pathPattern), FileUtils.HIDDEN_FILES_PATH_FILTER); 
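    // Illustrative note, not part of the patch: for a table clustered into two buckets this
    // filtered glob typically returns data files such as 000000_0 and 000001_0, while names
    // starting with "." or "_" (e.g. _SUCCESS or hidden staging directories) are excluded.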
Arrays.sort(srcs); for (FileStatus src : srcs) { LOG.info("Got file: " + src.getPath()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index 35d60f8..a996adf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.ProtectMode; @@ -636,7 +637,7 @@ public int getNumBuckets() { protected void replaceFiles(Path srcf, boolean isSrcLocal) throws HiveException { Path tableDest = getPath(); - Hive.replaceFiles(srcf, tableDest, tableDest, Hive.get().getConf(), + Hive.replaceFiles(tableDest, srcf, tableDest, tableDest, Hive.get().getConf(), isSrcLocal); } @@ -951,7 +952,7 @@ public static String getCompleteName(String dbName, String tabName) { pathPattern = pathPattern + "/*"; } LOG.info("Path pattern = " + pathPattern); - FileStatus srcs[] = fs.globStatus(new Path(pathPattern)); + FileStatus srcs[] = fs.globStatus(new Path(pathPattern), FileUtils.HIDDEN_FILES_PATH_FILTER); Arrays.sort(srcs); for (FileStatus src : srcs) { LOG.info("Got file: " + src.getPath()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java index 03742d4..638397d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; @@ -81,7 +82,7 @@ abstract public Object process(Node nd, Stack stack, NodeProcessorCtx proc List fileNames = new ArrayList(); try { FileSystem fs = location.getFileSystem(pGraphContext.getConf()); - FileStatus[] files = fs.listStatus(new Path(location.toString())); + FileStatus[] files = fs.listStatus(new Path(location.toString()), FileUtils.HIDDEN_FILES_PATH_FILTER); if (files != null) { for (FileStatus file : files) { fileNames.add(file.getPath().toString()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java index e2768ff..2729ceb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java @@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.ql.Driver; @@ -123,22 +124,23 @@ private static boolean isIndexPartitionFresh(Hive hive, Index index, Partition part) throws HiveException { LOG.info("checking index staleness..."); try { - FileSystem partFs = part.getDataLocation().getFileSystem(hive.getConf()); - FileStatus partFss = 
partFs.getFileStatus(part.getDataLocation()); - String ts = index.getParameters().get(part.getSpec().toString()); - if (ts == null) { + String indexTs = index.getParameters().get(part.getSpec().toString()); + if (indexTs == null) { return false; } - long indexTs = Long.parseLong(ts); - LOG.info(partFss.getModificationTime()); - LOG.info(ts); - if (partFss.getModificationTime() > indexTs) { - LOG.info("index is stale on the partitions that matched " + part.getSpec()); - return false; + + FileSystem partFs = part.getDataLocation().getFileSystem(hive.getConf()); + FileStatus[] parts = partFs.listStatus(part.getDataLocation(), FileUtils.HIDDEN_FILES_PATH_FILTER); + for (FileStatus status : parts) { + if (status.getModificationTime() > Long.parseLong(indexTs)) { + LOG.info("Index is stale on partition '" + part.getName() + + "'. Modified time (" + status.getModificationTime() + ") for '" + status.getPath() + + "' is higher than index creation time (" + indexTs + ")."); + return false; + } } } catch (IOException e) { - LOG.info("failed to grab timestamp info"); - throw new HiveException(e); + throw new HiveException("Failed to grab timestamp information from partition '" + part.getName() + "': " + e.getMessage(), e); } return true; } @@ -156,22 +158,23 @@ private static boolean isIndexTableFresh(Hive hive, List indexes, Table s for (Index index : indexes) { LOG.info("checking index staleness..."); try { - FileSystem srcFs = src.getPath().getFileSystem(hive.getConf()); - FileStatus srcFss= srcFs.getFileStatus(src.getPath()); - String ts = index.getParameters().get("base_timestamp"); - if (ts == null) { + String indexTs = index.getParameters().get("base_timestamp"); + if (indexTs == null) { return false; } - long indexTs = Long.parseLong(ts); - LOG.info(srcFss.getModificationTime()); - LOG.info(ts); - if (srcFss.getModificationTime() > indexTs) { - LOG.info("index is stale "); - return false; + + FileSystem srcFs = src.getPath().getFileSystem(hive.getConf()); + FileStatus[] srcs = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); + for (FileStatus status : srcs) { + if (status.getModificationTime() > Long.parseLong(indexTs)) { + LOG.info("Index is stale on table '" + src.getTableName() + + "'. 
Modified time (" + status.getModificationTime() + ") for '" + status.getPath() + + "' is higher than index creation time (" + indexTs + ")."); + return false; + } } } catch (IOException e) { - LOG.info("failed to grab timestamp info"); - throw new HiveException(e); + throw new HiveException("Failed to grab timestamp information from table '" + src.getTableName() + "': " + e.getMessage(), e); } } return true; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java index 7091fef..18880e6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Task; @@ -68,7 +69,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast, "Target is not a directory : " + toURI)); } else { - FileStatus[] files = fs.listStatus(toPath); + FileStatus[] files = fs.listStatus(toPath, FileUtils.HIDDEN_FILES_PATH_FILTER); if (files != null && files.length != 0) { throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast, "Target is not an empty directory : " + toURI)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index 710884a..04e0eea 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; @@ -357,7 +358,7 @@ private void checkTargetLocationEmpty(FileSystem fs, Path targetPath) throws IOException, SemanticException { LOG.debug("checking emptiness of " + targetPath.toString()); if (fs.exists(targetPath)) { - FileStatus[] status = fs.listStatus(targetPath); + FileStatus[] status = fs.listStatus(targetPath, FileUtils.HIDDEN_FILES_PATH_FILTER); if (status.length > 0) { LOG.debug("Files inc. 
" + status[0].getPath().toString() + " found in path : " + targetPath.toString()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 168ff53..1e0edfa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.io.Serializable; +import java.security.AccessControlException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -45,8 +46,10 @@ import org.antlr.runtime.tree.TreeWizard.ContextVisitor; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; @@ -201,9 +204,12 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.apache.hadoop.hive.shims.HadoopShims; +import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.mapred.InputFormat; import org.apache.hadoop.mapred.OutputFormat; +import org.apache.hadoop.security.UserGroupInformation; /** * Implementation of the semantic analyzer. It generates the query plan. @@ -1669,7 +1675,7 @@ public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException throw new SemanticException(e); } try { - fname = ctx.getExternalTmpPath( + fname = ctx.getExtTmpPathRelTo( FileUtils.makeQualified(location, conf)).toString(); } catch (Exception e) { throw new SemanticException(generateErrorMessage(ast, @@ -1685,8 +1691,9 @@ public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException } else { // This is the only place where isQuery is set to true; it defaults to false. qb.setIsQuery(true); - fname = ctx.getMRTmpPath().toString(); - ctx.setResDir(new Path(fname)); + Path stagingPath = getStagingDirectoryPathname(qb); + fname = stagingPath.toString(); + ctx.setResDir(stagingPath); } } qb.getMetaData().setDestForAlias(name, fname, @@ -1742,6 +1749,160 @@ public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException } } + /** + * Checks if a given path is encrypted (valid only for HDFS files) + * @param path The path to check for encryption + * @return True if the path is encrypted; False if it is not encrypted + * @throws HiveException If an error occurs while checking for encryption + */ + private boolean isPathEncrypted(Path path) throws HiveException { + HadoopShims.HdfsEncryptionShim hdfsEncryptionShim; + + hdfsEncryptionShim = SessionState.get().getHdfsEncryptionShim(); + if (hdfsEncryptionShim != null) { + try { + if (hdfsEncryptionShim.isPathEncrypted(path)) { + return true; + } + } catch (Exception e) { + throw new HiveException("Unable to determine if " + path + "is encrypted: " + e, e); + } + } + + return false; + } + + /** + * Compares to path key encryption strenghts. 
+ * + * @param p1 Path to an HDFS file system + * @param p2 Path to an HDFS file system + * @return -1 if p1's key strength is weaker than p2's; 0 if both are equal; 1 if it is stronger + * @throws HiveException If an error occurs while comparing key strengths. + */ + private int comparePathKeyStrength(Path p1, Path p2) throws HiveException { + HadoopShims.HdfsEncryptionShim hdfsEncryptionShim; + + hdfsEncryptionShim = SessionState.get().getHdfsEncryptionShim(); + if (hdfsEncryptionShim != null) { + try { + return hdfsEncryptionShim.comparePathKeyStrength(p1, p2); + } catch (Exception e) { + throw new HiveException("Unable to compare key strength for " + p1 + " and " + p2 + " : " + e, e); + } + } + + return 0; // Non-encrypted path (or equal strength) + } + + /** + * Checks if a given path has read-only access permissions. + * + * @param path The path to check for read-only permissions. + * @return True if the path is read-only; False otherwise. + * @throws HiveException If an error occurs while checking file permissions. + */ + private boolean isPathReadOnly(Path path) throws HiveException { + HiveConf conf = SessionState.get().getConf(); + try { + FileSystem fs = path.getFileSystem(conf); + UserGroupInformation ugi = Utils.getUGI(); + FileStatus status = fs.getFileStatus(path); + + // We just check for writing permissions. If it fails with AccessControlException, then it + // means the location may be read-only. + FileUtils.checkFileAccessWithImpersonation(fs, status, FsAction.WRITE, ugi.getUserName()); + + // Path has writing permissions + return false; + } catch (AccessControlException e) { + // An AccessControlException may be caused by other errors, + // but we treat it as if the path is read-only + return true; + } catch (Exception e) { + throw new HiveException("Unable to determine if " + path + " is read only: " + e, e); + } + } + + /** + * Gets the strongest encrypted table path. + * + * @param qb The QB object that contains a list of all table locations. + * @return The strongest encrypted path + * @throws HiveException if an error occurred attempting to compare the encryption strength + */ + private Path getStrongestEncryptedTablePath(QB qb) throws HiveException { + List tabAliases = new ArrayList(qb.getTabAliases()); + Path strongestPath = null; + + /* Walk through all found table locations to get the most encrypted table */ + for (String alias : tabAliases) { + Table tab = qb.getMetaData().getTableForAlias(alias); + if (tab != null) { + Path tablePath = tab.getDataLocation(); + if (tablePath != null) { + try { + if (strongestPath == null) { + strongestPath = tablePath; + } else if ("hdfs".equals(tablePath.toUri().getScheme()) + && isPathEncrypted(tablePath) + && comparePathKeyStrength(tablePath, strongestPath) > 0) + { + strongestPath = tablePath; + } + } catch (HiveException e) { + throw new HiveException("Unable to find the most secure table path: " + e, e); + } + } + } + } + + return strongestPath; + } + + /** + * Gets the staging directory where MR files will be stored temporarily. + * It walks through the QB plan to find the correct location where temporary files should be saved. This + * temporary location (or staging directory) may be created inside encrypted table locations for + * security reasons. If the QB has read-only tables, then the older scratch directory will be used, + * or a permission error will be thrown if the requested query table is encrypted and the old scratch + * directory is not. + * + * @param qb The QB object that contains a list of all table locations.
+ * @return The path to the staging directory. + * @throws HiveException If an error occurs while identifying the correct staging location. + */ + private Path getStagingDirectoryPathname(QB qb) throws HiveException { + Path stagingPath = null, tablePath; + + // Looks for the most encrypted table location (if there is one) + tablePath = getStrongestEncryptedTablePath(qb); + if (tablePath != null && isPathEncrypted(tablePath)) { + // Only HDFS paths can be checked for encryption + if ("hdfs".equals(tablePath.toUri().getScheme())) { + if (isPathReadOnly(tablePath)) { + Path tmpPath = ctx.getMRTmpPath(); + if (comparePathKeyStrength(tablePath, tmpPath) < 0) { + throw new HiveException("Read-only encrypted tables cannot be read " + + "if the scratch directory is not encrypted (or encryption is weak)"); + } else { + stagingPath = tmpPath; + } + } + } else { + LOG.debug("Encryption is not applicable to table path " + tablePath.toString()); + } + + if (stagingPath == null) { + stagingPath = ctx.getMRTmpPath(tablePath.toUri()); + } + } else { + stagingPath = ctx.getMRTmpPath(); + } + + return stagingPath; + } + private void replaceViewReferenceWithDefinition(QB qb, Table tab, String tab_name, String alias) throws SemanticException { @@ -5948,7 +6109,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) if (isNonNativeTable) { queryTmpdir = dest_path; } else { - queryTmpdir = ctx.getExternalTmpPath(dest_path); + queryTmpdir = ctx.getExtTmpPathRelTo(dest_path); } if (dpCtx != null) { // set the root of the temporary path where dynamic partition columns will populate @@ -6129,7 +6290,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) try { Path qPath = FileUtils.makeQualified(dest_path, conf); - queryTmpdir = ctx.getExternalTmpPath(qPath); + queryTmpdir = ctx.getExtTmpPathRelTo(qPath); } catch (Exception e) { throw new SemanticException("Error creating temporary folder on: " + dest_path, e); @@ -6310,7 +6471,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) // it should be the same as the MoveWork's sourceDir. 
fileSinkDesc.setStatsAggPrefix(fileSinkDesc.getDirName().toString()); if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) { - String statsTmpLoc = ctx.getExternalTmpPath(queryTmpdir).toString(); + String statsTmpLoc = ctx.getExtTmpPathRelTo(queryTmpdir).toString(); LOG.info("Set stats collection dir : " + statsTmpLoc); conf.set(StatsSetupConst.STATS_TMP_LOC, statsTmpLoc); } @@ -9457,7 +9618,7 @@ private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String tsDesc.setGatherStats(false); } else { if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) { - String statsTmpLoc = ctx.getExternalTmpPath(tab.getPath()).toString(); + String statsTmpLoc = ctx.getExtTmpPathRelTo(tab.getPath()).toString(); LOG.info("Set stats collection dir : " + statsTmpLoc); conf.set(StatsSetupConst.STATS_TMP_LOC, statsTmpLoc); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java index 727f61f..2be6f26 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.metadata.*; import org.apache.hadoop.hive.ql.session.SessionState; /** @@ -49,8 +50,14 @@ public static CommandProcessor get(String cmd) } public static CommandProcessor getForHiveCommand(String[] cmd, HiveConf conf) - throws SQLException { - HiveCommand hiveCommand = HiveCommand.find(cmd); + throws SQLException { + return getForHiveCommandInternal(cmd, conf, false); + } + + public static CommandProcessor getForHiveCommandInternal(String[] cmd, HiveConf conf, + boolean testOnly) + throws SQLException { + HiveCommand hiveCommand = HiveCommand.find(cmd, testOnly); if (hiveCommand == null || isBlank(cmd[0])) { return null; } @@ -58,7 +65,8 @@ public static CommandProcessor getForHiveCommand(String[] cmd, HiveConf conf) conf = new HiveConf(); } Set availableCommands = new HashSet(); - for (String availableCommand : conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST).split(",")) { + for (String availableCommand : conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST) + .split(",")) { availableCommands.add(availableCommand.toLowerCase().trim()); } if (!availableCommands.contains(cmd[0].trim().toLowerCase())) { @@ -82,6 +90,12 @@ public static CommandProcessor getForHiveCommand(String[] cmd, HiveConf conf) return new CompileProcessor(); case RELOAD: return new ReloadProcessor(); + case CRYPTO: + try { + return new CryptoProcessor(SessionState.get().getHdfsEncryptionShim(), conf); + } catch (HiveException e) { + throw new SQLException("Fail to start the command processor due to the exception: ", e); + } default: throw new AssertionError("Unknown HiveCommand " + hiveCommand); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java new file mode 100644 index 0000000..5eaadbb --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java @@ -0,0 +1,184 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.processors; + +import org.apache.commons.cli.Options; +import org.apache.commons.cli.OptionBuilder; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.ParseException; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.GnuParser; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.CommandNeedRetryException; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.shims.HadoopShims; + +import java.io.IOException; +import java.util.Arrays; + +/** + * This class processes HADOOP commands used for HDFS encryption. It is meant to be run + * only by Hive unit & queries tests. + */ +public class CryptoProcessor implements CommandProcessor { + public static final Log LOG = LogFactory.getLog(CryptoProcessor.class.getName()); + + private HadoopShims.HdfsEncryptionShim encryptionShim; + + private Options CREATE_KEY_OPTIONS; + private Options DELETE_KEY_OPTIONS; + private Options CREATE_ZONE_OPTIONS; + + private int DEFAULT_BIT_LENGTH = 128; + + private HiveConf conf; + + public CryptoProcessor(HadoopShims.HdfsEncryptionShim encryptionShim, HiveConf conf) { + this.encryptionShim = encryptionShim; + this.conf = conf; + + CREATE_KEY_OPTIONS = new Options(); + CREATE_KEY_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("keyName").isRequired().create()); + CREATE_KEY_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("bitLength").create()); // optional + + DELETE_KEY_OPTIONS = new Options(); + DELETE_KEY_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("keyName").isRequired().create()); + + CREATE_ZONE_OPTIONS = new Options(); + CREATE_ZONE_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("keyName").isRequired().create()); + CREATE_ZONE_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("path").isRequired().create()); + } + + private CommandLine parseCommandArgs(final Options opts, String[] args) throws ParseException { + CommandLineParser parser = new GnuParser(); + return parser.parse(opts, args); + } + + private CommandProcessorResponse returnErrorResponse(final String errmsg) { + return new CommandProcessorResponse(1, "Encryption Processor Helper Failed:" + errmsg, null); + } + + private void writeTestOutput(final String msg) { + SessionState.get().out.println(msg); + } + + @Override + public void init() { + } + + @Override + public CommandProcessorResponse run(String command) throws CommandNeedRetryException { + String[] args = command.split("\\s+"); + + if (args.length < 1) { + return returnErrorResponse("Command arguments are empty."); + } + + if (encryptionShim == null) { + return returnErrorResponse("Hadoop encryption shim is not initialized."); + } + + String action = args[0]; + String params[] = Arrays.copyOfRange(args, 1, 
args.length); + + try { + if (action.equalsIgnoreCase("create_key")) { + createEncryptionKey(params); + } else if (action.equalsIgnoreCase("create_zone")) { + createEncryptionZone(params); + } else if (action.equalsIgnoreCase("delete_key")) { + deleteEncryptionKey(params); + } else { + return returnErrorResponse("Unknown command action: " + action); + } + } catch (Exception e) { + return returnErrorResponse(e.getMessage()); + } + + return new CommandProcessorResponse(0); + } + + /** + * Creates an encryption key using the parameters passed through the 'create_key' action. + * + * @param params Parameters passed to the 'create_key' command action. + * @throws Exception If key creation failed. + */ + private void createEncryptionKey(String[] params) throws Exception { + CommandLine args = parseCommandArgs(CREATE_KEY_OPTIONS, params); + + String keyName = args.getOptionValue("keyName"); + String bitLength = args.getOptionValue("bitLength", Integer.toString(DEFAULT_BIT_LENGTH)); + + try { + encryptionShim.createKey(keyName, new Integer(bitLength)); + } catch (Exception e) { + throw new Exception("Cannot create encryption key: " + e.getMessage()); + } + + writeTestOutput("Encryption key created: '" + keyName + "'"); + } + + /** + * Creates an encryption zone using the parameters passed through the 'create_zone' action. + * + * @param params Parameters passed to the 'create_zone' command action. + * @throws Exception If zone creation failed. + */ + private void createEncryptionZone(String[] params) throws Exception { + CommandLine args = parseCommandArgs(CREATE_ZONE_OPTIONS, params); + + String keyName = args.getOptionValue("keyName"); + Path cryptoZone = new Path(args.getOptionValue("path")); + if (cryptoZone == null) { + throw new Exception("Cannot create encryption zone: Invalid path '" + + args.getOptionValue("path") + "'"); + } + + try { + encryptionShim.createEncryptionZone(cryptoZone, keyName); + } catch (IOException e) { + throw new Exception("Cannot create encryption zone: " + e.getMessage()); + } + + writeTestOutput("Encryption zone created: '" + cryptoZone + "' using key: '" + keyName + "'"); + } + + /** + * Deletes an encryption key using the parameters passed through the 'delete_key' action. + * + * @param params Parameters passed to the 'delete_key' command action. + * @throws Exception If key deletion failed. 
+ */ + private void deleteEncryptionKey(String[] params) throws Exception { + CommandLine args = parseCommandArgs(DELETE_KEY_OPTIONS, params); + + String keyName = args.getOptionValue("keyName"); + try { + encryptionShim.deleteKey(keyName); + } catch (IOException e) { + throw new Exception("Cannot delete encryption key: " + e.getMessage()); + } + + writeTestOutput("Encryption key deleted: '" + keyName + "'"); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java index 27d8325..319a79b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java @@ -29,18 +29,40 @@ SET(), RESET(), DFS(), + CRYPTO(true), ADD(), LIST(), RELOAD(), DELETE(), COMPILE(); + + public static boolean ONLY_FOR_TESTING = true; + private boolean usedOnlyForTesting; + + HiveCommand() { + this(false); + } + + HiveCommand(boolean onlyForTesting) { + this.usedOnlyForTesting = onlyForTesting; + } + + public boolean isOnlyForTesting() { + return this.usedOnlyForTesting; + } + private static final Set COMMANDS = new HashSet(); static { for (HiveCommand command : HiveCommand.values()) { COMMANDS.add(command.name()); } } + public static HiveCommand find(String[] command) { + return find(command, false); + } + + public static HiveCommand find(String[] command, boolean findOnlyForTesting) { if (null == command){ return null; } @@ -54,7 +76,13 @@ public static HiveCommand find(String[] command) { //special handling for SQL "delete from where..." return null; } else if (COMMANDS.contains(cmd)) { - return HiveCommand.valueOf(cmd); + HiveCommand hiveCommand = HiveCommand.valueOf(cmd); + + if (findOnlyForTesting == hiveCommand.isOnlyForTesting()) { + return hiveCommand; + } + + return null; } } return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index f45b20a..c315985 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -66,6 +66,7 @@ import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext.CLIENT_TYPE; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactoryImpl; import org.apache.hadoop.hive.ql.util.DosToUnix; +import org.apache.hadoop.hive.shims.HadoopShims; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; @@ -184,6 +185,11 @@ private SparkSession sparkSession; /** + * Gets information about HDFS encryption + */ + private HadoopShims.HdfsEncryptionShim hdfsEncryptionShim; + + /** * Lineage state. */ LineageState ls; @@ -378,6 +384,23 @@ public boolean isAutoCommit() { return txnAutoCommit; } + public HadoopShims.HdfsEncryptionShim getHdfsEncryptionShim() throws HiveException { + if (hdfsEncryptionShim == null) { + try { + FileSystem fs = FileSystem.get(conf); + if ("hdfs".equals(fs.getUri().getScheme())) { + hdfsEncryptionShim = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf); + } else { + LOG.info("Could not get hdfsEncryptionShim, it is only applicable to hdfs filesystem."); + } + } catch (Exception e) { + throw new HiveException(e); + } + } + + return hdfsEncryptionShim; + } + /** * Singleton Session object per thread. 
* @@ -410,7 +433,6 @@ public static void detachSession() { * when switching from one session to another. */ public static SessionState start(SessionState startSs) { - setCurrentSessionState(startSs); if (startSs.hiveHist == null){ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index b8e7c70..7fced7c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.CompactionRequest; @@ -262,7 +263,7 @@ private CompactionType determineCompactionType(CompactionInfo ci, ValidTxnList t private long sumDirSize(FileSystem fs, Path dir) throws IOException { long size = 0; - FileStatus[] buckets = fs.listStatus(dir); + FileStatus[] buckets = fs.listStatus(dir, FileUtils.HIDDEN_FILES_PATH_FILTER); for (int i = 0; i < buckets.length; i++) { size += buckets[i].getLen(); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java index 7f0d12a..6f0b9df 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java @@ -101,7 +101,7 @@ protected void tearDown() throws IOException { } /** - * Test combine symlink text input file. Two input dir, and each contails one + * Test combine symlink text input file. Two input dir, and each contains one * file, and then create one symlink file containing these 2 files. 
Normally * without combine, it will return at least 2 splits */ @@ -166,7 +166,11 @@ public void testCombine() throws Exception { } String cmd = "select key*1 from " + tblName; - drv.compile(cmd); + ecode = drv.compile(cmd); + if (ecode != 0) { + throw new Exception("Select compile: " + cmd + + " failed with exit code= " + ecode); + } //create scratch dir Context ctx = new Context(newJob); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java b/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java index ac5053a..21bdcf4 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java @@ -20,15 +20,17 @@ import java.sql.SQLException; -import junit.framework.Assert; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.session.SessionState; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; public class TestCommandProcessorFactory { + private final String[] testOnlyCommands = new String[]{"crypto"}; + private HiveConf conf; @Before @@ -38,27 +40,39 @@ public void setUp() throws Exception { @Test public void testInvalidCommands() throws Exception { - Assert.assertNull("Null should have returned null", CommandProcessorFactory.getForHiveCommand(null, conf)); - Assert.assertNull("Blank should have returned null", CommandProcessorFactory.getForHiveCommand(new String[]{" "}, conf)); - Assert.assertNull("set role should have returned null", CommandProcessorFactory.getForHiveCommand(new String[]{"set role"}, conf)); - Assert.assertNull("SQL should have returned null", CommandProcessorFactory.getForHiveCommand(new String[]{"SELECT * FROM TABLE"}, conf)); + Assert.assertNull("Null should have returned null", + CommandProcessorFactory.getForHiveCommand(null, conf)); + Assert.assertNull("Blank should have returned null", + CommandProcessorFactory.getForHiveCommand(new String[]{" "}, conf)); + Assert.assertNull("Set role should have returned null", + CommandProcessorFactory.getForHiveCommand(new String[]{"set role"}, conf)); + Assert.assertNull("SQL should have returned null", + CommandProcessorFactory.getForHiveCommand(new String[]{"SELECT * FROM TABLE"}, conf)); + Assert.assertNull("Test only command should have returned null", + CommandProcessorFactory.getForHiveCommand(new String[]{"CRYPTO --listZones"}, conf)); } + @Test public void testAvailableCommands() throws Exception { + enableTestOnlyCmd(conf); SessionState.start(conf); + for (HiveCommand command : HiveCommand.values()) { String cmd = command.name(); - Assert.assertNotNull("Cmd " + cmd + " not return null", CommandProcessorFactory.getForHiveCommand(new String[]{cmd}, conf)); - } - for (HiveCommand command : HiveCommand.values()) { - String cmd = command.name().toLowerCase(); - Assert.assertNotNull("Cmd " + cmd + " not return null", CommandProcessorFactory.getForHiveCommand(new String[]{cmd}, conf)); + String cmdInLowerCase = cmd.toLowerCase(); + Assert.assertNotNull("Cmd " + cmd + " not return null", + CommandProcessorFactory + .getForHiveCommandInternal(new String[]{cmd}, conf, command.isOnlyForTesting())); + Assert.assertNotNull("Cmd " + cmd + " not return null", + CommandProcessorFactory.getForHiveCommandInternal( + new String[]{cmdInLowerCase}, conf, command.isOnlyForTesting())); } conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), ""); for (HiveCommand command : HiveCommand.values()) { 
String cmd = command.name(); try { - CommandProcessorFactory.getForHiveCommand(new String[]{cmd}, conf); + CommandProcessorFactory + .getForHiveCommandInternal(new String[]{cmd}, conf, command.isOnlyForTesting()); Assert.fail("Expected SQLException for " + cmd + " as available commands is empty"); } catch (SQLException e) { Assert.assertEquals("Insufficient privileges to execute " + cmd, e.getMessage()); @@ -66,4 +80,13 @@ public void testAvailableCommands() throws Exception { } } } + + private void enableTestOnlyCmd(HiveConf conf){ + StringBuilder securityCMDs = new StringBuilder(conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST)); + for(String c : testOnlyCommands){ + securityCMDs.append(","); + securityCMDs.append(c); + } + conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), securityCMDs.toString()); + } } diff --git a/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q b/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q new file mode 100644 index 0000000..8674972 --- /dev/null +++ b/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q @@ -0,0 +1,57 @@ +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.enforce.bucketing=true; + +-- SORT_QUERY_RESULTS + +-- init +drop table IF EXISTS encryptedTable; +drop table IF EXISTS unencryptedTable; + +create table encryptedTable(value string) + partitioned by (key string) clustered by (value) into 2 buckets stored as orc + LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encryptedTable' TBLPROPERTIES ('transactional'='true'); +CRYPTO CREATE_KEY --keyName key_1 --bitLength 128; +CRYPTO CREATE_ZONE --keyName key_1 --path ${hiveconf:hive.metastore.warehouse.dir}/encryptedTable; + +create table unencryptedTable(value string) + partitioned by (key string) clustered by (value) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); + +-- insert encrypted table from values +explain extended insert into table encryptedTable partition (key) values + ('val_501', '501'), + ('val_502', '502'); + +insert into table encryptedTable partition (key) values + ('val_501', '501'), + ('val_502', '502'); + +select * from encryptedTable order by key; + +-- insert encrypted table from unencrypted source +explain extended from src +insert into table encryptedTable partition (key) + select value, key limit 2; + +from src +insert into table encryptedTable partition (key) + select value, key limit 2; + +select * from encryptedTable order by key; + +-- insert unencrypted table from encrypted source +explain extended from encryptedTable +insert into table unencryptedTable partition (key) + select value, key; + +from encryptedTable +insert into table unencryptedTable partition (key) + select value, key; + +select * from unencryptedTable order by key; + +-- clean up +drop table encryptedTable; +CRYPTO DELETE_KEY --keyName key_1; +drop table unencryptedTable; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q b/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q new file mode 100644 index 0000000..10fb965 --- /dev/null +++ b/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q @@ -0,0 +1,62 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.enforce.bucketing=true; + +-- SORT_QUERY_RESULTS + +-- 
init +drop table IF EXISTS encryptedTable; +drop table IF EXISTS unencryptedTable; + +create table encryptedTable(key string, + value string) partitioned by (ds string) clustered by (key) into 2 buckets stored as orc + LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encryptedTable' TBLPROPERTIES ('transactional'='true'); +CRYPTO CREATE_KEY --keyName key_1 --bitLength 128; +CRYPTO CREATE_ZONE --keyName key_1 --path ${hiveconf:hive.metastore.warehouse.dir}/encryptedTable; + +create table unencryptedTable(key string, + value string) partitioned by (ds string) clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); + +-- insert encrypted table from values +explain extended insert into table encryptedTable partition + (ds='today') values + ('501', 'val_501'), + ('502', 'val_502'); + +insert into table encryptedTable partition + (ds='today') values + ('501', 'val_501'), + ('502', 'val_502'); + +select * from encryptedTable order by key; + +-- insert encrypted table from unencrypted source +explain extended from src +insert into table encryptedTable partition + (ds='yesterday') + select * limit 2; + +from src +insert into table encryptedTable partition + (ds='yesterday') + select * limit 2; + +select * from encryptedTable order by key; + +-- insert unencrypted table from encrypted source +explain extended from encryptedTable +insert into table unencryptedTable partition + (ds='today') + select key, value; + +from encryptedTable +insert into table unencryptedTable partition + (ds='today') + select key, value; + +select * from unencryptedTable order by key; + +-- clean up +drop table encryptedTable; +CRYPTO DELETE_KEY --keyName key_1; +drop table unencryptedTable; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/encryption_join_unencrypted_tbl.q b/ql/src/test/queries/clientpositive/encryption_join_unencrypted_tbl.q new file mode 100644 index 0000000..adf82de --- /dev/null +++ b/ql/src/test/queries/clientpositive/encryption_join_unencrypted_tbl.q @@ -0,0 +1,15 @@ +--SORT_QUERY_RESULTS + +DROP TABLE IF EXISTS encrypted_table; +CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table'; +CRYPTO CREATE_KEY --keyName key_128 --bitLength 128; +CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table; + +INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src; + +SELECT * FROM encrypted_table; + +EXPLAIN EXTENDED SELECT * FROM src t1 JOIN encrypted_table t2 WHERE t1.key = t2.key; + +drop table encrypted_table; +CRYPTO DELETE_KEY --keyName key_128; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/encryption_join_with_different_encryption_keys.q b/ql/src/test/queries/clientpositive/encryption_join_with_different_encryption_keys.q new file mode 100644 index 0000000..c830247 --- /dev/null +++ b/ql/src/test/queries/clientpositive/encryption_join_with_different_encryption_keys.q @@ -0,0 +1,24 @@ +--SORT_QUERY_RESULTS + +-- Java JCE must be installed in order to hava a key length of 256 bits +DROP TABLE IF EXISTS table_key_1; +CREATE TABLE table_key_1 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/table_key_1'; +CRYPTO CREATE_KEY --keyName key_1 --bitLength 128; +CRYPTO CREATE_ZONE --keyName key_1 --path ${hiveconf:hive.metastore.warehouse.dir}/table_key_1; + +DROP TABLE IF EXISTS table_key_2; +CREATE TABLE table_key_2 (key INT, value STRING) LOCATION 
'${hiveconf:hive.metastore.warehouse.dir}/table_key_2'; +CRYPTO CREATE_KEY --keyName key_2 --bitLength 256; +CRYPTO CREATE_ZONE --keyName key_2 --path ${hiveconf:hive.metastore.warehouse.dir}/table_key_2; + +INSERT OVERWRITE TABLE table_key_1 SELECT * FROM src; +INSERT OVERWRITE TABLE table_key_2 SELECT * FROM src; + +EXPLAIN EXTENDED SELECT * FROM table_key_1 t1 JOIN table_key_2 t2 WHERE (t1.key = t2.key); +SELECT * FROM table_key_1 t1 JOIN table_key_2 t2 WHERE (t1.key = t2.key); + +DROP TABLE table_key_1; +DROP TABLE table_key_2; + +CRYPTO DELETE_KEY --keyName key_1; +CRYPTO DELETE_KEY --keyName key_2; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q b/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q new file mode 100644 index 0000000..e4d3a4c --- /dev/null +++ b/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS encrypted_table; + +CREATE TABLE encrypted_table (key STRING, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encrypted_table'; + +-- Create encryption key and zone; +crypto create_key --keyName key1; +crypto create_zone --keyName key1 --path ${hiveconf:hive.metastore.warehouse.dir}/encrypted_table; + +-- Test loading data from the local filesystem; +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE encrypted_table; +SELECT * FROM encrypted_table; + +-- Test loading data from the hdfs filesystem; +dfs -copyFromLocal ../../data/files/kv1.txt hdfs:///tmp/kv1.txt; +LOAD DATA INPATH '/tmp/kv1.txt' OVERWRITE INTO TABLE encrypted_table; +SELECT * FROM encrypted_table; + +DROP TABLE encrypted_table; + +crypto delete_key --keyName key1; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q b/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q new file mode 100644 index 0000000..c013425 --- /dev/null +++ b/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q @@ -0,0 +1,16 @@ +-- SORT_QUERY_RESULTS + +DROP TABLE IF EXISTS encrypted_table; +CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table'; + +CRYPTO CREATE_KEY --keyName key_128 --bitLength 128; +CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table; + +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE encrypted_table; + +dfs -chmod -R 555 ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table; + +SELECT count(*) FROM encrypted_table; + +drop table encrypted_table; +CRYPTO DELETE_KEY --keyName key_128; diff --git a/ql/src/test/queries/clientpositive/encryption_select_read_only_unencrypted_tbl.q b/ql/src/test/queries/clientpositive/encryption_select_read_only_unencrypted_tbl.q new file mode 100644 index 0000000..732740e --- /dev/null +++ b/ql/src/test/queries/clientpositive/encryption_select_read_only_unencrypted_tbl.q @@ -0,0 +1,12 @@ +-- SORT_QUERY_RESULTS + +DROP TABLE IF EXISTS unencrypted_table; +CREATE TABLE unencrypted_table(key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/unencrypted_table'; + +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE unencrypted_table; + +dfs -chmod -R 555 ${hiveconf:hive.metastore.warehouse.dir}/default/unencrypted_table; + +SELECT count(*) FROM unencrypted_table; + +drop table 
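[Reviewer note] The encryption .q scripts above all follow the same setup: create the table at an explicit warehouse location, create a key, then turn that location into an HDFS encryption zone with the test-only CRYPTO commands before loading or inserting data. As a rough illustration of what that setup amounts to outside the test driver, the sketch below creates a zone directly through the public HdfsAdmin API; the path and key name are borrowed from the scripts, and the assumption that CRYPTO CREATE_ZONE corresponds to this call is mine, not something the patch states. The key is assumed to exist already (e.g. created with the "hadoop key create key_128 -size 128" CLI).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class CreateZoneSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path tableDir = new Path("/build/ql/test/data/warehouse/encrypted_table");

    FileSystem fs = FileSystem.get(conf);
    fs.mkdirs(tableDir); // the zone root must exist (and be empty) before zone creation

    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    admin.createEncryptionZone(tableDir, "key_128");
    System.out.println("Encryption zone created at " + tableDir);
  }
}
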
unencrypted_table; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_11.q b/ql/src/test/queries/clientpositive/smb_mapjoin_11.q index 48ba8e8..9300638 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_11.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_11.q @@ -29,6 +29,13 @@ INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1'; +SELECT * FROM test_table1 ORDER BY key; +SELECT * FROM test_table3 ORDER BY key; +EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16); +EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16); +SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16); +SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16); + -- Join data from a sampled bucket to verify the data is bucketed SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1'; diff --git a/ql/src/test/results/clientnegative/fs_default_name2.q.out b/ql/src/test/results/clientnegative/fs_default_name2.q.out index 97477ee..b68b7ee 100644 --- a/ql/src/test/results/clientnegative/fs_default_name2.q.out +++ b/ql/src/test/results/clientnegative/fs_default_name2.q.out @@ -1 +1 @@ -FAILED: IllegalArgumentException Illegal character in scheme name at index 0: 'http://www.example.com +FAILED: SemanticException java.lang.IllegalArgumentException: Illegal character in scheme name at index 0: 'http://www.example.com diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out new file mode 100644 index 0000000..b6e7b88 --- /dev/null +++ b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out @@ -0,0 +1,896 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +-- init +drop table IF EXISTS encryptedTable +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- SORT_QUERY_RESULTS + +-- init +drop table IF EXISTS encryptedTable +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table IF EXISTS unencryptedTable +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table IF EXISTS unencryptedTable +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table encryptedTable(value string) + partitioned by (key string) clustered by (value) into 2 buckets stored as orc +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@encryptedTable +POSTHOOK: query: create table encryptedTable(value string) + partitioned by (key string) clustered by (value) into 2 buckets stored as orc +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@encryptedTable +Encryption key created: 'key_1' +Encryption zone created: '/build/ql/test/data/warehouse/encryptedTable' using key: 'key_1' +PREHOOK: query: create table unencryptedTable(value string) + partitioned by (key string) clustered by (value) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@unencryptedTable +POSTHOOK: query: create table 
unencryptedTable(value string) + partitioned by (key string) clustered by (value) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@unencryptedTable +PREHOOK: query: -- insert encrypted table from values +explain extended insert into table encryptedTable partition (key) values + ('val_501', '501'), + ('val_502', '502') +PREHOOK: type: QUERY +POSTHOOK: query: -- insert encrypted table from values +explain extended insert into table encryptedTable partition (key) values + ('val_501', '501'), + ('val_502', '502') +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + null + null + Values__Tmp__Table__1 + TOK_INSERT + TOK_INSERT_INTO + TOK_TAB + TOK_TABNAME + encryptedTable + TOK_PARTSPEC + TOK_PARTVAL + key + TOK_SELECT + TOK_SELEXPR + TOK_ALLCOLREF + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: values__tmp__table__1 + Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL Column stats: NONE + GatherStats: false + Select Operator + expressions: tmp_values_col1 (type: string), tmp_values_col2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL Column stats: NONE + tag: -1 + value expressions: _col0 (type: string), _col1 (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: Values__Tmp__Table__1 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns tmp_values_col1,tmp_values_col2 + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.values__tmp__table__1 + serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns tmp_values_col1,tmp_values_col2 + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.values__tmp__table__1 + serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.values__tmp__table__1 + name: default.values__tmp__table__1 + Truncated Path -> Alias: +#### A masked pattern was here #### + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging + NumFilesPerFileSink: 1 + Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL 
Column stats: NONE +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns key + partition_columns.types string + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false + + Stage: Stage-0 + Move Operator + tables: + partition: + key + replace: false +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns key + partition_columns.types string + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + + Stage: Stage-2 + Stats-Aggr Operator +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging + +PREHOOK: query: insert into table encryptedTable partition (key) values + ('val_501', '501'), + ('val_502', '502') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@encryptedtable +POSTHOOK: query: insert into table encryptedTable partition (key) values + ('val_501', '501'), + ('val_502', '502') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@encryptedtable@key=501 +POSTHOOK: Output: default@encryptedtable@key=502 +POSTHOOK: Lineage: encryptedtable PARTITION(key=501).value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: encryptedtable PARTITION(key=502).value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from encryptedTable order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@encryptedtable +PREHOOK: Input: default@encryptedtable@key=501 +PREHOOK: Input: default@encryptedtable@key=502 +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging +POSTHOOK: query: select * from encryptedTable order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@encryptedtable +POSTHOOK: Input: default@encryptedtable@key=501 +POSTHOOK: Input: default@encryptedtable@key=502 +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging +val_501 501 +val_502 502 +PREHOOK: query: -- insert encrypted table from unencrypted source +explain extended from src +insert into table encryptedTable partition (key) + select value, key limit 2 +PREHOOK: type: QUERY +POSTHOOK: query: -- insert encrypted table from unencrypted 
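[Reviewer note] One detail visible in the EXPLAIN EXTENDED output above and below: for the encrypted target, the File Output and Move stages all point at a ".hive-staging" directory under the encrypted table's own location ("data/warehouse/encryptedTable/.hive-staging" in the masked paths). For completeness, the sketch below shows one way a client could check whether a destination path sits inside an encryption zone via HdfsAdmin; it is only an illustration of that check, not the placement logic Hive itself uses, and the class name is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

public class ZoneCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path target = new Path("/build/ql/test/data/warehouse/encryptedTable");

    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    // Returns null when the path is not inside any encryption zone.
    EncryptionZone zone = admin.getEncryptionZoneForPath(target);
    if (zone != null) {
      System.out.println(target + " is inside encryption zone " + zone.getPath());
    } else {
      System.out.println(target + " is not encrypted");
    }
  }
}
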
source +explain extended from src +insert into table encryptedTable partition (key) + select value, key limit 2 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + src + TOK_INSERT + TOK_INSERT_INTO + TOK_TAB + TOK_TABNAME + encryptedTable + TOK_PARTSPEC + TOK_PARTVAL + key + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + value + TOK_SELEXPR + TOK_TABLE_OR_COL + key + TOK_LIMIT + 2 + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Select Operator + expressions: value (type: string), key (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 2 + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: string), _col1 (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: src + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src + name: default.src + Truncated Path -> Alias: + /src [$hdt$_0:src] + Needs Tagging: false + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 2 + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + columns _col0,_col1 + columns.types string,string + 
escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + GatherStats: false + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: string), _col1 (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: -mr-10001 + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + columns _col0,_col1 + columns.types string,string + escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + columns _col0,_col1 + columns.types string,string + escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + Truncated Path -> Alias: +#### A masked pattern was here #### + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging + NumFilesPerFileSink: 1 + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns key + partition_columns.types string + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false + + Stage: Stage-0 + Move Operator + tables: + partition: + key + replace: false +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns key + partition_columns.types string + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: 
default.encryptedtable + + Stage: Stage-3 + Stats-Aggr Operator +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging + +PREHOOK: query: from src +insert into table encryptedTable partition (key) + select value, key limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@encryptedtable +POSTHOOK: query: from src +insert into table encryptedTable partition (key) + select value, key limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@encryptedtable@key=238 +POSTHOOK: Output: default@encryptedtable@key=86 +POSTHOOK: Lineage: encryptedtable PARTITION(key=238).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: encryptedtable PARTITION(key=86).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from encryptedTable order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@encryptedtable +PREHOOK: Input: default@encryptedtable@key=238 +PREHOOK: Input: default@encryptedtable@key=501 +PREHOOK: Input: default@encryptedtable@key=502 +PREHOOK: Input: default@encryptedtable@key=86 +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging +POSTHOOK: query: select * from encryptedTable order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@encryptedtable +POSTHOOK: Input: default@encryptedtable@key=238 +POSTHOOK: Input: default@encryptedtable@key=501 +POSTHOOK: Input: default@encryptedtable@key=502 +POSTHOOK: Input: default@encryptedtable@key=86 +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging +val_238 238 +val_501 501 +val_502 502 +val_86 86 +PREHOOK: query: -- insert unencrypted table from encrypted source +explain extended from encryptedTable +insert into table unencryptedTable partition (key) + select value, key +PREHOOK: type: QUERY +POSTHOOK: query: -- insert unencrypted table from encrypted source +explain extended from encryptedTable +insert into table unencryptedTable partition (key) + select value, key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + encryptedTable + TOK_INSERT + TOK_INSERT_INTO + TOK_TAB + TOK_TABNAME + unencryptedTable + TOK_PARTSPEC + TOK_PARTVAL + key + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + value + TOK_SELEXPR + TOK_TABLE_OR_COL + key + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: encryptedtable + Statistics: Num rows: 21 Data size: 2372 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Select Operator + expressions: value (type: string), key (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 21 Data size: 2372 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 21 Data size: 2372 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: string), _col1 (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: key=238 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + key 238 + properties: + 
COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + numFiles 1 + numRows 0 + partition_columns key + partition_columns.types string + rawDataSize 0 + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 600 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns key + partition_columns.types string + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + name: default.encryptedtable +#### A masked pattern was here #### + Partition + base file name: key=501 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + key 501 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + numFiles 1 + numRows 0 + partition_columns key + partition_columns.types string + rawDataSize 0 + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 592 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns key + partition_columns.types string + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + name: default.encryptedtable +#### A masked pattern was here #### + Partition + base file name: key=502 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + key 502 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + numFiles 1 + numRows 0 + partition_columns key + partition_columns.types string + rawDataSize 0 + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 592 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns key + partition_columns.types string + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + name: default.encryptedtable +#### A masked pattern was here #### + Partition + base file name: key=86 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + key 86 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + numFiles 1 + numRows 0 + partition_columns key + partition_columns.types string + rawDataSize 0 + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 588 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns key + partition_columns.types string + serialization.ddl struct encryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + name: default.encryptedtable + Truncated Path -> Alias: + /encryptedTable/key=238 [encryptedtable] + /encryptedTable/key=501 [encryptedtable] + /encryptedTable/key=502 [encryptedtable] + /encryptedTable/key=86 [encryptedtable] + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: Num rows: 21 Data size: 2372 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/.hive-staging + NumFilesPerFileSink: 1 + Statistics: Num rows: 21 Data size: 2372 Basic stats: COMPLETE Column stats: NONE +#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/.hive-staging + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.unencryptedtable + partition_columns key + partition_columns.types string + serialization.ddl struct unencryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.unencryptedtable + TotalFiles: 1 + GatherStats: true + 
MultiFileSpray: false + + Stage: Stage-0 + Move Operator + tables: + partition: + key + replace: false +#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/.hive-staging + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name value + columns value + columns.comments + columns.types string +#### A masked pattern was here #### + name default.unencryptedtable + partition_columns key + partition_columns.types string + serialization.ddl struct unencryptedtable { string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.unencryptedtable + + Stage: Stage-2 + Stats-Aggr Operator +#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/.hive-staging + +PREHOOK: query: from encryptedTable +insert into table unencryptedTable partition (key) + select value, key +PREHOOK: type: QUERY +PREHOOK: Input: default@encryptedtable +PREHOOK: Input: default@encryptedtable@key=238 +PREHOOK: Input: default@encryptedtable@key=501 +PREHOOK: Input: default@encryptedtable@key=502 +PREHOOK: Input: default@encryptedtable@key=86 +PREHOOK: Output: default@unencryptedtable +POSTHOOK: query: from encryptedTable +insert into table unencryptedTable partition (key) + select value, key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@encryptedtable +POSTHOOK: Input: default@encryptedtable@key=238 +POSTHOOK: Input: default@encryptedtable@key=501 +POSTHOOK: Input: default@encryptedtable@key=502 +POSTHOOK: Input: default@encryptedtable@key=86 +POSTHOOK: Output: default@unencryptedtable@key=238 +POSTHOOK: Output: default@unencryptedtable@key=501 +POSTHOOK: Output: default@unencryptedtable@key=502 +POSTHOOK: Output: default@unencryptedtable@key=86 +POSTHOOK: Lineage: unencryptedtable PARTITION(key=238).value SIMPLE [(encryptedtable)encryptedtable.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: unencryptedtable PARTITION(key=501).value SIMPLE [(encryptedtable)encryptedtable.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: unencryptedtable PARTITION(key=502).value SIMPLE [(encryptedtable)encryptedtable.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: unencryptedtable PARTITION(key=86).value SIMPLE [(encryptedtable)encryptedtable.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from unencryptedTable order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@unencryptedtable +PREHOOK: Input: default@unencryptedtable@key=238 +PREHOOK: Input: default@unencryptedtable@key=501 +PREHOOK: Input: default@unencryptedtable@key=502 +PREHOOK: Input: default@unencryptedtable@key=86 +#### A masked pattern was here #### +POSTHOOK: query: select * from unencryptedTable order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@unencryptedtable +POSTHOOK: Input: default@unencryptedtable@key=238 +POSTHOOK: Input: default@unencryptedtable@key=501 +POSTHOOK: Input: default@unencryptedtable@key=502 +POSTHOOK: Input: default@unencryptedtable@key=86 +#### A masked pattern was here #### +val_238 238 +val_501 501 +val_502 502 +val_86 86 +PREHOOK: query: -- clean up +drop table encryptedTable +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@encryptedtable +PREHOOK: Output: default@encryptedtable +POSTHOOK: 
query: -- clean up +drop table encryptedTable +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@encryptedtable +POSTHOOK: Output: default@encryptedtable +Encryption key deleted: 'key_1' +PREHOOK: query: drop table unencryptedTable +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@unencryptedtable +PREHOOK: Output: default@unencryptedtable +POSTHOOK: query: drop table unencryptedTable +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@unencryptedtable +POSTHOOK: Output: default@unencryptedtable diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out new file mode 100644 index 0000000..fc6d2ae --- /dev/null +++ b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out @@ -0,0 +1,789 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +-- init +drop table IF EXISTS encryptedTable +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- SORT_QUERY_RESULTS + +-- init +drop table IF EXISTS encryptedTable +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table IF EXISTS unencryptedTable +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table IF EXISTS unencryptedTable +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table encryptedTable(key string, + value string) partitioned by (ds string) clustered by (key) into 2 buckets stored as orc +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@encryptedTable +POSTHOOK: query: create table encryptedTable(key string, + value string) partitioned by (ds string) clustered by (key) into 2 buckets stored as orc +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@encryptedTable +Encryption key created: 'key_1' +Encryption zone created: '/build/ql/test/data/warehouse/encryptedTable' using key: 'key_1' +PREHOOK: query: create table unencryptedTable(key string, + value string) partitioned by (ds string) clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@unencryptedTable +POSTHOOK: query: create table unencryptedTable(key string, + value string) partitioned by (ds string) clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@unencryptedTable +PREHOOK: query: -- insert encrypted table from values +explain extended insert into table encryptedTable partition + (ds='today') values + ('501', 'val_501'), + ('502', 'val_502') +PREHOOK: type: QUERY +POSTHOOK: query: -- insert encrypted table from values +explain extended insert into table encryptedTable partition + (ds='today') values + ('501', 'val_501'), + ('502', 'val_502') +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + null + null + Values__Tmp__Table__1 + TOK_INSERT + TOK_INSERT_INTO + TOK_TAB + TOK_TABNAME + encryptedTable + TOK_PARTSPEC + TOK_PARTVAL + ds + 'today' + TOK_SELECT + TOK_SELEXPR + TOK_ALLCOLREF + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: values__tmp__table__1 + Statistics: Num rows: 0 Data size: 24 
Basic stats: PARTIAL Column stats: NONE + GatherStats: false + Select Operator + expressions: tmp_values_col1 (type: string), tmp_values_col2 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL Column stats: NONE + tag: -1 + value expressions: _col0 (type: string), _col1 (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: Values__Tmp__Table__1 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns tmp_values_col1,tmp_values_col2 + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.values__tmp__table__1 + serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns tmp_values_col1,tmp_values_col2 + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.values__tmp__table__1 + serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.values__tmp__table__1 + name: default.values__tmp__table__1 + Truncated Path -> Alias: +#### A masked pattern was here #### + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging + NumFilesPerFileSink: 1 + Static Partition Specification: ds=today/ + Statistics: Num rows: 0 Data size: 24 Basic stats: PARTIAL Column stats: NONE +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns ds + partition_columns.types string + serialization.ddl struct encryptedtable { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false + + Stage: Stage-0 + Move Operator + tables: + partition: + ds today + replace: false +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging + table: + input format: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns ds + partition_columns.types string + serialization.ddl struct encryptedtable { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + + Stage: Stage-2 + Stats-Aggr Operator +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging + +PREHOOK: query: insert into table encryptedTable partition + (ds='today') values + ('501', 'val_501'), + ('502', 'val_502') +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@encryptedtable@ds=today +POSTHOOK: query: insert into table encryptedTable partition + (ds='today') values + ('501', 'val_501'), + ('502', 'val_502') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@encryptedtable@ds=today +POSTHOOK: Lineage: encryptedtable PARTITION(ds=today).key SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: encryptedtable PARTITION(ds=today).value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +PREHOOK: query: select * from encryptedTable order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@encryptedtable +PREHOOK: Input: default@encryptedtable@ds=today +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging +POSTHOOK: query: select * from encryptedTable order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@encryptedtable +POSTHOOK: Input: default@encryptedtable@ds=today +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging +501 val_501 today +502 val_502 today +PREHOOK: query: -- insert encrypted table from unencrypted source +explain extended from src +insert into table encryptedTable partition + (ds='yesterday') + select * limit 2 +PREHOOK: type: QUERY +POSTHOOK: query: -- insert encrypted table from unencrypted source +explain extended from src +insert into table encryptedTable partition + (ds='yesterday') + select * limit 2 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + src + TOK_INSERT + TOK_INSERT_INTO + TOK_TAB + TOK_TABNAME + encryptedTable + TOK_PARTSPEC + TOK_PARTVAL + ds + 'yesterday' + TOK_SELECT + TOK_SELEXPR + TOK_ALLCOLREF + TOK_LIMIT + 2 + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 2 + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: 
+ Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: string), _col1 (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: src + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src + name: default.src + Truncated Path -> Alias: + /src [src] + Needs Tagging: false + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 2 + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + columns _col0,_col1 + columns.types string,string + escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + GatherStats: false + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: string), _col1 (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: -mr-10001 + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + columns _col0,_col1 + columns.types string,string + escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + properties: + columns _col0,_col1 + columns.types string,string + escape.delim \ + serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + Truncated Path -> Alias: +#### A masked pattern was here #### + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging + NumFilesPerFileSink: 1 + Static Partition Specification: ds=yesterday/ + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: NONE +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns ds + partition_columns.types string + serialization.ddl struct encryptedtable { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false + + Stage: Stage-0 + Move Operator + tables: + partition: + ds yesterday + replace: false +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns ds + partition_columns.types string + serialization.ddl struct encryptedtable { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + + Stage: Stage-3 + Stats-Aggr Operator +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging + +PREHOOK: query: from src +insert into table encryptedTable partition + (ds='yesterday') + select * limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@encryptedtable@ds=yesterday +POSTHOOK: query: from src +insert into table encryptedTable partition + (ds='yesterday') + select * limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@encryptedtable@ds=yesterday +POSTHOOK: Lineage: encryptedtable PARTITION(ds=yesterday).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: encryptedtable PARTITION(ds=yesterday).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from encryptedTable order by key +PREHOOK: type: QUERY +PREHOOK: Input: 
default@encryptedtable +PREHOOK: Input: default@encryptedtable@ds=today +PREHOOK: Input: default@encryptedtable@ds=yesterday +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging +POSTHOOK: query: select * from encryptedTable order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@encryptedtable +POSTHOOK: Input: default@encryptedtable@ds=today +POSTHOOK: Input: default@encryptedtable@ds=yesterday +#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging +238 val_238 yesterday +501 val_501 today +502 val_502 today +86 val_86 yesterday +PREHOOK: query: -- insert unencrypted table from encrypted source +explain extended from encryptedTable +insert into table unencryptedTable partition + (ds='today') + select key, value +PREHOOK: type: QUERY +POSTHOOK: query: -- insert unencrypted table from encrypted source +explain extended from encryptedTable +insert into table unencryptedTable partition + (ds='today') + select key, value +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + encryptedTable + TOK_INSERT + TOK_INSERT_INTO + TOK_TAB + TOK_TABNAME + unencryptedTable + TOK_PARTSPEC + TOK_PARTVAL + ds + 'today' + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + key + TOK_SELEXPR + TOK_TABLE_OR_COL + value + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: encryptedtable + Statistics: Num rows: 12 Data size: 2695 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 12 Data size: 2695 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 12 Data size: 2695 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: string), _col1 (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=today + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + ds today + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.encryptedtable + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct encryptedtable { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 1351 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns ds + partition_columns.types string + serialization.ddl struct encryptedtable { string key, string value} + serialization.format 1 + serialization.lib 
org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + name: default.encryptedtable +#### A masked pattern was here #### + Partition + base file name: ds=yesterday + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + ds yesterday + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.encryptedtable + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct encryptedtable { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 1344 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.encryptedtable + partition_columns ds + partition_columns.types string + serialization.ddl struct encryptedtable { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.encryptedtable + name: default.encryptedtable + Truncated Path -> Alias: + /encryptedTable/ds=today [encryptedtable] + /encryptedTable/ds=yesterday [encryptedtable] + Needs Tagging: false + Reduce Operator Tree: + Extract + Statistics: Num rows: 12 Data size: 2695 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging + NumFilesPerFileSink: 1 + Static Partition Specification: ds=today/ + Statistics: Num rows: 12 Data size: 2695 Basic stats: COMPLETE Column stats: NONE +#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.unencryptedtable + partition_columns ds + partition_columns.types string + serialization.ddl struct unencryptedtable { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.unencryptedtable + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false + + Stage: Stage-0 + Move Operator + tables: + partition: + ds today + replace: false +#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count 2 + bucket_field_name key + 
columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.unencryptedtable + partition_columns ds + partition_columns.types string + serialization.ddl struct unencryptedtable { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + transactional true +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.unencryptedtable + + Stage: Stage-2 + Stats-Aggr Operator +#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging + +PREHOOK: query: from encryptedTable +insert into table unencryptedTable partition + (ds='today') + select key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@encryptedtable +PREHOOK: Input: default@encryptedtable@ds=today +PREHOOK: Input: default@encryptedtable@ds=yesterday +PREHOOK: Output: default@unencryptedtable@ds=today +POSTHOOK: query: from encryptedTable +insert into table unencryptedTable partition + (ds='today') + select key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@encryptedtable +POSTHOOK: Input: default@encryptedtable@ds=today +POSTHOOK: Input: default@encryptedtable@ds=yesterday +POSTHOOK: Output: default@unencryptedtable@ds=today +POSTHOOK: Lineage: unencryptedtable PARTITION(ds=today).key SIMPLE [(encryptedtable)encryptedtable.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: unencryptedtable PARTITION(ds=today).value SIMPLE [(encryptedtable)encryptedtable.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from unencryptedTable order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@unencryptedtable +PREHOOK: Input: default@unencryptedtable@ds=today +#### A masked pattern was here #### +POSTHOOK: query: select * from unencryptedTable order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@unencryptedtable +POSTHOOK: Input: default@unencryptedtable@ds=today +#### A masked pattern was here #### +238 val_238 today +501 val_501 today +502 val_502 today +86 val_86 today +PREHOOK: query: -- clean up +drop table encryptedTable +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@encryptedtable +PREHOOK: Output: default@encryptedtable +POSTHOOK: query: -- clean up +drop table encryptedTable +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@encryptedtable +POSTHOOK: Output: default@encryptedtable +Encryption key deleted: 'key_1' +PREHOOK: query: drop table unencryptedTable +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@unencryptedtable +PREHOOK: Output: default@unencryptedtable +POSTHOOK: query: drop table unencryptedTable +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@unencryptedtable +POSTHOOK: Output: default@unencryptedtable diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out new file mode 100644 index 0000000..99f4c17 --- /dev/null +++ b/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out @@ -0,0 +1,763 @@ +PREHOOK: query: --SORT_QUERY_RESULTS + +DROP TABLE IF EXISTS encrypted_table +PREHOOK: type: DROPTABLE +POSTHOOK: query: --SORT_QUERY_RESULTS + +DROP TABLE IF EXISTS encrypted_table +POSTHOOK: type: DROPTABLE +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@encrypted_table +#### A masked 
pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@encrypted_table +Encryption key created: 'key_128' +Encryption zone created: '/build/ql/test/data/warehouse/default/encrypted_table' using key: 'key_128' +PREHOOK: query: INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@encrypted_table +POSTHOOK: query: INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@encrypted_table +POSTHOOK: Lineage: encrypted_table.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: encrypted_table.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: SELECT * FROM encrypted_table +PREHOOK: type: QUERY +PREHOOK: Input: default@encrypted_table +#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table/.hive-staging +POSTHOOK: query: SELECT * FROM encrypted_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@encrypted_table +#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table/.hive-staging +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +0 val_0 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 
val_496 +0 val_0 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +0 val_0 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 
+462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM src t1 JOIN encrypted_table t2 WHERE t1.key = t2.key +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM src t1 JOIN encrypted_table t2 WHERE t1.key = t2.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + src + t1 + TOK_TABREF + TOK_TABNAME + encrypted_table + t2 + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_ALLCOLREF + TOK_WHERE + = + . + TOK_TABLE_OR_COL + t1 + key + . + TOK_TABLE_OR_COL + t2 + key + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 15 Data size: 3006 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(key) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(key) (type: double) + Statistics: Num rows: 15 Data size: 3006 Basic stats: COMPLETE Column stats: NONE + tag: 0 + value expressions: key (type: string), value (type: string) + auto parallelism: false + TableScan + alias: t2 + Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(key) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(key) (type: double) + Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE + tag: 1 + value expressions: key (type: int), value (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: encrypted_table + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.encrypted_table + numFiles 0 + numRows 0 + rawDataSize 0 + serialization.ddl struct encrypted_table { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 0 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + 
columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.encrypted_table + numFiles 0 + numRows 0 + rawDataSize 0 + serialization.ddl struct encrypted_table { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 0 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.encrypted_table + name: default.encrypted_table +#### A masked pattern was here #### + Partition + base file name: src + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments 'default','default' + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src + name: default.src + Truncated Path -> Alias: + /default/encrypted_table [t2] + /src [t1] + Needs Tagging: true + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 UDFToDouble(key) (type: double) + 1 UDFToDouble(key) (type: double) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE + Filter Operator + isSamplingPred: false + predicate: (_col0 = _col5) (type: boolean) + Statistics: Num rows: 15 Data size: 1626 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 15 Data size: 1626 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table/.hive-staging + NumFilesPerFileSink: 1 + Statistics: Num rows: 15 Data size: 1626 Basic stats: COMPLETE Column stats: NONE +#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table/.hive-staging + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:int:string + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: 
false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: drop table encrypted_table +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@encrypted_table +PREHOOK: Output: default@encrypted_table +POSTHOOK: query: drop table encrypted_table +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@encrypted_table +POSTHOOK: Output: default@encrypted_table +Encryption key deleted: 'key_128' diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out new file mode 100644 index 0000000..c66aacb --- /dev/null +++ b/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out @@ -0,0 +1,1330 @@ +PREHOOK: query: --SORT_QUERY_RESULTS + +-- Java JCE must be installed in order to hava a key length of 256 bits +DROP TABLE IF EXISTS table_key_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: --SORT_QUERY_RESULTS + +-- Java JCE must be installed in order to hava a key length of 256 bits +DROP TABLE IF EXISTS table_key_1 +POSTHOOK: type: DROPTABLE +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@table_key_1 +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table_key_1 +Encryption key created: 'key_1' +Encryption zone created: '/build/ql/test/data/warehouse/table_key_1' using key: 'key_1' +PREHOOK: query: DROP TABLE IF EXISTS table_key_2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS table_key_2 +POSTHOOK: type: DROPTABLE +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@table_key_2 +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table_key_2 +Encryption key created: 'key_2' +Encryption zone created: '/build/ql/test/data/warehouse/table_key_2' using key: 'key_2' +PREHOOK: query: INSERT OVERWRITE TABLE table_key_1 SELECT * FROM src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@table_key_1 +POSTHOOK: query: INSERT OVERWRITE TABLE table_key_1 SELECT * FROM src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@table_key_1 +POSTHOOK: Lineage: table_key_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table_key_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: INSERT OVERWRITE TABLE table_key_2 SELECT * FROM src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@table_key_2 +POSTHOOK: query: INSERT OVERWRITE TABLE table_key_2 SELECT * FROM src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@table_key_2 +POSTHOOK: Lineage: table_key_2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: table_key_2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM table_key_1 t1 JOIN table_key_2 t2 WHERE (t1.key = t2.key) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN EXTENDED 
SELECT * FROM table_key_1 t1 JOIN table_key_2 t2 WHERE (t1.key = t2.key) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + table_key_1 + t1 + TOK_TABREF + TOK_TABNAME + table_key_2 + t2 + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_ALLCOLREF + TOK_WHERE + = + . + TOK_TABLE_OR_COL + t1 + key + . + TOK_TABLE_OR_COL + t2 + key + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE + tag: 0 + value expressions: value (type: string) + auto parallelism: false + TableScan + alias: t2 + Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 28 Data size: 2958 Basic stats: COMPLETE Column stats: NONE + tag: 1 + value expressions: value (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: table_key_1 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.table_key_1 + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct table_key_1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.table_key_1 + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct table_key_1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.table_key_1 + name: default.table_key_1 +#### A masked pattern was here #### + Partition + base file name: table_key_2 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + 
columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.table_key_2 + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct table_key_2 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.table_key_2 + numFiles 1 + numRows 0 + rawDataSize 0 + serialization.ddl struct table_key_2 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.table_key_2 + name: default.table_key_2 + Truncated Path -> Alias: + /table_key_1 [t1] + /table_key_2 [t2] + Needs Tagging: true + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE + Filter Operator + isSamplingPred: false + predicate: (_col0 = _col5) (type: boolean) + Statistics: Num rows: 15 Data size: 1626 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 15 Data size: 1626 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A PARTIAL masked pattern was here #### data/warehouse/table_key_2/.hive-staging + NumFilesPerFileSink: 1 + Statistics: Num rows: 15 Data size: 1626 Basic stats: COMPLETE Column stats: NONE +#### A PARTIAL masked pattern was here #### data/warehouse/table_key_2/.hive-staging + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types int:string:int:string + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT * FROM table_key_1 t1 JOIN table_key_2 t2 WHERE (t1.key = t2.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@table_key_1 +PREHOOK: Input: default@table_key_2 +#### A PARTIAL masked pattern was here #### data/warehouse/table_key_2/.hive-staging +POSTHOOK: query: SELECT * FROM table_key_1 t1 JOIN table_key_2 t2 WHERE (t1.key = t2.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table_key_1 +POSTHOOK: Input: default@table_key_2 +#### A PARTIAL masked pattern was here #### data/warehouse/table_key_2/.hive-staging +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +0 val_0 0 val_0 +2 val_2 2 val_2 +4 val_4 4 val_4 +5 
val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +5 val_5 5 val_5 +8 val_8 8 val_8 +9 val_9 9 val_9 +10 val_10 10 val_10 +11 val_11 11 val_11 +12 val_12 12 val_12 +12 val_12 12 val_12 +12 val_12 12 val_12 +12 val_12 12 val_12 +15 val_15 15 val_15 +15 val_15 15 val_15 +15 val_15 15 val_15 +15 val_15 15 val_15 +17 val_17 17 val_17 +18 val_18 18 val_18 +18 val_18 18 val_18 +18 val_18 18 val_18 +18 val_18 18 val_18 +19 val_19 19 val_19 +20 val_20 20 val_20 +24 val_24 24 val_24 +24 val_24 24 val_24 +24 val_24 24 val_24 +24 val_24 24 val_24 +26 val_26 26 val_26 +26 val_26 26 val_26 +26 val_26 26 val_26 +26 val_26 26 val_26 +27 val_27 27 val_27 +28 val_28 28 val_28 +30 val_30 30 val_30 +33 val_33 33 val_33 +34 val_34 34 val_34 +35 val_35 35 val_35 +35 val_35 35 val_35 +35 val_35 35 val_35 +35 val_35 35 val_35 +35 val_35 35 val_35 +35 val_35 35 val_35 +35 val_35 35 val_35 +35 val_35 35 val_35 +35 val_35 35 val_35 +37 val_37 37 val_37 +37 val_37 37 val_37 +37 val_37 37 val_37 +37 val_37 37 val_37 +41 val_41 41 val_41 +42 val_42 42 val_42 +42 val_42 42 val_42 +42 val_42 42 val_42 +42 val_42 42 val_42 +43 val_43 43 val_43 +44 val_44 44 val_44 +47 val_47 47 val_47 +51 val_51 51 val_51 +51 val_51 51 val_51 +51 val_51 51 val_51 +51 val_51 51 val_51 +53 val_53 53 val_53 +54 val_54 54 val_54 +57 val_57 57 val_57 +58 val_58 58 val_58 +58 val_58 58 val_58 +58 val_58 58 val_58 +58 val_58 58 val_58 +64 val_64 64 val_64 +65 val_65 65 val_65 +66 val_66 66 val_66 +67 val_67 67 val_67 +67 val_67 67 val_67 +67 val_67 67 val_67 +67 val_67 67 val_67 +69 val_69 69 val_69 +70 val_70 70 val_70 +70 val_70 70 val_70 +70 val_70 70 val_70 +70 val_70 70 val_70 +70 val_70 70 val_70 +70 val_70 70 val_70 +70 val_70 70 val_70 +70 val_70 70 val_70 +70 val_70 70 val_70 +72 val_72 72 val_72 +72 val_72 72 val_72 +72 val_72 72 val_72 +72 val_72 72 val_72 +74 val_74 74 val_74 +76 val_76 76 val_76 +76 val_76 76 val_76 +76 val_76 76 val_76 +76 val_76 76 val_76 +77 val_77 77 val_77 +78 val_78 78 val_78 +80 val_80 80 val_80 +82 val_82 82 val_82 +83 val_83 83 val_83 +83 val_83 83 val_83 +83 val_83 83 val_83 +83 val_83 83 val_83 +84 val_84 84 val_84 +84 val_84 84 val_84 +84 val_84 84 val_84 +84 val_84 84 val_84 +85 val_85 85 val_85 +86 val_86 86 val_86 +87 val_87 87 val_87 +90 val_90 90 val_90 +90 val_90 90 val_90 +90 val_90 90 val_90 +90 val_90 90 val_90 +90 val_90 90 val_90 +90 val_90 90 val_90 +90 val_90 90 val_90 +90 val_90 90 val_90 +90 val_90 90 val_90 +92 val_92 92 val_92 +95 val_95 95 val_95 +95 val_95 95 val_95 +95 val_95 95 val_95 +95 val_95 95 val_95 +96 val_96 96 val_96 +97 val_97 97 val_97 +97 val_97 97 val_97 +97 val_97 97 val_97 +97 val_97 97 val_97 +98 val_98 98 val_98 +98 val_98 98 val_98 +98 val_98 98 val_98 +98 val_98 98 val_98 +100 val_100 100 val_100 +100 val_100 100 val_100 +100 val_100 100 val_100 +100 val_100 100 val_100 +103 val_103 103 val_103 +103 val_103 103 val_103 +103 val_103 103 val_103 +103 val_103 103 val_103 +104 val_104 104 val_104 +104 val_104 104 val_104 +104 val_104 104 val_104 +104 val_104 104 val_104 +105 val_105 105 val_105 +111 val_111 111 val_111 +113 val_113 113 val_113 +113 val_113 113 val_113 +113 val_113 113 val_113 +113 val_113 113 val_113 +114 val_114 114 val_114 +116 val_116 116 val_116 +118 val_118 118 val_118 +118 val_118 118 val_118 +118 val_118 118 val_118 +118 val_118 118 val_118 +119 val_119 119 val_119 +119 val_119 119 val_119 +119 val_119 119 val_119 +119 val_119 119 val_119 +119 val_119 119 val_119 
+119 val_119 119 val_119 +119 val_119 119 val_119 +119 val_119 119 val_119 +119 val_119 119 val_119 +120 val_120 120 val_120 +120 val_120 120 val_120 +120 val_120 120 val_120 +120 val_120 120 val_120 +125 val_125 125 val_125 +125 val_125 125 val_125 +125 val_125 125 val_125 +125 val_125 125 val_125 +126 val_126 126 val_126 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +128 val_128 128 val_128 +129 val_129 129 val_129 +129 val_129 129 val_129 +129 val_129 129 val_129 +129 val_129 129 val_129 +131 val_131 131 val_131 +133 val_133 133 val_133 +134 val_134 134 val_134 +134 val_134 134 val_134 +134 val_134 134 val_134 +134 val_134 134 val_134 +136 val_136 136 val_136 +137 val_137 137 val_137 +137 val_137 137 val_137 +137 val_137 137 val_137 +137 val_137 137 val_137 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +138 val_138 138 val_138 +143 val_143 143 val_143 +145 val_145 145 val_145 +146 val_146 146 val_146 +146 val_146 146 val_146 +146 val_146 146 val_146 +146 val_146 146 val_146 +149 val_149 149 val_149 +149 val_149 149 val_149 +149 val_149 149 val_149 +149 val_149 149 val_149 +150 val_150 150 val_150 +152 val_152 152 val_152 +152 val_152 152 val_152 +152 val_152 152 val_152 +152 val_152 152 val_152 +153 val_153 153 val_153 +155 val_155 155 val_155 +156 val_156 156 val_156 +157 val_157 157 val_157 +158 val_158 158 val_158 +160 val_160 160 val_160 +162 val_162 162 val_162 +163 val_163 163 val_163 +164 val_164 164 val_164 +164 val_164 164 val_164 +164 val_164 164 val_164 +164 val_164 164 val_164 +165 val_165 165 val_165 +165 val_165 165 val_165 +165 val_165 165 val_165 +165 val_165 165 val_165 +166 val_166 166 val_166 +167 val_167 167 val_167 +167 val_167 167 val_167 +167 val_167 167 val_167 +167 val_167 167 val_167 +167 val_167 167 val_167 +167 val_167 167 val_167 +167 val_167 167 val_167 +167 val_167 167 val_167 +167 val_167 167 val_167 +168 val_168 168 val_168 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +169 val_169 169 val_169 +170 val_170 170 val_170 +172 val_172 172 val_172 +172 val_172 172 val_172 +172 val_172 172 val_172 +172 val_172 172 val_172 +174 val_174 174 val_174 +174 val_174 174 val_174 +174 val_174 174 val_174 +174 val_174 174 val_174 +175 val_175 175 val_175 +175 val_175 175 val_175 +175 val_175 175 val_175 +175 val_175 175 val_175 +176 val_176 176 val_176 +176 val_176 176 val_176 +176 val_176 176 val_176 +176 val_176 176 val_176 +177 val_177 177 val_177 +178 val_178 178 val_178 +179 val_179 179 val_179 +179 val_179 179 val_179 +179 val_179 179 val_179 +179 val_179 179 val_179 +180 val_180 180 val_180 +181 val_181 181 val_181 +183 val_183 183 val_183 +186 val_186 186 val_186 +187 val_187 187 val_187 +187 val_187 187 val_187 +187 val_187 187 val_187 +187 val_187 187 val_187 +187 
val_187 187 val_187 +187 val_187 187 val_187 +187 val_187 187 val_187 +187 val_187 187 val_187 +187 val_187 187 val_187 +189 val_189 189 val_189 +190 val_190 190 val_190 +191 val_191 191 val_191 +191 val_191 191 val_191 +191 val_191 191 val_191 +191 val_191 191 val_191 +192 val_192 192 val_192 +193 val_193 193 val_193 +193 val_193 193 val_193 +193 val_193 193 val_193 +193 val_193 193 val_193 +193 val_193 193 val_193 +193 val_193 193 val_193 +193 val_193 193 val_193 +193 val_193 193 val_193 +193 val_193 193 val_193 +194 val_194 194 val_194 +195 val_195 195 val_195 +195 val_195 195 val_195 +195 val_195 195 val_195 +195 val_195 195 val_195 +196 val_196 196 val_196 +197 val_197 197 val_197 +197 val_197 197 val_197 +197 val_197 197 val_197 +197 val_197 197 val_197 +199 val_199 199 val_199 +199 val_199 199 val_199 +199 val_199 199 val_199 +199 val_199 199 val_199 +199 val_199 199 val_199 +199 val_199 199 val_199 +199 val_199 199 val_199 +199 val_199 199 val_199 +199 val_199 199 val_199 +200 val_200 200 val_200 +200 val_200 200 val_200 +200 val_200 200 val_200 +200 val_200 200 val_200 +201 val_201 201 val_201 +202 val_202 202 val_202 +203 val_203 203 val_203 +203 val_203 203 val_203 +203 val_203 203 val_203 +203 val_203 203 val_203 +205 val_205 205 val_205 +205 val_205 205 val_205 +205 val_205 205 val_205 +205 val_205 205 val_205 +207 val_207 207 val_207 +207 val_207 207 val_207 +207 val_207 207 val_207 +207 val_207 207 val_207 +208 val_208 208 val_208 +208 val_208 208 val_208 +208 val_208 208 val_208 +208 val_208 208 val_208 +208 val_208 208 val_208 +208 val_208 208 val_208 +208 val_208 208 val_208 +208 val_208 208 val_208 +208 val_208 208 val_208 +209 val_209 209 val_209 +209 val_209 209 val_209 +209 val_209 209 val_209 +209 val_209 209 val_209 +213 val_213 213 val_213 +213 val_213 213 val_213 +213 val_213 213 val_213 +213 val_213 213 val_213 +214 val_214 214 val_214 +216 val_216 216 val_216 +216 val_216 216 val_216 +216 val_216 216 val_216 +216 val_216 216 val_216 +217 val_217 217 val_217 +217 val_217 217 val_217 +217 val_217 217 val_217 +217 val_217 217 val_217 +218 val_218 218 val_218 +219 val_219 219 val_219 +219 val_219 219 val_219 +219 val_219 219 val_219 +219 val_219 219 val_219 +221 val_221 221 val_221 +221 val_221 221 val_221 +221 val_221 221 val_221 +221 val_221 221 val_221 +222 val_222 222 val_222 +223 val_223 223 val_223 +223 val_223 223 val_223 +223 val_223 223 val_223 +223 val_223 223 val_223 +224 val_224 224 val_224 +224 val_224 224 val_224 +224 val_224 224 val_224 +224 val_224 224 val_224 +226 val_226 226 val_226 +228 val_228 228 val_228 +229 val_229 229 val_229 +229 val_229 229 val_229 +229 val_229 229 val_229 +229 val_229 229 val_229 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +230 val_230 230 val_230 +233 val_233 233 val_233 +233 val_233 233 val_233 +233 val_233 233 val_233 +233 val_233 233 val_233 +235 val_235 235 val_235 +237 val_237 237 val_237 +237 val_237 237 val_237 +237 val_237 237 val_237 +237 val_237 237 val_237 +238 
val_238 238 val_238 +238 val_238 238 val_238 +238 val_238 238 val_238 +238 val_238 238 val_238 +239 val_239 239 val_239 +239 val_239 239 val_239 +239 val_239 239 val_239 +239 val_239 239 val_239 +241 val_241 241 val_241 +242 val_242 242 val_242 +242 val_242 242 val_242 +242 val_242 242 val_242 +242 val_242 242 val_242 +244 val_244 244 val_244 +247 val_247 247 val_247 +248 val_248 248 val_248 +249 val_249 249 val_249 +252 val_252 252 val_252 +255 val_255 255 val_255 +255 val_255 255 val_255 +255 val_255 255 val_255 +255 val_255 255 val_255 +256 val_256 256 val_256 +256 val_256 256 val_256 +256 val_256 256 val_256 +256 val_256 256 val_256 +257 val_257 257 val_257 +258 val_258 258 val_258 +260 val_260 260 val_260 +262 val_262 262 val_262 +263 val_263 263 val_263 +265 val_265 265 val_265 +265 val_265 265 val_265 +265 val_265 265 val_265 +265 val_265 265 val_265 +266 val_266 266 val_266 +272 val_272 272 val_272 +272 val_272 272 val_272 +272 val_272 272 val_272 +272 val_272 272 val_272 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +274 val_274 274 val_274 +275 val_275 275 val_275 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +277 val_277 277 val_277 +278 val_278 278 val_278 +278 val_278 278 val_278 +278 val_278 278 val_278 +278 val_278 278 val_278 +280 val_280 280 val_280 +280 val_280 280 val_280 +280 val_280 280 val_280 +280 val_280 280 val_280 +281 val_281 281 val_281 +281 val_281 281 val_281 +281 val_281 281 val_281 +281 val_281 281 val_281 +282 val_282 282 val_282 +282 val_282 282 val_282 +282 val_282 282 val_282 +282 val_282 282 val_282 +283 val_283 283 val_283 +284 val_284 284 val_284 +285 val_285 285 val_285 +286 val_286 286 val_286 +287 val_287 287 val_287 +288 val_288 288 val_288 +288 val_288 288 val_288 +288 val_288 288 val_288 +288 val_288 288 val_288 +289 val_289 289 val_289 +291 val_291 291 val_291 +292 val_292 292 val_292 +296 val_296 296 val_296 +298 val_298 298 val_298 +298 val_298 298 val_298 +298 val_298 298 val_298 +298 val_298 298 val_298 +298 val_298 298 val_298 +298 val_298 298 val_298 +298 val_298 298 val_298 +298 val_298 298 val_298 +298 val_298 298 val_298 +302 val_302 302 val_302 +305 val_305 305 val_305 +306 val_306 306 val_306 +307 val_307 307 val_307 +307 val_307 307 val_307 +307 val_307 307 val_307 +307 val_307 307 val_307 +308 val_308 308 val_308 +309 val_309 309 val_309 +309 val_309 309 val_309 +309 val_309 309 val_309 +309 val_309 309 val_309 +310 val_310 310 val_310 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +315 val_315 315 val_315 +316 val_316 316 val_316 +316 val_316 316 val_316 +316 val_316 316 val_316 +316 val_316 316 val_316 +316 val_316 316 val_316 +316 val_316 316 val_316 +316 val_316 316 val_316 +316 val_316 316 val_316 +316 val_316 316 val_316 +317 val_317 317 val_317 +317 val_317 317 val_317 +317 val_317 317 val_317 +317 val_317 317 val_317 +318 val_318 318 val_318 +318 
val_318 318 val_318 +318 val_318 318 val_318 +318 val_318 318 val_318 +318 val_318 318 val_318 +318 val_318 318 val_318 +318 val_318 318 val_318 +318 val_318 318 val_318 +318 val_318 318 val_318 +321 val_321 321 val_321 +321 val_321 321 val_321 +321 val_321 321 val_321 +321 val_321 321 val_321 +322 val_322 322 val_322 +322 val_322 322 val_322 +322 val_322 322 val_322 +322 val_322 322 val_322 +323 val_323 323 val_323 +325 val_325 325 val_325 +325 val_325 325 val_325 +325 val_325 325 val_325 +325 val_325 325 val_325 +327 val_327 327 val_327 +327 val_327 327 val_327 +327 val_327 327 val_327 +327 val_327 327 val_327 +327 val_327 327 val_327 +327 val_327 327 val_327 +327 val_327 327 val_327 +327 val_327 327 val_327 +327 val_327 327 val_327 +331 val_331 331 val_331 +331 val_331 331 val_331 +331 val_331 331 val_331 +331 val_331 331 val_331 +332 val_332 332 val_332 +333 val_333 333 val_333 +333 val_333 333 val_333 +333 val_333 333 val_333 +333 val_333 333 val_333 +335 val_335 335 val_335 +336 val_336 336 val_336 +338 val_338 338 val_338 +339 val_339 339 val_339 +341 val_341 341 val_341 +342 val_342 342 val_342 +342 val_342 342 val_342 +342 val_342 342 val_342 +342 val_342 342 val_342 +344 val_344 344 val_344 +344 val_344 344 val_344 +344 val_344 344 val_344 +344 val_344 344 val_344 +345 val_345 345 val_345 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +348 val_348 348 val_348 +351 val_351 351 val_351 +353 val_353 353 val_353 +353 val_353 353 val_353 +353 val_353 353 val_353 +353 val_353 353 val_353 +356 val_356 356 val_356 +360 val_360 360 val_360 +362 val_362 362 val_362 +364 val_364 364 val_364 +365 val_365 365 val_365 +366 val_366 366 val_366 +367 val_367 367 val_367 +367 val_367 367 val_367 +367 val_367 367 val_367 +367 val_367 367 val_367 +368 val_368 368 val_368 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +369 val_369 369 val_369 +373 val_373 373 val_373 +374 val_374 374 val_374 +375 val_375 375 val_375 +377 val_377 377 val_377 +378 val_378 378 val_378 +379 val_379 379 val_379 +382 val_382 382 val_382 +382 val_382 382 val_382 +382 val_382 382 val_382 +382 val_382 382 val_382 +384 val_384 384 val_384 +384 val_384 384 val_384 +384 val_384 384 val_384 +384 val_384 384 val_384 +384 val_384 384 val_384 +384 val_384 384 val_384 +384 val_384 384 val_384 +384 val_384 384 val_384 +384 val_384 384 val_384 +386 val_386 386 val_386 +389 val_389 389 val_389 +392 val_392 392 val_392 +393 val_393 393 val_393 +394 val_394 394 val_394 +395 val_395 395 val_395 +395 val_395 395 val_395 +395 val_395 395 val_395 +395 val_395 395 val_395 +396 val_396 396 val_396 +396 val_396 396 val_396 +396 val_396 396 val_396 +396 val_396 396 val_396 +396 val_396 396 val_396 +396 val_396 396 val_396 +396 val_396 396 val_396 +396 val_396 396 val_396 +396 val_396 396 val_396 +397 val_397 397 val_397 +397 val_397 397 val_397 +397 
val_397 397 val_397 +397 val_397 397 val_397 +399 val_399 399 val_399 +399 val_399 399 val_399 +399 val_399 399 val_399 +399 val_399 399 val_399 +400 val_400 400 val_400 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +402 val_402 402 val_402 +403 val_403 403 val_403 +403 val_403 403 val_403 +403 val_403 403 val_403 +403 val_403 403 val_403 +403 val_403 403 val_403 +403 val_403 403 val_403 +403 val_403 403 val_403 +403 val_403 403 val_403 +403 val_403 403 val_403 +404 val_404 404 val_404 +404 val_404 404 val_404 +404 val_404 404 val_404 +404 val_404 404 val_404 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +407 val_407 407 val_407 +409 val_409 409 val_409 +409 val_409 409 val_409 +409 val_409 409 val_409 +409 val_409 409 val_409 +409 val_409 409 val_409 +409 val_409 409 val_409 +409 val_409 409 val_409 +409 val_409 409 val_409 +409 val_409 409 val_409 +411 val_411 411 val_411 +413 val_413 413 val_413 +413 val_413 413 val_413 +413 val_413 413 val_413 +413 val_413 413 val_413 +414 val_414 414 val_414 +414 val_414 414 val_414 +414 val_414 414 val_414 +414 val_414 414 val_414 +417 val_417 417 val_417 +417 val_417 417 val_417 +417 val_417 417 val_417 +417 val_417 417 val_417 +417 val_417 417 val_417 +417 val_417 417 val_417 +417 val_417 417 val_417 +417 val_417 417 val_417 +417 val_417 417 val_417 +418 val_418 418 val_418 +419 val_419 419 val_419 +421 val_421 421 val_421 +424 val_424 424 val_424 +424 val_424 424 val_424 +424 val_424 424 val_424 +424 val_424 424 val_424 +427 val_427 427 val_427 +429 val_429 429 val_429 +429 val_429 429 val_429 +429 val_429 429 val_429 +429 val_429 429 val_429 +430 val_430 430 val_430 +430 val_430 430 val_430 +430 val_430 430 val_430 +430 val_430 430 val_430 +430 val_430 430 val_430 +430 val_430 430 val_430 +430 val_430 430 val_430 +430 val_430 430 val_430 +430 val_430 430 val_430 +431 val_431 431 val_431 +431 val_431 431 val_431 +431 val_431 431 val_431 +431 val_431 431 val_431 +431 val_431 431 val_431 +431 val_431 431 val_431 +431 val_431 431 val_431 +431 val_431 431 val_431 +431 val_431 431 val_431 +432 val_432 432 val_432 +435 val_435 435 val_435 +436 val_436 436 val_436 +437 val_437 437 val_437 +438 val_438 438 val_438 +438 val_438 438 val_438 +438 val_438 438 val_438 +438 val_438 438 val_438 +438 val_438 438 val_438 +438 val_438 438 val_438 +438 val_438 438 val_438 +438 val_438 438 val_438 +438 val_438 438 val_438 +439 val_439 439 val_439 +439 val_439 439 val_439 +439 val_439 439 val_439 +439 val_439 439 val_439 +443 val_443 443 val_443 +444 val_444 444 val_444 +446 val_446 446 val_446 +448 val_448 448 val_448 +449 val_449 449 val_449 +452 
val_452 452 val_452 +453 val_453 453 val_453 +454 val_454 454 val_454 +454 val_454 454 val_454 +454 val_454 454 val_454 +454 val_454 454 val_454 +454 val_454 454 val_454 +454 val_454 454 val_454 +454 val_454 454 val_454 +454 val_454 454 val_454 +454 val_454 454 val_454 +455 val_455 455 val_455 +457 val_457 457 val_457 +458 val_458 458 val_458 +458 val_458 458 val_458 +458 val_458 458 val_458 +458 val_458 458 val_458 +459 val_459 459 val_459 +459 val_459 459 val_459 +459 val_459 459 val_459 +459 val_459 459 val_459 +460 val_460 460 val_460 +462 val_462 462 val_462 +462 val_462 462 val_462 +462 val_462 462 val_462 +462 val_462 462 val_462 +463 val_463 463 val_463 +463 val_463 463 val_463 +463 val_463 463 val_463 +463 val_463 463 val_463 +466 val_466 466 val_466 +466 val_466 466 val_466 +466 val_466 466 val_466 +466 val_466 466 val_466 +466 val_466 466 val_466 +466 val_466 466 val_466 +466 val_466 466 val_466 +466 val_466 466 val_466 +466 val_466 466 val_466 +467 val_467 467 val_467 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +468 val_468 468 val_468 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +469 val_469 469 val_469 +470 val_470 470 val_470 +472 val_472 472 val_472 +475 val_475 475 val_475 +477 val_477 477 val_477 +478 val_478 478 val_478 +478 val_478 478 val_478 +478 val_478 478 val_478 +478 val_478 478 val_478 +479 val_479 479 val_479 +480 val_480 480 val_480 +480 val_480 480 val_480 +480 val_480 480 val_480 +480 val_480 480 val_480 +480 val_480 480 val_480 +480 val_480 480 val_480 +480 val_480 480 val_480 +480 val_480 480 val_480 +480 val_480 480 val_480 +481 val_481 481 val_481 +482 val_482 482 val_482 +483 val_483 483 val_483 +484 val_484 484 val_484 +485 val_485 485 val_485 +487 val_487 487 val_487 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +489 val_489 489 val_489 +490 val_490 490 val_490 +491 val_491 491 val_491 +492 val_492 492 val_492 +492 val_492 492 val_492 +492 val_492 492 val_492 +492 val_492 492 val_492 +493 val_493 493 val_493 +494 val_494 494 val_494 +495 val_495 495 val_495 +496 val_496 496 val_496 +497 val_497 497 val_497 +498 val_498 498 val_498 +498 val_498 498 val_498 +498 val_498 498 val_498 +498 val_498 498 val_498 +498 val_498 498 val_498 +498 val_498 498 val_498 +498 val_498 498 val_498 +498 val_498 498 val_498 +498 val_498 498 val_498 +PREHOOK: query: DROP TABLE 
table_key_1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@table_key_1 +PREHOOK: Output: default@table_key_1 +POSTHOOK: query: DROP TABLE table_key_1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@table_key_1 +POSTHOOK: Output: default@table_key_1 +PREHOOK: query: DROP TABLE table_key_2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@table_key_2 +PREHOOK: Output: default@table_key_2 +POSTHOOK: query: DROP TABLE table_key_2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@table_key_2 +POSTHOOK: Output: default@table_key_2 +Encryption key deleted: 'key_1' +Encryption key deleted: 'key_2' diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_load_data_to_encrypted_tables.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_load_data_to_encrypted_tables.q.out new file mode 100644 index 0000000..46f36d8 --- /dev/null +++ b/ql/src/test/results/clientpositive/encrypted/encryption_load_data_to_encrypted_tables.q.out @@ -0,0 +1,1059 @@ +PREHOOK: query: DROP TABLE IF EXISTS encrypted_table +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS encrypted_table +POSTHOOK: type: DROPTABLE +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@encrypted_table +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@encrypted_table +Encryption key created: 'key1' +Encryption zone created: '/build/ql/test/data/warehouse/encrypted_table' using key: 'key1' +PREHOOK: query: -- Test loading data from the local filesystem; +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE encrypted_table +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@encrypted_table +POSTHOOK: query: -- Test loading data from the local filesystem; +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE encrypted_table +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@encrypted_table +PREHOOK: query: SELECT * FROM encrypted_table +PREHOOK: type: QUERY +PREHOOK: Input: default@encrypted_table +#### A PARTIAL masked pattern was here #### data/warehouse/encrypted_table/.hive-staging +POSTHOOK: query: SELECT * FROM encrypted_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@encrypted_table +#### A PARTIAL masked pattern was here #### data/warehouse/encrypted_table/.hive-staging +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 
+155 val_155 +203 val_203 +339 val_339 +0 val_0 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +0 val_0 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +0 val_0 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 
val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@encrypted_table +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@encrypted_table +PREHOOK: query: SELECT * FROM encrypted_table +PREHOOK: type: QUERY +PREHOOK: Input: default@encrypted_table +#### A PARTIAL masked pattern was here #### data/warehouse/encrypted_table/.hive-staging +POSTHOOK: query: SELECT * FROM encrypted_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@encrypted_table +#### A PARTIAL masked pattern was here #### data/warehouse/encrypted_table/.hive-staging +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 val_98 +484 val_484 +265 val_265 +193 val_193 +401 val_401 +150 val_150 +273 val_273 +224 val_224 +369 val_369 +66 val_66 +128 val_128 +213 val_213 +146 val_146 +406 val_406 +429 val_429 +374 val_374 +152 val_152 +469 val_469 +145 val_145 +495 val_495 +37 val_37 +327 val_327 +281 val_281 +277 val_277 +209 val_209 +15 val_15 +82 val_82 +403 val_403 +166 val_166 +417 val_417 +430 val_430 +252 val_252 +292 val_292 +219 val_219 +287 val_287 +153 val_153 +193 val_193 +338 val_338 +446 val_446 +459 val_459 +394 val_394 +237 val_237 +482 val_482 +174 val_174 +413 val_413 +494 val_494 +207 val_207 +199 val_199 +466 val_466 +208 val_208 +174 val_174 +399 val_399 +396 val_396 +247 val_247 +417 val_417 +489 val_489 +162 val_162 +377 val_377 +397 val_397 +309 val_309 +365 val_365 +266 val_266 +439 val_439 +342 val_342 +367 val_367 +325 val_325 +167 val_167 +195 val_195 +475 val_475 +17 val_17 +113 val_113 +155 val_155 +203 val_203 +339 val_339 +0 val_0 +455 val_455 +128 val_128 +311 val_311 +316 val_316 +57 val_57 +302 
val_302 +205 val_205 +149 val_149 +438 val_438 +345 val_345 +129 val_129 +170 val_170 +20 val_20 +489 val_489 +157 val_157 +378 val_378 +221 val_221 +92 val_92 +111 val_111 +47 val_47 +72 val_72 +4 val_4 +280 val_280 +35 val_35 +427 val_427 +277 val_277 +208 val_208 +356 val_356 +399 val_399 +169 val_169 +382 val_382 +498 val_498 +125 val_125 +386 val_386 +437 val_437 +469 val_469 +192 val_192 +286 val_286 +187 val_187 +176 val_176 +54 val_54 +459 val_459 +51 val_51 +138 val_138 +103 val_103 +239 val_239 +213 val_213 +216 val_216 +430 val_430 +278 val_278 +176 val_176 +289 val_289 +221 val_221 +65 val_65 +318 val_318 +332 val_332 +311 val_311 +275 val_275 +137 val_137 +241 val_241 +83 val_83 +333 val_333 +180 val_180 +284 val_284 +12 val_12 +230 val_230 +181 val_181 +67 val_67 +260 val_260 +404 val_404 +384 val_384 +489 val_489 +353 val_353 +373 val_373 +272 val_272 +138 val_138 +217 val_217 +84 val_84 +348 val_348 +466 val_466 +58 val_58 +8 val_8 +411 val_411 +230 val_230 +208 val_208 +348 val_348 +24 val_24 +463 val_463 +431 val_431 +179 val_179 +172 val_172 +42 val_42 +129 val_129 +158 val_158 +119 val_119 +496 val_496 +0 val_0 +322 val_322 +197 val_197 +468 val_468 +393 val_393 +454 val_454 +100 val_100 +298 val_298 +199 val_199 +191 val_191 +418 val_418 +96 val_96 +26 val_26 +165 val_165 +327 val_327 +230 val_230 +205 val_205 +120 val_120 +131 val_131 +51 val_51 +404 val_404 +43 val_43 +436 val_436 +156 val_156 +469 val_469 +468 val_468 +308 val_308 +95 val_95 +196 val_196 +288 val_288 +481 val_481 +457 val_457 +98 val_98 +282 val_282 +197 val_197 +187 val_187 +318 val_318 +318 val_318 +409 val_409 +470 val_470 +137 val_137 +369 val_369 +316 val_316 +169 val_169 +413 val_413 +85 val_85 +77 val_77 +0 val_0 +490 val_490 +87 val_87 +364 val_364 +179 val_179 +118 val_118 +134 val_134 +395 val_395 +282 val_282 +138 val_138 +238 val_238 +419 val_419 +15 val_15 +118 val_118 +72 val_72 +90 val_90 +307 val_307 +19 val_19 +435 val_435 +10 val_10 +277 val_277 +273 val_273 +306 val_306 +224 val_224 +309 val_309 +389 val_389 +327 val_327 +242 val_242 +369 val_369 +392 val_392 +272 val_272 +331 val_331 +401 val_401 +242 val_242 +452 val_452 +177 val_177 +226 val_226 +5 val_5 +497 val_497 +402 val_402 +396 val_396 +317 val_317 +395 val_395 +58 val_58 +35 val_35 +336 val_336 +95 val_95 +11 val_11 +168 val_168 +34 val_34 +229 val_229 +233 val_233 +143 val_143 +472 val_472 +322 val_322 +498 val_498 +160 val_160 +195 val_195 +42 val_42 +321 val_321 +430 val_430 +119 val_119 +489 val_489 +458 val_458 +78 val_78 +76 val_76 +41 val_41 +223 val_223 +492 val_492 +149 val_149 +449 val_449 +218 val_218 +228 val_228 +138 val_138 +453 val_453 +30 val_30 +209 val_209 +64 val_64 +468 val_468 +76 val_76 +74 val_74 +342 val_342 +69 val_69 +230 val_230 +33 val_33 +368 val_368 +103 val_103 +296 val_296 +113 val_113 +216 val_216 +367 val_367 +344 val_344 +167 val_167 +274 val_274 +219 val_219 +239 val_239 +485 val_485 +116 val_116 +223 val_223 +256 val_256 +263 val_263 +70 val_70 +487 val_487 +480 val_480 +401 val_401 +288 val_288 +191 val_191 +5 val_5 +244 val_244 +438 val_438 +128 val_128 +467 val_467 +432 val_432 +202 val_202 +316 val_316 +229 val_229 +469 val_469 +463 val_463 +280 val_280 +2 val_2 +35 val_35 +283 val_283 +331 val_331 +235 val_235 +80 val_80 +44 val_44 +193 val_193 +321 val_321 +335 val_335 +104 val_104 +466 val_466 +366 val_366 +175 val_175 +403 val_403 +483 val_483 +53 val_53 +105 val_105 +257 val_257 +406 val_406 +409 val_409 +190 val_190 +406 val_406 +401 val_401 +114 val_114 +258 val_258 +90 
val_90 +203 val_203 +262 val_262 +348 val_348 +424 val_424 +12 val_12 +396 val_396 +201 val_201 +217 val_217 +164 val_164 +431 val_431 +454 val_454 +478 val_478 +298 val_298 +125 val_125 +431 val_431 +164 val_164 +424 val_424 +187 val_187 +382 val_382 +5 val_5 +70 val_70 +397 val_397 +480 val_480 +291 val_291 +24 val_24 +351 val_351 +255 val_255 +104 val_104 +70 val_70 +163 val_163 +438 val_438 +119 val_119 +414 val_414 +200 val_200 +491 val_491 +237 val_237 +439 val_439 +360 val_360 +248 val_248 +479 val_479 +305 val_305 +417 val_417 +199 val_199 +444 val_444 +120 val_120 +429 val_429 +169 val_169 +443 val_443 +323 val_323 +325 val_325 +277 val_277 +230 val_230 +478 val_478 +178 val_178 +468 val_468 +310 val_310 +317 val_317 +333 val_333 +493 val_493 +460 val_460 +207 val_207 +249 val_249 +265 val_265 +480 val_480 +83 val_83 +136 val_136 +353 val_353 +172 val_172 +214 val_214 +462 val_462 +233 val_233 +406 val_406 +133 val_133 +175 val_175 +189 val_189 +454 val_454 +375 val_375 +401 val_401 +421 val_421 +407 val_407 +384 val_384 +256 val_256 +26 val_26 +134 val_134 +67 val_67 +384 val_384 +379 val_379 +18 val_18 +462 val_462 +492 val_492 +100 val_100 +298 val_298 +9 val_9 +341 val_341 +498 val_498 +146 val_146 +458 val_458 +362 val_362 +186 val_186 +285 val_285 +348 val_348 +167 val_167 +18 val_18 +273 val_273 +183 val_183 +281 val_281 +344 val_344 +97 val_97 +469 val_469 +315 val_315 +84 val_84 +28 val_28 +37 val_37 +448 val_448 +152 val_152 +348 val_348 +307 val_307 +194 val_194 +414 val_414 +477 val_477 +222 val_222 +126 val_126 +90 val_90 +169 val_169 +403 val_403 +400 val_400 +200 val_200 +97 val_97 +PREHOOK: query: DROP TABLE encrypted_table +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@encrypted_table +PREHOOK: Output: default@encrypted_table +POSTHOOK: query: DROP TABLE encrypted_table +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@encrypted_table +POSTHOOK: Output: default@encrypted_table +Encryption key deleted: 'key1' diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_encrypted_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_encrypted_tbl.q.out new file mode 100644 index 0000000..c4c0dde --- /dev/null +++ b/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_encrypted_tbl.q.out @@ -0,0 +1,45 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +DROP TABLE IF EXISTS encrypted_table +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- SORT_QUERY_RESULTS + +DROP TABLE IF EXISTS encrypted_table +POSTHOOK: type: DROPTABLE +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@encrypted_table +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@encrypted_table +Encryption key created: 'key_128' +Encryption zone created: '/build/ql/test/data/warehouse/default/encrypted_table' using key: 'key_128' +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE encrypted_table +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@encrypted_table +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE encrypted_table +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@encrypted_table +PREHOOK: query: SELECT count(*) FROM encrypted_table +PREHOOK: type: QUERY +PREHOOK: Input: 
default@encrypted_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT count(*) FROM encrypted_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@encrypted_table +#### A masked pattern was here #### +500 +PREHOOK: query: drop table encrypted_table +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@encrypted_table +PREHOOK: Output: default@encrypted_table +POSTHOOK: query: drop table encrypted_table +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@encrypted_table +POSTHOOK: Output: default@encrypted_table diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_unencrypted_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_unencrypted_tbl.q.out new file mode 100644 index 0000000..34c152f --- /dev/null +++ b/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_unencrypted_tbl.q.out @@ -0,0 +1,43 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +DROP TABLE IF EXISTS unencrypted_table +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- SORT_QUERY_RESULTS + +DROP TABLE IF EXISTS unencrypted_table +POSTHOOK: type: DROPTABLE +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@unencrypted_table +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@unencrypted_table +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE unencrypted_table +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@unencrypted_table +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE unencrypted_table +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@unencrypted_table +PREHOOK: query: SELECT count(*) FROM unencrypted_table +PREHOOK: type: QUERY +PREHOOK: Input: default@unencrypted_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT count(*) FROM unencrypted_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@unencrypted_table +#### A masked pattern was here #### +500 +PREHOOK: query: drop table unencrypted_table +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@unencrypted_table +PREHOOK: Output: default@unencrypted_table +POSTHOOK: query: drop table unencrypted_table +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@unencrypted_table +POSTHOOK: Output: default@unencrypted_table diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out index 75f85c3..6edb911 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out @@ -277,6 +277,1959 @@ POSTHOOK: Input: default@test_table2@ds=1 POSTHOOK: Output: default@test_table3@ds=1 POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value SIMPLE [(test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM test_table1 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM test_table1 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +#### A masked 
pattern was here #### +0 val_0 1 +0 val_0 1 +0 val_0 1 +2 val_2 1 +4 val_4 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +8 val_8 1 +9 val_9 1 +10 val_10 1 +11 val_11 1 +12 val_12 1 +12 val_12 1 +15 val_15 1 +15 val_15 1 +17 val_17 1 +18 val_18 1 +18 val_18 1 +19 val_19 1 +20 val_20 1 +24 val_24 1 +24 val_24 1 +26 val_26 1 +26 val_26 1 +27 val_27 1 +28 val_28 1 +30 val_30 1 +33 val_33 1 +34 val_34 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +37 val_37 1 +37 val_37 1 +41 val_41 1 +42 val_42 1 +42 val_42 1 +43 val_43 1 +44 val_44 1 +47 val_47 1 +51 val_51 1 +51 val_51 1 +53 val_53 1 +54 val_54 1 +57 val_57 1 +58 val_58 1 +58 val_58 1 +64 val_64 1 +65 val_65 1 +66 val_66 1 +67 val_67 1 +67 val_67 1 +69 val_69 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +72 val_72 1 +72 val_72 1 +74 val_74 1 +76 val_76 1 +76 val_76 1 +77 val_77 1 +78 val_78 1 +80 val_80 1 +82 val_82 1 +83 val_83 1 +83 val_83 1 +84 val_84 1 +84 val_84 1 +85 val_85 1 +86 val_86 1 +87 val_87 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +92 val_92 1 +95 val_95 1 +95 val_95 1 +96 val_96 1 +97 val_97 1 +97 val_97 1 +98 val_98 1 +98 val_98 1 +100 val_100 1 +100 val_100 1 +103 val_103 1 +103 val_103 1 +104 val_104 1 +104 val_104 1 +105 val_105 1 +111 val_111 1 +113 val_113 1 +113 val_113 1 +114 val_114 1 +116 val_116 1 +118 val_118 1 +118 val_118 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +120 val_120 1 +120 val_120 1 +125 val_125 1 +125 val_125 1 +126 val_126 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +129 val_129 1 +129 val_129 1 +131 val_131 1 +133 val_133 1 +134 val_134 1 +134 val_134 1 +136 val_136 1 +137 val_137 1 +137 val_137 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +143 val_143 1 +145 val_145 1 +146 val_146 1 +146 val_146 1 +149 val_149 1 +149 val_149 1 +150 val_150 1 +152 val_152 1 +152 val_152 1 +153 val_153 1 +155 val_155 1 +156 val_156 1 +157 val_157 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +163 val_163 1 +164 val_164 1 +164 val_164 1 +165 val_165 1 +165 val_165 1 +166 val_166 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +168 val_168 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +170 val_170 1 +172 val_172 1 +172 val_172 1 +174 val_174 1 +174 val_174 1 +175 val_175 1 +175 val_175 1 +176 val_176 1 +176 val_176 1 +177 val_177 1 +178 val_178 1 +179 val_179 1 +179 val_179 1 +180 val_180 1 +181 val_181 1 +183 val_183 1 +186 val_186 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +189 val_189 1 +190 val_190 1 +191 val_191 1 +191 val_191 1 +192 val_192 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +194 val_194 1 +195 val_195 1 +195 val_195 1 +196 val_196 1 +197 val_197 1 +197 val_197 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +200 val_200 1 +200 val_200 1 +201 val_201 1 +202 val_202 1 +203 val_203 1 +203 val_203 1 +205 val_205 1 +205 val_205 1 +207 val_207 1 +207 val_207 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +209 val_209 1 +209 val_209 1 +213 val_213 1 +213 val_213 1 +214 val_214 1 +216 val_216 1 +216 val_216 1 +217 val_217 1 +217 val_217 1 +218 val_218 1 +219 val_219 1 +219 val_219 1 +221 val_221 1 +221 val_221 1 +222 val_222 1 +223 val_223 1 +223 val_223 1 +224 val_224 1 +224 val_224 1 +226 val_226 1 +228 val_228 1 +229 val_229 1 +229 val_229 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +233 val_233 1 +233 val_233 1 +235 val_235 1 +237 val_237 1 +237 val_237 1 +238 val_238 1 +238 val_238 1 +239 val_239 1 +239 val_239 1 +241 val_241 1 +242 val_242 1 +242 val_242 1 +244 val_244 1 +247 val_247 1 +248 val_248 1 +249 val_249 1 +252 val_252 1 +255 val_255 1 
+255 val_255 1 +256 val_256 1 +256 val_256 1 +257 val_257 1 +258 val_258 1 +260 val_260 1 +262 val_262 1 +263 val_263 1 +265 val_265 1 +265 val_265 1 +266 val_266 1 +272 val_272 1 +272 val_272 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +274 val_274 1 +275 val_275 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +278 val_278 1 +278 val_278 1 +280 val_280 1 +280 val_280 1 +281 val_281 1 +281 val_281 1 +282 val_282 1 +282 val_282 1 +283 val_283 1 +284 val_284 1 +285 val_285 1 +286 val_286 1 +287 val_287 1 +288 val_288 1 +288 val_288 1 +289 val_289 1 +291 val_291 1 +292 val_292 1 +296 val_296 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +302 val_302 1 +305 val_305 1 +306 val_306 1 +307 val_307 1 +307 val_307 1 +308 val_308 1 +309 val_309 1 +309 val_309 1 +310 val_310 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +315 val_315 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +317 val_317 1 +317 val_317 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +321 val_321 1 +321 val_321 1 +322 val_322 1 +322 val_322 1 +323 val_323 1 +325 val_325 1 +325 val_325 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +331 val_331 1 +331 val_331 1 +332 val_332 1 +333 val_333 1 +333 val_333 1 +335 val_335 1 +336 val_336 1 +338 val_338 1 +339 val_339 1 +341 val_341 1 +342 val_342 1 +342 val_342 1 +344 val_344 1 +344 val_344 1 +345 val_345 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +351 val_351 1 +353 val_353 1 +353 val_353 1 +356 val_356 1 +360 val_360 1 +362 val_362 1 +364 val_364 1 +365 val_365 1 +366 val_366 1 +367 val_367 1 +367 val_367 1 +368 val_368 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +373 val_373 1 +374 val_374 1 +375 val_375 1 +377 val_377 1 +378 val_378 1 +379 val_379 1 +382 val_382 1 +382 val_382 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +386 val_386 1 +389 val_389 1 +392 val_392 1 +393 val_393 1 +394 val_394 1 +395 val_395 1 +395 val_395 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +397 val_397 1 +397 val_397 1 +399 val_399 1 +399 val_399 1 +400 val_400 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +402 val_402 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +404 val_404 1 +404 val_404 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +407 val_407 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +411 val_411 1 +413 val_413 1 +413 val_413 1 +414 val_414 1 +414 val_414 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +418 val_418 1 +419 val_419 1 +421 val_421 1 +424 val_424 1 +424 val_424 1 +427 val_427 1 +429 val_429 1 +429 val_429 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +432 val_432 1 +435 val_435 1 +436 val_436 1 +437 val_437 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +439 val_439 1 +439 val_439 1 +443 val_443 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +449 val_449 1 +452 val_452 1 +453 val_453 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +455 val_455 1 +457 val_457 1 +458 val_458 1 +458 val_458 1 +459 val_459 1 +459 val_459 1 +460 val_460 1 +462 val_462 1 +462 val_462 1 +463 val_463 1 +463 val_463 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +467 val_467 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +470 val_470 1 +472 val_472 1 +475 val_475 1 +477 val_477 1 +478 val_478 1 +478 val_478 1 +479 val_479 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +481 val_481 1 +482 val_482 1 +483 val_483 1 +484 val_484 1 +485 val_485 1 +487 val_487 1 +489 val_489 1 
+489 val_489 1 +489 val_489 1 +489 val_489 1 +490 val_490 1 +491 val_491 1 +492 val_492 1 +492 val_492 1 +493 val_493 1 +494 val_494 1 +495 val_495 1 +496 val_496 1 +497 val_497 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +PREHOOK: query: SELECT * FROM test_table3 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM test_table3 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +2 val_2 1 +4 val_4 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +8 val_8 1 +9 val_9 1 +10 val_10 1 +11 val_11 1 +12 val_12 1 +12 val_12 1 +12 val_12 1 +12 val_12 1 +15 val_15 1 +15 val_15 1 +15 val_15 1 +15 val_15 1 +17 val_17 1 +18 val_18 1 +18 val_18 1 +18 val_18 1 +18 val_18 1 +19 val_19 1 +20 val_20 1 +24 val_24 1 +24 val_24 1 +24 val_24 1 +24 val_24 1 +26 val_26 1 +26 val_26 1 +26 val_26 1 +26 val_26 1 +27 val_27 1 +28 val_28 1 +30 val_30 1 +33 val_33 1 +34 val_34 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +37 val_37 1 +37 val_37 1 +37 val_37 1 +37 val_37 1 +41 val_41 1 +42 val_42 1 +42 val_42 1 +42 val_42 1 +42 val_42 1 +43 val_43 1 +44 val_44 1 +47 val_47 1 +51 val_51 1 +51 val_51 1 +51 val_51 1 +51 val_51 1 +53 val_53 1 +54 val_54 1 +57 val_57 1 +58 val_58 1 +58 val_58 1 +58 val_58 1 +58 val_58 1 +64 val_64 1 +65 val_65 1 +66 val_66 1 +67 val_67 1 +67 val_67 1 +67 val_67 1 +67 val_67 1 +69 val_69 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +72 val_72 1 +72 val_72 1 +72 val_72 1 +72 val_72 1 +74 val_74 1 +76 val_76 1 +76 val_76 1 +76 val_76 1 +76 val_76 1 +77 val_77 1 +78 val_78 1 +80 val_80 1 +82 val_82 1 +83 val_83 1 +83 val_83 1 +83 val_83 1 +83 val_83 1 +84 val_84 1 +84 val_84 1 +84 val_84 1 +84 val_84 1 +85 val_85 1 +86 val_86 1 +87 val_87 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +92 val_92 1 +95 val_95 1 +95 val_95 1 +95 val_95 1 +95 val_95 1 +96 val_96 1 +97 val_97 1 +97 val_97 1 +97 val_97 1 +97 val_97 1 +98 val_98 1 +98 val_98 1 +98 val_98 1 +98 val_98 1 +100 val_100 1 +100 val_100 1 +100 val_100 1 +100 val_100 1 +103 val_103 1 +103 val_103 1 +103 val_103 1 +103 val_103 1 +104 val_104 1 +104 val_104 1 +104 val_104 1 +104 val_104 1 +105 val_105 1 +111 val_111 1 +113 val_113 1 +113 val_113 1 +113 val_113 1 +113 val_113 1 +114 val_114 1 +116 val_116 1 +118 val_118 1 +118 val_118 1 +118 val_118 1 +118 val_118 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +120 val_120 1 +120 val_120 1 +120 val_120 1 +120 val_120 1 +125 val_125 1 +125 val_125 1 +125 val_125 1 +125 val_125 1 +126 val_126 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +129 val_129 1 +129 val_129 1 +129 val_129 1 +129 val_129 1 +131 val_131 1 +133 val_133 1 +134 val_134 1 +134 val_134 1 +134 val_134 1 +134 val_134 1 +136 val_136 1 +137 val_137 1 +137 val_137 1 +137 val_137 1 +137 val_137 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 
val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +143 val_143 1 +145 val_145 1 +146 val_146 1 +146 val_146 1 +146 val_146 1 +146 val_146 1 +149 val_149 1 +149 val_149 1 +149 val_149 1 +149 val_149 1 +150 val_150 1 +152 val_152 1 +152 val_152 1 +152 val_152 1 +152 val_152 1 +153 val_153 1 +155 val_155 1 +156 val_156 1 +157 val_157 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +163 val_163 1 +164 val_164 1 +164 val_164 1 +164 val_164 1 +164 val_164 1 +165 val_165 1 +165 val_165 1 +165 val_165 1 +165 val_165 1 +166 val_166 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +168 val_168 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +170 val_170 1 +172 val_172 1 +172 val_172 1 +172 val_172 1 +172 val_172 1 +174 val_174 1 +174 val_174 1 +174 val_174 1 +174 val_174 1 +175 val_175 1 +175 val_175 1 +175 val_175 1 +175 val_175 1 +176 val_176 1 +176 val_176 1 +176 val_176 1 +176 val_176 1 +177 val_177 1 +178 val_178 1 +179 val_179 1 +179 val_179 1 +179 val_179 1 +179 val_179 1 +180 val_180 1 +181 val_181 1 +183 val_183 1 +186 val_186 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +189 val_189 1 +190 val_190 1 +191 val_191 1 +191 val_191 1 +191 val_191 1 +191 val_191 1 +192 val_192 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +194 val_194 1 +195 val_195 1 +195 val_195 1 +195 val_195 1 +195 val_195 1 +196 val_196 1 +197 val_197 1 +197 val_197 1 +197 val_197 1 +197 val_197 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +200 val_200 1 +200 val_200 1 +200 val_200 1 +200 val_200 1 +201 val_201 1 +202 val_202 1 +203 val_203 1 +203 val_203 1 +203 val_203 1 +203 val_203 1 +205 val_205 1 +205 val_205 1 +205 val_205 1 +205 val_205 1 +207 val_207 1 +207 val_207 1 +207 val_207 1 +207 val_207 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +209 val_209 1 +209 val_209 1 +209 val_209 1 +209 val_209 1 +213 val_213 1 +213 val_213 1 +213 val_213 1 +213 val_213 1 +214 val_214 1 +216 val_216 1 +216 val_216 1 +216 val_216 1 +216 val_216 1 +217 val_217 1 +217 val_217 1 +217 val_217 1 +217 val_217 1 +218 val_218 1 +219 val_219 1 +219 val_219 1 +219 val_219 1 +219 val_219 1 +221 val_221 1 +221 val_221 1 +221 val_221 1 +221 val_221 1 +222 val_222 1 +223 val_223 1 +223 val_223 1 +223 val_223 1 +223 val_223 1 +224 val_224 1 +224 val_224 1 +224 val_224 1 +224 val_224 1 +226 val_226 1 +228 val_228 1 +229 val_229 1 +229 val_229 1 +229 val_229 1 +229 val_229 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +233 val_233 1 +233 val_233 1 +233 val_233 1 +233 
val_233 1 +235 val_235 1 +237 val_237 1 +237 val_237 1 +237 val_237 1 +237 val_237 1 +238 val_238 1 +238 val_238 1 +238 val_238 1 +238 val_238 1 +239 val_239 1 +239 val_239 1 +239 val_239 1 +239 val_239 1 +241 val_241 1 +242 val_242 1 +242 val_242 1 +242 val_242 1 +242 val_242 1 +244 val_244 1 +247 val_247 1 +248 val_248 1 +249 val_249 1 +252 val_252 1 +255 val_255 1 +255 val_255 1 +255 val_255 1 +255 val_255 1 +256 val_256 1 +256 val_256 1 +256 val_256 1 +256 val_256 1 +257 val_257 1 +258 val_258 1 +260 val_260 1 +262 val_262 1 +263 val_263 1 +265 val_265 1 +265 val_265 1 +265 val_265 1 +265 val_265 1 +266 val_266 1 +272 val_272 1 +272 val_272 1 +272 val_272 1 +272 val_272 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +274 val_274 1 +275 val_275 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +278 val_278 1 +278 val_278 1 +278 val_278 1 +278 val_278 1 +280 val_280 1 +280 val_280 1 +280 val_280 1 +280 val_280 1 +281 val_281 1 +281 val_281 1 +281 val_281 1 +281 val_281 1 +282 val_282 1 +282 val_282 1 +282 val_282 1 +282 val_282 1 +283 val_283 1 +284 val_284 1 +285 val_285 1 +286 val_286 1 +287 val_287 1 +288 val_288 1 +288 val_288 1 +288 val_288 1 +288 val_288 1 +289 val_289 1 +291 val_291 1 +292 val_292 1 +296 val_296 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +302 val_302 1 +305 val_305 1 +306 val_306 1 +307 val_307 1 +307 val_307 1 +307 val_307 1 +307 val_307 1 +308 val_308 1 +309 val_309 1 +309 val_309 1 +309 val_309 1 +309 val_309 1 +310 val_310 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +315 val_315 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +317 val_317 1 +317 val_317 1 +317 val_317 1 +317 val_317 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +321 val_321 1 +321 val_321 1 +321 val_321 1 +321 val_321 1 +322 val_322 1 +322 val_322 1 +322 val_322 1 +322 val_322 1 +323 val_323 1 +325 val_325 1 +325 val_325 1 +325 val_325 1 +325 val_325 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +331 val_331 1 +331 val_331 1 +331 val_331 1 +331 val_331 1 +332 val_332 1 +333 val_333 1 +333 val_333 1 +333 val_333 1 +333 val_333 1 +335 val_335 1 +336 val_336 1 +338 val_338 1 +339 val_339 1 +341 val_341 1 +342 val_342 1 +342 val_342 1 +342 val_342 1 +342 val_342 1 +344 val_344 1 +344 val_344 1 +344 val_344 1 +344 val_344 1 +345 val_345 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +351 val_351 1 +353 val_353 1 +353 val_353 1 +353 val_353 1 +353 val_353 1 +356 val_356 1 +360 val_360 1 +362 val_362 1 +364 val_364 1 +365 val_365 1 +366 val_366 1 +367 
val_367 1 +367 val_367 1 +367 val_367 1 +367 val_367 1 +368 val_368 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +373 val_373 1 +374 val_374 1 +375 val_375 1 +377 val_377 1 +378 val_378 1 +379 val_379 1 +382 val_382 1 +382 val_382 1 +382 val_382 1 +382 val_382 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +386 val_386 1 +389 val_389 1 +392 val_392 1 +393 val_393 1 +394 val_394 1 +395 val_395 1 +395 val_395 1 +395 val_395 1 +395 val_395 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +397 val_397 1 +397 val_397 1 +397 val_397 1 +397 val_397 1 +399 val_399 1 +399 val_399 1 +399 val_399 1 +399 val_399 1 +400 val_400 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +402 val_402 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +404 val_404 1 +404 val_404 1 +404 val_404 1 +404 val_404 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +407 val_407 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +411 val_411 1 +413 val_413 1 +413 val_413 1 +413 val_413 1 +413 val_413 1 +414 val_414 1 +414 val_414 1 +414 val_414 1 +414 val_414 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +418 val_418 1 +419 val_419 1 +421 val_421 1 +424 val_424 1 +424 val_424 1 +424 val_424 1 +424 val_424 1 +427 val_427 1 +429 val_429 1 +429 val_429 1 +429 val_429 1 +429 val_429 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +432 val_432 1 +435 val_435 1 +436 val_436 1 +437 val_437 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +439 val_439 1 +439 val_439 1 +439 val_439 1 +439 val_439 1 +443 val_443 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +449 val_449 1 +452 val_452 1 +453 val_453 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +455 val_455 1 +457 val_457 1 +458 val_458 1 +458 val_458 1 +458 val_458 1 +458 val_458 1 +459 val_459 1 +459 val_459 1 +459 val_459 1 +459 val_459 1 +460 val_460 1 +462 val_462 1 +462 val_462 1 +462 val_462 1 +462 val_462 1 +463 val_463 1 +463 val_463 1 +463 val_463 1 +463 val_463 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +467 val_467 1 +468 val_468 1 +468 val_468 1 +468 
val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +470 val_470 1 +472 val_472 1 +475 val_475 1 +477 val_477 1 +478 val_478 1 +478 val_478 1 +478 val_478 1 +478 val_478 1 +479 val_479 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +481 val_481 1 +482 val_482 1 +483 val_483 1 +484 val_484 1 +485 val_485 1 +487 val_487 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +490 val_490 1 +491 val_491 1 +492 val_492 1 +492 val_492 1 +492 val_492 1 +492 val_492 1 +493 val_493 1 +494 val_494 1 +495 val_495 1 +496 val_496 1 +497 val_497 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + test_table1 + TOK_TABLEBUCKETSAMPLE + 2 + 16 + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_ALLCOLREF + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: test_table1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: true + predicate: (((hash(key) & 2147483647) % 16) = 1) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string), ds (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2 + columns.types int:string:string + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: 000001_0 + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 1 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 16 + bucket_field_name key + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.test_table1 + numFiles 16 + numRows 500 + partition_columns ds + partition_columns.types string + rawDataSize 5312 + serialization.ddl struct test_table1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 16 + bucket_field_name key + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.test_table1 + partition_columns ds + partition_columns.types string + serialization.ddl struct test_table1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table1 + name: default.test_table1 + Truncated Path -> Alias: + /test_table1/ds=1/000001_0 [test_table1] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + test_table3 + TOK_TABLEBUCKETSAMPLE + 2 + 16 + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_ALLCOLREF + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: test_table3 + Statistics: Num rows: 1028 Data size: 10968 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: true + predicate: (((hash(key) & 2147483647) % 16) = 1) (type: boolean) + Statistics: Num rows: 514 Data size: 5484 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string), ds (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 514 Data size: 5484 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 514 Data size: 5484 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2 + columns.types int:string:string + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here 
#### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: (ds%3D1)000001_0 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 1 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 16 + bucket_field_name key + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.test_table3 + numFiles 16 + numRows 1028 + partition_columns ds + partition_columns.types string + rawDataSize 10968 + serialization.ddl struct test_table3 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 11996 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 16 + bucket_field_name key + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.test_table3 + partition_columns ds + partition_columns.types string + serialization.ddl struct test_table3 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + name: default.test_table3 + Truncated Path -> Alias: + /test_table3/ds=1/(ds%3D1)000001_0 [test_table3] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +#### A masked pattern was here #### +17 val_17 1 +33 val_33 1 +65 val_65 1 +97 val_97 1 +97 val_97 1 +113 val_113 1 +113 val_113 1 +129 val_129 1 +129 val_129 1 +145 val_145 1 +177 val_177 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +209 val_209 1 +209 val_209 1 +241 val_241 1 +257 val_257 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +289 val_289 1 +305 val_305 1 +321 val_321 1 +321 val_321 1 +353 val_353 1 +353 val_353 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +449 val_449 1 +481 val_481 1 +497 val_497 1 +PREHOOK: query: SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +17 val_17 1 +33 val_33 1 +65 val_65 1 +97 val_97 1 +97 val_97 1 +97 val_97 1 +97 val_97 1 +113 val_113 1 +113 val_113 1 +113 val_113 1 +113 val_113 1 +129 val_129 1 +129 val_129 1 +129 val_129 1 +129 val_129 1 +145 val_145 1 +177 val_177 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 
+209 val_209 1 +209 val_209 1 +209 val_209 1 +209 val_209 1 +241 val_241 1 +257 val_257 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +289 val_289 1 +305 val_305 1 +321 val_321 1 +321 val_321 1 +321 val_321 1 +321 val_321 1 +353 val_353 1 +353 val_353 1 +353 val_353 1 +353 val_353 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +449 val_449 1 +481 val_481 1 +497 val_497 1 PREHOOK: query: -- Join data from a sampled bucket to verify the data is bucketed SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out index 19ae67f..6881804 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out @@ -283,6 +283,1895 @@ POSTHOOK: Input: default@test_table2@ds=1 POSTHOOK: Output: default@test_table3@ds=1 POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value SIMPLE [(test_table2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: SELECT * FROM test_table1 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM test_table1 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +#### A masked pattern was here #### +0 val_0 1 +0 val_0 1 +0 val_0 1 +2 val_2 1 +4 val_4 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +8 val_8 1 +9 val_9 1 +10 val_10 1 +11 val_11 1 +12 val_12 1 +12 val_12 1 +15 val_15 1 +15 val_15 1 +17 val_17 1 +18 val_18 1 +18 val_18 1 +19 val_19 1 +20 val_20 1 +24 val_24 1 +24 val_24 1 +26 val_26 1 +26 val_26 1 +27 val_27 1 +28 val_28 1 +30 val_30 1 +33 val_33 1 +34 val_34 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +37 val_37 1 +37 val_37 1 +41 val_41 1 +42 val_42 1 +42 val_42 1 +43 val_43 1 +44 val_44 1 +47 val_47 1 +51 val_51 1 +51 val_51 1 +53 val_53 1 +54 val_54 1 +57 val_57 1 +58 val_58 1 +58 val_58 1 +64 val_64 1 +65 val_65 1 +66 val_66 1 +67 val_67 1 +67 val_67 1 +69 val_69 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +72 val_72 1 +72 val_72 1 +74 val_74 1 +76 val_76 1 +76 val_76 1 +77 val_77 1 +78 val_78 1 +80 val_80 1 +82 val_82 1 +83 val_83 1 +83 val_83 1 +84 val_84 1 +84 val_84 1 +85 val_85 1 +86 val_86 1 +87 val_87 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +92 val_92 1 +95 val_95 1 +95 val_95 1 +96 val_96 1 +97 val_97 1 +97 val_97 1 +98 val_98 1 +98 val_98 1 +100 val_100 1 +100 val_100 1 +103 val_103 1 +103 val_103 1 +104 val_104 1 +104 val_104 1 
+105 val_105 1 +111 val_111 1 +113 val_113 1 +113 val_113 1 +114 val_114 1 +116 val_116 1 +118 val_118 1 +118 val_118 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +120 val_120 1 +120 val_120 1 +125 val_125 1 +125 val_125 1 +126 val_126 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +129 val_129 1 +129 val_129 1 +131 val_131 1 +133 val_133 1 +134 val_134 1 +134 val_134 1 +136 val_136 1 +137 val_137 1 +137 val_137 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +143 val_143 1 +145 val_145 1 +146 val_146 1 +146 val_146 1 +149 val_149 1 +149 val_149 1 +150 val_150 1 +152 val_152 1 +152 val_152 1 +153 val_153 1 +155 val_155 1 +156 val_156 1 +157 val_157 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +163 val_163 1 +164 val_164 1 +164 val_164 1 +165 val_165 1 +165 val_165 1 +166 val_166 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +168 val_168 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +170 val_170 1 +172 val_172 1 +172 val_172 1 +174 val_174 1 +174 val_174 1 +175 val_175 1 +175 val_175 1 +176 val_176 1 +176 val_176 1 +177 val_177 1 +178 val_178 1 +179 val_179 1 +179 val_179 1 +180 val_180 1 +181 val_181 1 +183 val_183 1 +186 val_186 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +189 val_189 1 +190 val_190 1 +191 val_191 1 +191 val_191 1 +192 val_192 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +194 val_194 1 +195 val_195 1 +195 val_195 1 +196 val_196 1 +197 val_197 1 +197 val_197 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +200 val_200 1 +200 val_200 1 +201 val_201 1 +202 val_202 1 +203 val_203 1 +203 val_203 1 +205 val_205 1 +205 val_205 1 +207 val_207 1 +207 val_207 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +209 val_209 1 +209 val_209 1 +213 val_213 1 +213 val_213 1 +214 val_214 1 +216 val_216 1 +216 val_216 1 +217 val_217 1 +217 val_217 1 +218 val_218 1 +219 val_219 1 +219 val_219 1 +221 val_221 1 +221 val_221 1 +222 val_222 1 +223 val_223 1 +223 val_223 1 +224 val_224 1 +224 val_224 1 +226 val_226 1 +228 val_228 1 +229 val_229 1 +229 val_229 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +233 val_233 1 +233 val_233 1 +235 val_235 1 +237 val_237 1 +237 val_237 1 +238 val_238 1 +238 val_238 1 +239 val_239 1 +239 val_239 1 +241 val_241 1 +242 val_242 1 +242 val_242 1 +244 val_244 1 +247 val_247 1 +248 val_248 1 +249 val_249 1 +252 val_252 1 +255 val_255 1 +255 val_255 1 +256 val_256 1 +256 val_256 1 +257 val_257 1 +258 val_258 1 +260 val_260 1 +262 val_262 1 +263 val_263 1 +265 val_265 1 +265 val_265 1 +266 val_266 1 +272 val_272 1 +272 val_272 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +274 val_274 1 +275 val_275 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +278 val_278 1 +278 val_278 1 +280 val_280 1 +280 val_280 1 +281 val_281 1 +281 val_281 1 +282 val_282 1 +282 val_282 1 +283 val_283 1 +284 val_284 1 +285 val_285 1 +286 val_286 1 +287 val_287 1 +288 val_288 1 +288 val_288 1 +289 val_289 1 +291 val_291 1 +292 val_292 1 +296 val_296 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +302 val_302 1 +305 val_305 1 +306 val_306 1 +307 val_307 1 +307 val_307 1 +308 val_308 1 +309 val_309 1 +309 val_309 1 +310 val_310 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +315 val_315 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +317 val_317 1 +317 val_317 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +321 val_321 1 +321 val_321 1 +322 val_322 1 +322 val_322 1 +323 val_323 1 +325 val_325 1 +325 val_325 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +331 val_331 1 +331 val_331 1 +332 val_332 1 +333 val_333 1 
+333 val_333 1 +335 val_335 1 +336 val_336 1 +338 val_338 1 +339 val_339 1 +341 val_341 1 +342 val_342 1 +342 val_342 1 +344 val_344 1 +344 val_344 1 +345 val_345 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +351 val_351 1 +353 val_353 1 +353 val_353 1 +356 val_356 1 +360 val_360 1 +362 val_362 1 +364 val_364 1 +365 val_365 1 +366 val_366 1 +367 val_367 1 +367 val_367 1 +368 val_368 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +373 val_373 1 +374 val_374 1 +375 val_375 1 +377 val_377 1 +378 val_378 1 +379 val_379 1 +382 val_382 1 +382 val_382 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +386 val_386 1 +389 val_389 1 +392 val_392 1 +393 val_393 1 +394 val_394 1 +395 val_395 1 +395 val_395 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +397 val_397 1 +397 val_397 1 +399 val_399 1 +399 val_399 1 +400 val_400 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +402 val_402 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +404 val_404 1 +404 val_404 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +407 val_407 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +411 val_411 1 +413 val_413 1 +413 val_413 1 +414 val_414 1 +414 val_414 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +418 val_418 1 +419 val_419 1 +421 val_421 1 +424 val_424 1 +424 val_424 1 +427 val_427 1 +429 val_429 1 +429 val_429 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +432 val_432 1 +435 val_435 1 +436 val_436 1 +437 val_437 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +439 val_439 1 +439 val_439 1 +443 val_443 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +449 val_449 1 +452 val_452 1 +453 val_453 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +455 val_455 1 +457 val_457 1 +458 val_458 1 +458 val_458 1 +459 val_459 1 +459 val_459 1 +460 val_460 1 +462 val_462 1 +462 val_462 1 +463 val_463 1 +463 val_463 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +467 val_467 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +470 val_470 1 +472 val_472 1 +475 val_475 1 +477 val_477 1 +478 val_478 1 +478 val_478 1 +479 val_479 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +481 val_481 1 +482 val_482 1 +483 val_483 1 +484 val_484 1 +485 val_485 1 +487 val_487 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +490 val_490 1 +491 val_491 1 +492 val_492 1 +492 val_492 1 +493 val_493 1 +494 val_494 1 +495 val_495 1 +496 val_496 1 +497 val_497 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +PREHOOK: query: SELECT * FROM test_table3 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM test_table3 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +0 val_0 1 +2 val_2 1 +4 val_4 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +5 val_5 1 +8 val_8 1 +9 val_9 1 +10 val_10 1 +11 val_11 1 +12 val_12 1 +12 val_12 1 +12 val_12 1 +12 val_12 1 +15 val_15 1 +15 val_15 1 +15 val_15 1 +15 val_15 1 +17 val_17 1 +18 val_18 1 +18 val_18 1 +18 val_18 1 +18 val_18 1 +19 val_19 1 +20 val_20 1 +24 val_24 1 +24 val_24 1 +24 val_24 1 +24 val_24 1 +26 val_26 1 +26 val_26 1 +26 val_26 1 +26 val_26 1 
+27 val_27 1 +28 val_28 1 +30 val_30 1 +33 val_33 1 +34 val_34 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +35 val_35 1 +37 val_37 1 +37 val_37 1 +37 val_37 1 +37 val_37 1 +41 val_41 1 +42 val_42 1 +42 val_42 1 +42 val_42 1 +42 val_42 1 +43 val_43 1 +44 val_44 1 +47 val_47 1 +51 val_51 1 +51 val_51 1 +51 val_51 1 +51 val_51 1 +53 val_53 1 +54 val_54 1 +57 val_57 1 +58 val_58 1 +58 val_58 1 +58 val_58 1 +58 val_58 1 +64 val_64 1 +65 val_65 1 +66 val_66 1 +67 val_67 1 +67 val_67 1 +67 val_67 1 +67 val_67 1 +69 val_69 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +70 val_70 1 +72 val_72 1 +72 val_72 1 +72 val_72 1 +72 val_72 1 +74 val_74 1 +76 val_76 1 +76 val_76 1 +76 val_76 1 +76 val_76 1 +77 val_77 1 +78 val_78 1 +80 val_80 1 +82 val_82 1 +83 val_83 1 +83 val_83 1 +83 val_83 1 +83 val_83 1 +84 val_84 1 +84 val_84 1 +84 val_84 1 +84 val_84 1 +85 val_85 1 +86 val_86 1 +87 val_87 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +90 val_90 1 +92 val_92 1 +95 val_95 1 +95 val_95 1 +95 val_95 1 +95 val_95 1 +96 val_96 1 +97 val_97 1 +97 val_97 1 +97 val_97 1 +97 val_97 1 +98 val_98 1 +98 val_98 1 +98 val_98 1 +98 val_98 1 +100 val_100 1 +100 val_100 1 +100 val_100 1 +100 val_100 1 +103 val_103 1 +103 val_103 1 +103 val_103 1 +103 val_103 1 +104 val_104 1 +104 val_104 1 +104 val_104 1 +104 val_104 1 +105 val_105 1 +111 val_111 1 +113 val_113 1 +113 val_113 1 +113 val_113 1 +113 val_113 1 +114 val_114 1 +116 val_116 1 +118 val_118 1 +118 val_118 1 +118 val_118 1 +118 val_118 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +119 val_119 1 +120 val_120 1 +120 val_120 1 +120 val_120 1 +120 val_120 1 +125 val_125 1 +125 val_125 1 +125 val_125 1 +125 val_125 1 +126 val_126 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +128 val_128 1 +129 val_129 1 +129 val_129 1 +129 val_129 1 +129 val_129 1 +131 val_131 1 +133 val_133 1 +134 val_134 1 +134 val_134 1 +134 val_134 1 +134 val_134 1 +136 val_136 1 +137 val_137 1 +137 val_137 1 +137 val_137 1 +137 val_137 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +138 val_138 1 +143 val_143 1 +145 val_145 1 +146 val_146 1 +146 val_146 1 +146 val_146 1 +146 val_146 1 +149 val_149 1 +149 val_149 1 +149 val_149 1 +149 val_149 1 +150 val_150 1 +152 val_152 1 +152 val_152 1 +152 val_152 1 +152 val_152 1 +153 val_153 1 +155 val_155 1 +156 val_156 1 +157 val_157 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +163 val_163 1 +164 val_164 1 +164 val_164 1 +164 val_164 1 +164 val_164 1 +165 val_165 1 +165 val_165 1 +165 val_165 1 +165 val_165 1 +166 val_166 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +167 val_167 1 +168 val_168 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +169 val_169 1 +170 val_170 1 +172 val_172 1 +172 val_172 1 +172 val_172 1 +172 val_172 1 +174 val_174 1 +174 val_174 1 +174 val_174 1 +174 val_174 1 +175 
val_175 1 +175 val_175 1 +175 val_175 1 +175 val_175 1 +176 val_176 1 +176 val_176 1 +176 val_176 1 +176 val_176 1 +177 val_177 1 +178 val_178 1 +179 val_179 1 +179 val_179 1 +179 val_179 1 +179 val_179 1 +180 val_180 1 +181 val_181 1 +183 val_183 1 +186 val_186 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +187 val_187 1 +189 val_189 1 +190 val_190 1 +191 val_191 1 +191 val_191 1 +191 val_191 1 +191 val_191 1 +192 val_192 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +194 val_194 1 +195 val_195 1 +195 val_195 1 +195 val_195 1 +195 val_195 1 +196 val_196 1 +197 val_197 1 +197 val_197 1 +197 val_197 1 +197 val_197 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +199 val_199 1 +200 val_200 1 +200 val_200 1 +200 val_200 1 +200 val_200 1 +201 val_201 1 +202 val_202 1 +203 val_203 1 +203 val_203 1 +203 val_203 1 +203 val_203 1 +205 val_205 1 +205 val_205 1 +205 val_205 1 +205 val_205 1 +207 val_207 1 +207 val_207 1 +207 val_207 1 +207 val_207 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +208 val_208 1 +209 val_209 1 +209 val_209 1 +209 val_209 1 +209 val_209 1 +213 val_213 1 +213 val_213 1 +213 val_213 1 +213 val_213 1 +214 val_214 1 +216 val_216 1 +216 val_216 1 +216 val_216 1 +216 val_216 1 +217 val_217 1 +217 val_217 1 +217 val_217 1 +217 val_217 1 +218 val_218 1 +219 val_219 1 +219 val_219 1 +219 val_219 1 +219 val_219 1 +221 val_221 1 +221 val_221 1 +221 val_221 1 +221 val_221 1 +222 val_222 1 +223 val_223 1 +223 val_223 1 +223 val_223 1 +223 val_223 1 +224 val_224 1 +224 val_224 1 +224 val_224 1 +224 val_224 1 +226 val_226 1 +228 val_228 1 +229 val_229 1 +229 val_229 1 +229 val_229 1 +229 val_229 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +230 val_230 1 +233 val_233 1 +233 val_233 1 +233 val_233 1 +233 val_233 1 +235 val_235 1 +237 val_237 1 +237 val_237 1 +237 val_237 1 +237 val_237 1 +238 val_238 1 +238 val_238 1 +238 val_238 1 +238 val_238 1 +239 val_239 1 +239 val_239 1 +239 val_239 1 +239 val_239 1 +241 val_241 1 +242 val_242 1 +242 val_242 1 +242 val_242 1 +242 val_242 1 +244 val_244 1 +247 val_247 1 +248 val_248 1 +249 val_249 1 +252 val_252 1 +255 val_255 1 +255 val_255 1 +255 val_255 1 +255 val_255 1 +256 val_256 1 +256 val_256 1 +256 val_256 1 +256 val_256 1 +257 val_257 1 +258 val_258 1 +260 val_260 1 +262 val_262 1 +263 val_263 1 +265 val_265 1 +265 val_265 1 +265 val_265 1 +265 val_265 1 +266 val_266 1 +272 val_272 1 +272 val_272 1 +272 val_272 1 +272 val_272 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +274 val_274 1 +275 val_275 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +277 val_277 1 +278 val_278 1 +278 val_278 1 +278 val_278 1 +278 val_278 1 +280 val_280 1 +280 val_280 1 +280 
val_280 1 +280 val_280 1 +281 val_281 1 +281 val_281 1 +281 val_281 1 +281 val_281 1 +282 val_282 1 +282 val_282 1 +282 val_282 1 +282 val_282 1 +283 val_283 1 +284 val_284 1 +285 val_285 1 +286 val_286 1 +287 val_287 1 +288 val_288 1 +288 val_288 1 +288 val_288 1 +288 val_288 1 +289 val_289 1 +291 val_291 1 +292 val_292 1 +296 val_296 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +298 val_298 1 +302 val_302 1 +305 val_305 1 +306 val_306 1 +307 val_307 1 +307 val_307 1 +307 val_307 1 +307 val_307 1 +308 val_308 1 +309 val_309 1 +309 val_309 1 +309 val_309 1 +309 val_309 1 +310 val_310 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +311 val_311 1 +315 val_315 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +316 val_316 1 +317 val_317 1 +317 val_317 1 +317 val_317 1 +317 val_317 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +318 val_318 1 +321 val_321 1 +321 val_321 1 +321 val_321 1 +321 val_321 1 +322 val_322 1 +322 val_322 1 +322 val_322 1 +322 val_322 1 +323 val_323 1 +325 val_325 1 +325 val_325 1 +325 val_325 1 +325 val_325 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +327 val_327 1 +331 val_331 1 +331 val_331 1 +331 val_331 1 +331 val_331 1 +332 val_332 1 +333 val_333 1 +333 val_333 1 +333 val_333 1 +333 val_333 1 +335 val_335 1 +336 val_336 1 +338 val_338 1 +339 val_339 1 +341 val_341 1 +342 val_342 1 +342 val_342 1 +342 val_342 1 +342 val_342 1 +344 val_344 1 +344 val_344 1 +344 val_344 1 +344 val_344 1 +345 val_345 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +348 val_348 1 +351 val_351 1 +353 val_353 1 +353 val_353 1 +353 val_353 1 +353 val_353 1 +356 val_356 1 +360 val_360 1 +362 val_362 1 +364 val_364 1 +365 val_365 1 +366 val_366 1 +367 val_367 1 +367 val_367 1 +367 val_367 1 +367 val_367 1 +368 val_368 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +373 val_373 1 +374 val_374 1 +375 val_375 1 +377 val_377 1 +378 val_378 1 +379 val_379 1 +382 val_382 1 +382 val_382 1 +382 val_382 1 +382 val_382 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +384 val_384 1 +386 val_386 1 +389 val_389 1 +392 val_392 1 +393 val_393 1 +394 val_394 1 +395 val_395 1 +395 val_395 1 +395 val_395 1 +395 val_395 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +396 val_396 1 +397 val_397 1 +397 val_397 1 +397 val_397 1 +397 val_397 1 +399 val_399 1 +399 val_399 1 +399 val_399 1 +399 val_399 1 +400 val_400 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 
val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +402 val_402 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +403 val_403 1 +404 val_404 1 +404 val_404 1 +404 val_404 1 +404 val_404 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +406 val_406 1 +407 val_407 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +409 val_409 1 +411 val_411 1 +413 val_413 1 +413 val_413 1 +413 val_413 1 +413 val_413 1 +414 val_414 1 +414 val_414 1 +414 val_414 1 +414 val_414 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +418 val_418 1 +419 val_419 1 +421 val_421 1 +424 val_424 1 +424 val_424 1 +424 val_424 1 +424 val_424 1 +427 val_427 1 +429 val_429 1 +429 val_429 1 +429 val_429 1 +429 val_429 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +430 val_430 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +431 val_431 1 +432 val_432 1 +435 val_435 1 +436 val_436 1 +437 val_437 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +438 val_438 1 +439 val_439 1 +439 val_439 1 +439 val_439 1 +439 val_439 1 +443 val_443 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +449 val_449 1 +452 val_452 1 +453 val_453 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +454 val_454 1 +455 val_455 1 +457 val_457 1 +458 val_458 1 +458 val_458 1 +458 val_458 1 +458 val_458 1 +459 val_459 1 +459 val_459 1 +459 val_459 1 +459 val_459 1 +460 val_460 1 +462 val_462 1 +462 val_462 1 +462 val_462 1 +462 val_462 1 +463 val_463 1 +463 val_463 1 +463 val_463 1 +463 val_463 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +466 val_466 1 +467 val_467 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +468 val_468 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +469 val_469 1 +470 val_470 1 +472 val_472 1 +475 val_475 1 +477 val_477 1 +478 val_478 1 +478 val_478 1 +478 val_478 1 +478 val_478 1 +479 val_479 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +480 val_480 1 +481 val_481 1 +482 val_482 1 +483 val_483 1 +484 val_484 1 +485 val_485 1 +487 val_487 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +489 val_489 1 +490 
val_490 1 +491 val_491 1 +492 val_492 1 +492 val_492 1 +492 val_492 1 +492 val_492 1 +493 val_493 1 +494 val_494 1 +495 val_495 1 +496 val_496 1 +497 val_497 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +498 val_498 1 +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + test_table1 + TOK_TABLEBUCKETSAMPLE + 2 + 16 + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_ALLCOLREF + + +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Partition Description: + Partition + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 1 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 16 + bucket_field_name key + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.test_table1 + numFiles 16 + numRows 500 + partition_columns ds + partition_columns.types string + rawDataSize 5312 + serialization.ddl struct test_table1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 16 + bucket_field_name key + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.test_table1 + partition_columns ds + partition_columns.types string + serialization.ddl struct test_table1 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table1 + name: default.test_table1 + Processor Tree: + TableScan + alias: test_table1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: true + predicate: (((hash(key) & 2147483647) % 16) = 1) (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string), ds (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + test_table3 + TOK_TABLEBUCKETSAMPLE + 2 + 16 + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_ALLCOLREF + + +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Partition Description: + Partition + input 
format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 1 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 16 + bucket_field_name key + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.test_table3 + numFiles 16 + numRows 1028 + partition_columns ds + partition_columns.types string + rawDataSize 10968 + serialization.ddl struct test_table3 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 11996 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 16 + bucket_field_name key + columns key,value + columns.comments + columns.types int:string +#### A masked pattern was here #### + name default.test_table3 + partition_columns ds + partition_columns.types string + serialization.ddl struct test_table3 { i32 key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.test_table3 + name: default.test_table3 + Processor Tree: + TableScan + alias: test_table3 + Statistics: Num rows: 1028 Data size: 10968 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: true + predicate: (((hash(key) & 2147483647) % 16) = 1) (type: boolean) + Statistics: Num rows: 514 Data size: 5484 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string), ds (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 514 Data size: 5484 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table1 +PREHOOK: Input: default@test_table1@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table1 +POSTHOOK: Input: default@test_table1@ds=1 +#### A masked pattern was here #### +17 val_17 1 +33 val_33 1 +65 val_65 1 +97 val_97 1 +97 val_97 1 +113 val_113 1 +113 val_113 1 +129 val_129 1 +129 val_129 1 +145 val_145 1 +177 val_177 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +209 val_209 1 +209 val_209 1 +241 val_241 1 +257 val_257 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +289 val_289 1 +305 val_305 1 +321 val_321 1 +321 val_321 1 +353 val_353 1 +353 val_353 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +449 val_449 1 +481 val_481 1 +497 val_497 1 +PREHOOK: query: SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +PREHOOK: type: QUERY +PREHOOK: Input: default@test_table3 +PREHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_table3 +POSTHOOK: Input: default@test_table3@ds=1 +#### A masked pattern was here #### +17 val_17 1 +33 val_33 1 +65 val_65 1 +97 val_97 1 +97 val_97 1 +97 
val_97 1 +97 val_97 1 +113 val_113 1 +113 val_113 1 +113 val_113 1 +113 val_113 1 +129 val_129 1 +129 val_129 1 +129 val_129 1 +129 val_129 1 +145 val_145 1 +177 val_177 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +193 val_193 1 +209 val_209 1 +209 val_209 1 +209 val_209 1 +209 val_209 1 +241 val_241 1 +257 val_257 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +273 val_273 1 +289 val_289 1 +305 val_305 1 +321 val_321 1 +321 val_321 1 +321 val_321 1 +321 val_321 1 +353 val_353 1 +353 val_353 1 +353 val_353 1 +353 val_353 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +369 val_369 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +401 val_401 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +417 val_417 1 +449 val_449 1 +481 val_481 1 +497 val_497 1 PREHOOK: query: -- Join data from a sampled bucket to verify the data is bucketed SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY diff --git a/ql/src/test/templates/TestCliDriver.vm b/ql/src/test/templates/TestCliDriver.vm index f055444..87eaf3d 100644 --- a/ql/src/test/templates/TestCliDriver.vm +++ b/ql/src/test/templates/TestCliDriver.vm @@ -52,7 +52,7 @@ public class $className extends TestCase { System.err.println("Exception: " + e.getMessage()); e.printStackTrace(); System.err.flush(); - fail("Unexpected exception in static initialization"+e.getMessage()); + fail("Unexpected exception in static initialization: "+e.getMessage()); } } diff --git a/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java b/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java index 1358c5f..c737494 100644 --- a/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java +++ b/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.shims; import java.io.IOException; +import java.lang.Override; +import java.lang.reflect.Constructor; import java.net.InetSocketAddress; import java.net.MalformedURLException; import java.net.URI; @@ -63,6 +65,7 @@ import org.apache.hadoop.security.KerberosName; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.VersionInfo; @@ -606,9 +609,39 @@ public String getShortName() throws IOException { return kerberosName.getShortName(); } } - + @Override public StoragePolicyShim getStoragePolicyShim(FileSystem fs) { return null; } + + @Override + public boolean runDistCp(Path src, Path dst, Configuration conf) throws IOException { + int rc; + + // Creates the command-line parameters for distcp + String[] params = {"-update", "-skipcrccheck", src.toString(), dst.toString()}; + + try { + Class clazzDistCp = Class.forName("org.apache.hadoop.tools.distcp2"); + Constructor c = 
clazzDistCp.getConstructor(); + c.setAccessible(true); + Tool distcp = (Tool)c.newInstance(); + distcp.setConf(conf); + rc = distcp.run(params); + } catch (ClassNotFoundException e) { + throw new IOException("Cannot find DistCp class package: " + e.getMessage()); + } catch (NoSuchMethodException e) { + throw new IOException("Cannot get DistCp constructor: " + e.getMessage()); + } catch (Exception e) { + throw new IOException("Cannot execute DistCp process: " + e, e); + } + + return (0 == rc) ? true : false; + } + + @Override + public HdfsEncryptionShim createHdfsEncryptionShim(FileSystem fs, Configuration conf) throws IOException { + return new HadoopShims.NoopHdfsEncryptionShim(); + } } diff --git a/shims/0.23/pom.xml b/shims/0.23/pom.xml index 923814d..87daea8 100644 --- a/shims/0.23/pom.xml +++ b/shims/0.23/pom.xml @@ -145,4 +145,17 @@ test-jar + + + + hadoop-2 + + + org.apache.hadoop + hadoop-distcp + ${hadoop-23.version} + + + + diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java index e70b7bb..6902602 100644 --- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java +++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java @@ -19,11 +19,13 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.lang.reflect.Constructor; import java.lang.reflect.Method; import java.net.InetSocketAddress; import java.net.MalformedURLException; import java.net.URI; import java.security.AccessControlException; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Comparator; import java.util.HashMap; @@ -35,6 +37,9 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProvider.Options; +import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.DefaultFileAccess; import org.apache.hadoop.fs.FSDataInputStream; @@ -59,6 +64,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.client.HdfsAdmin; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.mapred.ClusterStatus; import org.apache.hadoop.mapred.JobConf; @@ -80,6 +87,7 @@ import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.util.Tool; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.tez.test.MiniTezCluster; @@ -94,7 +102,6 @@ public class Hadoop23Shims extends HadoopShimsSecure { HadoopShims.MiniDFSShim cluster = null; - final boolean zeroCopy; final boolean storagePolicy; @@ -391,7 +398,14 @@ public void setupConfiguration(Configuration conf) { int numDataNodes, boolean format, String[] racks) throws IOException { - cluster = new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks)); + MiniDFSCluster miniDFSCluster = new MiniDFSCluster(conf, numDataNodes, format, racks); + + // Need to set the client's KeyProvider to the NN's for JKS, + // else the updates do not get flushed properly + miniDFSCluster.getFileSystem().getClient().setKeyProvider( + 
miniDFSCluster.getNameNode().getNamesystem().getProvider()); + + cluster = new MiniDFSShim(miniDFSCluster); return cluster; } @@ -954,11 +968,11 @@ public String getShortName() throws IOException { } } - + public static class StoragePolicyShim implements HadoopShims.StoragePolicyShim { private final DistributedFileSystem dfs; - + public StoragePolicyShim(DistributedFileSystem fs) { this.dfs = fs; } @@ -979,11 +993,12 @@ public void setStoragePolicy(Path path, StoragePolicyValue policy) /* do nothing */ break; } - default: + default: throw new IllegalArgumentException("Unknown storage policy " + policy); } } } + @Override public HadoopShims.StoragePolicyShim getStoragePolicyShim(FileSystem fs) { @@ -996,4 +1011,180 @@ public void setStoragePolicy(Path path, StoragePolicyValue policy) return null; } } + + @Override + public boolean runDistCp(Path src, Path dst, Configuration conf) throws IOException { + int rc; + + // Creates the command-line parameters for distcp + String[] params = {"-update", "-skipcrccheck", src.toString(), dst.toString()}; + + try { + Class clazzDistCp = Class.forName("org.apache.hadoop.tools.DistCp"); + Constructor c = clazzDistCp.getConstructor(); + c.setAccessible(true); + Tool distcp = (Tool)c.newInstance(); + distcp.setConf(conf); + rc = distcp.run(params); + } catch (ClassNotFoundException e) { + throw new IOException("Cannot find DistCp class package: " + e.getMessage()); + } catch (NoSuchMethodException e) { + throw new IOException("Cannot get DistCp constructor: " + e.getMessage()); + } catch (Exception e) { + throw new IOException("Cannot execute DistCp process: " + e, e); + } + + return (0 == rc); + } + + public class HdfsEncryptionShim implements HadoopShims.HdfsEncryptionShim { + private final String HDFS_SECURITY_DEFAULT_CIPHER = "AES/CTR/NoPadding"; + + /** + * Gets information about HDFS encryption zones + */ + private HdfsAdmin hdfsAdmin = null; + + /** + * Used to compare encryption key strengths. 
+ */ + private KeyProvider keyProvider = null; + + private Configuration conf; + + public HdfsEncryptionShim(URI uri, Configuration conf) throws IOException { + DistributedFileSystem dfs = (DistributedFileSystem)FileSystem.get(uri, conf); + + this.conf = conf; + this.keyProvider = dfs.getClient().getKeyProvider(); + this.hdfsAdmin = new HdfsAdmin(uri, conf); + } + + @Override + public boolean isPathEncrypted(Path path) throws IOException { + Path fullPath; + if (path.isAbsolute()) { + fullPath = path; + } else { + fullPath = path.getFileSystem(conf).makeQualified(path); + } + return (hdfsAdmin.getEncryptionZoneForPath(fullPath) != null); + } + + @Override + public boolean arePathsOnSameEncryptionZone(Path path1, Path path2) throws IOException { + EncryptionZone zone1, zone2; + + zone1 = hdfsAdmin.getEncryptionZoneForPath(path1); + zone2 = hdfsAdmin.getEncryptionZoneForPath(path2); + + if (zone1 == null && zone2 == null) { + return true; + } else if (zone1 == null || zone2 == null) { + return false; + } + + return zone1.equals(zone2); + } + + @Override + public int comparePathKeyStrength(Path path1, Path path2) throws IOException { + EncryptionZone zone1, zone2; + + zone1 = hdfsAdmin.getEncryptionZoneForPath(path1); + zone2 = hdfsAdmin.getEncryptionZoneForPath(path2); + + if (zone1 == null && zone2 == null) { + return 0; + } else if (zone1 == null) { + return -1; + } else if (zone2 == null) { + return 1; + } + + return compareKeyStrength(zone1.getKeyName(), zone2.getKeyName()); + } + + @Override + public void createEncryptionZone(Path path, String keyName) throws IOException { + hdfsAdmin.createEncryptionZone(path, keyName); + } + + @Override + public void createKey(String keyName, int bitLength) + throws IOException, NoSuchAlgorithmException { + + checkKeyProvider(); + + if (keyProvider.getMetadata(keyName) == null) { + final KeyProvider.Options options = new Options(this.conf); + options.setCipher(HDFS_SECURITY_DEFAULT_CIPHER); + options.setBitLength(bitLength); + keyProvider.createKey(keyName, options); + keyProvider.flush(); + } else { + throw new IOException("key '" + keyName + "' already exists"); + } + } + + @Override + public void deleteKey(String keyName) throws IOException { + checkKeyProvider(); + + if (keyProvider.getMetadata(keyName) != null) { + keyProvider.deleteKey(keyName); + keyProvider.flush(); + } else { + throw new IOException("key '" + keyName + "' does not exist."); + } + } + + @Override + public List getKeys() throws IOException { + checkKeyProvider(); + return keyProvider.getKeys(); + } + + private void checkKeyProvider() throws IOException { + if (keyProvider == null) { + throw new IOException("HDFS security key provider is not configured on your server."); + } + } + + /** + * Compares two encryption key strengths. + * + * @param keyname1 Keyname to compare + * @param keyname2 Keyname to compare + * @return 1 if path1 is stronger; 0 if paths are equals; -1 if path1 is weaker. 
+ * @throws IOException If an error occurred attempting to get key metadata + */ + private int compareKeyStrength(String keyname1, String keyname2) throws IOException { + KeyProvider.Metadata meta1, meta2; + + if (keyProvider == null) { + throw new IOException("HDFS security key provider is not configured on your server."); + } + + meta1 = keyProvider.getMetadata(keyname1); + meta2 = keyProvider.getMetadata(keyname2); + + if (meta1.getBitLength() < meta2.getBitLength()) { + return -1; + } else if (meta1.getBitLength() == meta2.getBitLength()) { + return 0; + } else { + return 1; + } + } + } + + @Override + public HadoopShims.HdfsEncryptionShim createHdfsEncryptionShim(FileSystem fs, Configuration conf) throws IOException { + URI uri = fs.getUri(); + if ("hdfs".equals(uri.getScheme())) { + return new HdfsEncryptionShim(uri, conf); + } + return new HadoopShims.NoopHdfsEncryptionShim(); + } } diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java index 279f029..064304c 100644 --- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java +++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java @@ -23,12 +23,19 @@ import java.net.URI; import java.nio.ByteBuffer; import java.security.AccessControlException; +import java.security.NoSuchAlgorithmException; +import java.security.PrivilegedExceptionAction; import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; +import javax.security.auth.login.LoginException; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FSDataInputStream; @@ -567,4 +574,131 @@ public void checkFileAccess(FileSystem fs, FileStatus status, FsAction action) public String getShortName() throws IOException; } + /** + * Copies a source dir/file to a destination by orchestrating the copy between hdfs nodes. + * This distributed process is meant to copy huge files that could take some time if a single + * copy is done. + * + * @param src Path to the source file or directory to copy + * @param dst Path to the destination file or directory + * @param conf The hadoop configuration object + * @return True if it is successfull; False otherwise. + */ + public boolean runDistCp(Path src, Path dst, Configuration conf) throws IOException; + + /** + * This interface encapsulates methods used to get encryption information from + * HDFS paths. + */ + public interface HdfsEncryptionShim { + /** + * Checks if a given HDFS path is encrypted. + * + * @param path Path to HDFS file system + * @return True if it is encrypted; False otherwise. + * @throws IOException If an error occurred attempting to get encryption information + */ + public boolean isPathEncrypted(Path path) throws IOException; + + /** + * Checks if two HDFS paths are on the same encrypted or unencrypted zone. + * + * @param path1 Path to HDFS file system + * @param path2 Path to HDFS file system + * @return True if both paths are in the same zone; False otherwise. + * @throws IOException If an error occurred attempting to get encryption information + */ + public boolean arePathsOnSameEncryptionZone(Path path1, Path path2) throws IOException; + + /** + * Compares two encrypted path strengths. 
+     *
+     * @param path1 HDFS path to compare.
+     * @param path2 HDFS path to compare.
+     * @return 1 if path1 is stronger; 0 if paths are equal; -1 if path1 is weaker.
+     * @throws IOException If an error occurred attempting to get encryption/key metadata
+     */
+    public int comparePathKeyStrength(Path path1, Path path2) throws IOException;
+
+    /**
+     * Creates an encryption zone for the given path and key name.
+     *
+     * @param path HDFS path where the encryption zone will be created
+     * @param keyName Name of the encryption key
+     * @throws IOException If an error occurred while creating the encryption zone
+     */
+    @VisibleForTesting
+    public void createEncryptionZone(Path path, String keyName) throws IOException;
+
+    /**
+     * Creates an encryption key.
+     *
+     * @param keyName Name of the key
+     * @param bitLength Key encryption length in bits (128 or 256).
+     * @throws IOException If an error occurs while creating the encryption key
+     * @throws NoSuchAlgorithmException If cipher algorithm is invalid.
+     */
+    @VisibleForTesting
+    public void createKey(String keyName, int bitLength)
+        throws IOException, NoSuchAlgorithmException;
+
+    @VisibleForTesting
+    public void deleteKey(String keyName) throws IOException;
+
+    @VisibleForTesting
+    public List getKeys() throws IOException;
+  }
+
+  /**
+   * This is a dummy class used when the Hadoop version does not support HDFS encryption.
+   */
+  public static class NoopHdfsEncryptionShim implements HdfsEncryptionShim {
+    @Override
+    public boolean isPathEncrypted(Path path) throws IOException {
+      /* not supported */
+      return false;
+    }
+
+    @Override
+    public boolean arePathsOnSameEncryptionZone(Path path1, Path path2) throws IOException {
+      /* not supported */
+      return true;
+    }
+
+    @Override
+    public int comparePathKeyStrength(Path path1, Path path2) throws IOException {
+      /* not supported */
+      return 0;
+    }
+
+    @Override
+    public void createEncryptionZone(Path path, String keyName) {
+      /* not supported */
+    }
+
+    @Override
+    public void createKey(String keyName, int bitLength) {
+      /* not supported */
+    }
+
+    @Override
+    public void deleteKey(String keyName) throws IOException {
+      /* not supported */
+    }
+
+    @Override
+    public List getKeys() throws IOException {
+      /* not supported */
+      return null;
+    }
+  }
+
+  /**
+   * Returns a new instance of the HdfsEncryption shim.
+   *
+   * @param fs A FileSystem object pointing to HDFS
+   * @param conf A Configuration object
+   * @return A new instance of the HdfsEncryption shim.
+   * @throws IOException If an error occurred while creating the instance.
+   */
+  public HdfsEncryptionShim createHdfsEncryptionShim(FileSystem fs, Configuration conf) throws IOException;
 }
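For reference, the sketch below shows one way a caller inside Hive could exercise the new shim entry points added by this patch (createHdfsEncryptionShim, arePathsOnSameEncryptionZone, runDistCp). It is only a minimal, illustrative example: the EncryptionAwareCopyExample class, the copyRespectingEncryption method, and the rename-versus-distcp decision are hypothetical and not part of the patch; only the shim methods themselves come from the code above.

// Illustrative sketch only. EncryptionAwareCopyExample and copyRespectingEncryption are
// hypothetical names; the shim calls (createHdfsEncryptionShim, arePathsOnSameEncryptionZone,
// runDistCp) are the ones introduced by this patch.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.HadoopShims.HdfsEncryptionShim;
import org.apache.hadoop.hive.shims.ShimLoader;

public class EncryptionAwareCopyExample {

  public static boolean copyRespectingEncryption(FileSystem fs, Path src, Path dst,
      Configuration conf) throws IOException {
    HadoopShims shims = ShimLoader.getHadoopShims();

    // On Hadoop versions without HDFS encryption support this returns the
    // NoopHdfsEncryptionShim, so the zone check below degrades gracefully.
    HdfsEncryptionShim encryptionShim = shims.createHdfsEncryptionShim(fs, conf);

    if (!encryptionShim.arePathsOnSameEncryptionZone(src, dst)) {
      // Crossing encryption zones requires a byte-level copy (a rename across
      // zones is rejected by HDFS), so fall back to the distributed copy job.
      return shims.runDistCp(src, dst, conf);
    }

    // Same zone (or no encryption at all): a plain rename is enough for this sketch.
    return fs.rename(src, dst);
  }
}

A test could drive the same flow against a MiniDFSCluster by first calling createKey and createEncryptionZone on the shim, which is what the @VisibleForTesting annotations on those methods are intended to support.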