diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties index d86ff58840..70d0749093 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -570,6 +570,7 @@ minillaplocal.query.files=\ list_bucket_dml_10.q,\ llap_partitioned.q,\ llap_vector_nohybridgrace.q,\ + load_data_acid_rename.q,\ load_dyn_part5.q,\ lvj_mapjoin.q,\ materialized_view_create_rewrite_dummy.q,\ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 63bcedc000..a26a7a8ddc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; @@ -1881,11 +1871,12 @@ else if(!isAcidIUDoperation && isFullAcidTable) { // base_x. (there is Insert Overwrite and Load Data Overwrite) boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge")); replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, getConf(), - isSrcLocal, isAutoPurge, newFiles, filter, isMmTableWrite?true:false); + isSrcLocal, isAutoPurge, newFiles, filter, isMmTableWrite); } else { FileSystem fs = tbl.getDataLocation().getFileSystem(conf); copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation, - (loadFileType == LoadFileType.OVERWRITE_EXISTING), newFiles); + (loadFileType == LoadFileType.OVERWRITE_EXISTING), newFiles, tbl.getNumBuckets() > 0, + isFullAcidTable); } } perfLogger.PerfLogEnd("MoveTask", "FileMoves"); @@ -2432,7 +2423,8 @@ else if(!isAcidIUDoperation && isFullAcidTable) { try { FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf); copyFiles(sessionConf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation, - loadFileType == LoadFileType.OVERWRITE_EXISTING, newFiles); + loadFileType == LoadFileType.OVERWRITE_EXISTING, newFiles, + tbl.getNumBuckets() > 0, isFullAcidTable); } catch (IOException e) { throw new HiveException("addFiles: filesystem error in check phase", e); } @@ -3289,8 +3281,9 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectType objectType, } private static void copyFiles(final HiveConf conf, final FileSystem destFs, - FileStatus[] srcs, final FileSystem srcFs, final Path destf, final boolean isSrcLocal, - boolean isOverwrite, final List newFiles) throws HiveException { + FileStatus[] srcs, final FileSystem srcFs, final Path destf, + final boolean isSrcLocal, boolean isOverwrite, + final List newFiles, boolean acidRename) throws HiveException { final HdfsUtils.HadoopFileStatus fullDestStatus; try { @@ -3306,6 +3299,12 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs, final ExecutorService pool = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25) > 0 ? Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25), new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Move-Thread-%d").build()) : null;
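The hunk that follows implements the rename pass for ACID loads. As a minimal standalone sketch of the idea (plain Java; names illustrative, not the Hive API):

    import java.util.Arrays;

    // Assign INSERT-style names deterministically: sort the sources, then number them.
    String[] sources = { "data2.txt", "data1.txt", "part-00000.gz" };
    Arrays.sort(sources);                       // fixed order => stable task ids
    int taskId = 0;
    for (String src : sources) {
      // Each source file becomes 000000_0, 000001_0, ... regardless of its old name.
      System.out.println(src + " -> " + String.format("%06d_0", taskId++));
    }

On the asynchronous branch each submitted Callable gets its own final copy of the id (finalTaskId below), since anonymous classes can only capture final or effectively final locals.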
+ // For the ACID non-bucketed case, the file names have to be consistent with those produced by INSERT/UPDATE/DELETE ops, + // i.e., 000000_0, 000001_0_copy_1, etc. Any extension on the source file name is dropped, + // since ACID data files must be ORC. + int taskId = 0; + // Sort the files so that task ids are assigned deterministically + Arrays.sort(srcs); for (FileStatus src : srcs) { FileStatus[] files; if (src.isDirectory()) { @@ -3320,6 +3319,8 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs, } final SessionState parentSession = SessionState.get(); + // Sort the files so that task ids are assigned deterministically + Arrays.sort(files); for (final FileStatus srcFile : files) { final Path srcP = srcFile.getPath(); final boolean needToCopy = needToCopy(srcP, destf, srcFs, destFs); @@ -3333,7 +3334,8 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs, // copy from source to destination, we will inherit the destination's parent group ownership. if (null == pool) { try { - Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isOverwrite, isRenameAllowed); + Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isOverwrite, isRenameAllowed, + acidRename ? taskId++ : -1); if (null != newFiles) { newFiles.add(destPath); @@ -3342,6 +3344,8 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs, throw getHiveException(e, msg, "Failed to move: {}"); } } else { + // The Callable below can only capture final (or effectively final) locals, so make a final copy of taskId. + final int finalTaskId = acidRename ? taskId++ : -1; futures.add(pool.submit(new Callable>() { @Override public ObjectPair call() throws HiveException { @@ -3349,7 +3353,7 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs, try { Path destPath = - mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isOverwrite, isRenameAllowed); + mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isOverwrite, isRenameAllowed, finalTaskId); if (null != newFiles) { newFiles.add(destPath); @@ -3419,6 +3423,10 @@ private static Path getQualifiedPathWithoutSchemeAndAuthority(Path srcf, FileSys return ShimLoader.getHadoopShims().getPathWithoutSchemeAndAuthority(path); } + private static String getPathName(int taskId) { + return Utilities.replaceTaskId("000000", taskId) + "_0"; + } +
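getPathName drives the new file names. A quick sketch of what it yields, assuming Utilities.replaceTaskId keeps the six-digit width for small ids:

    // getPathName(0)  -> "000000_0"
    // getPathName(1)  -> "000001_0"
    // getPathName(12) -> "000012_0"
    // i.e. roughly equivalent to:
    String name = String.format("%06d", taskId) + "_0";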

/** * * Moves a file from one {@link Path} to another. If {@code isRenameAllowed} is true then the @@ -3446,15 +3454,22 @@ private static Path getQualifiedPathWithoutSchemeAndAuthority(Path srcf, FileSys * @throws IOException if there was an issue moving the file */ private static Path mvFile(HiveConf conf, FileSystem sourceFs, Path sourcePath, FileSystem destFs, Path destDirPath, - boolean isSrcLocal, boolean isOverwrite, boolean isRenameAllowed) throws IOException { + boolean isSrcLocal, boolean isOverwrite, boolean isRenameAllowed, + int taskId) throws IOException { // Strip off the file type, if any so we don't make: // 000000_0.gz -> 000000_0.gz_copy_1 final String fullname = sourcePath.getName(); - final String name = FilenameUtils.getBaseName(sourcePath.getName()); + final String name; + if (taskId == -1) { // non-acid + name = FilenameUtils.getBaseName(sourcePath.getName()); + } else { // acid + name = getPathName(taskId); + } final String type = FilenameUtils.getExtension(sourcePath.getName()); - Path destFilePath = new Path(destDirPath, fullname); + // In case of ACID, the file is ORC so the extension is not relevant and should not be inherited. + Path destFilePath = new Path(destDirPath, taskId == -1 ? fullname : name); /* * The below loop may perform bad when the destination file already exists and it has too many _copy_ @@ -3469,7 +3484,8 @@ private static Path mvFile(HiveConf conf, FileSystem sourceFs, Path sourcePath, destFs.delete(destFilePath, false); break; } - destFilePath = new Path(destDirPath, name + (Utilities.COPY_KEYWORD + counter) + (!type.isEmpty() ? "." + type : "")); + destFilePath = new Path(destDirPath, name + (Utilities.COPY_KEYWORD + counter) + + ((taskId == -1 && !type.isEmpty()) ? "." + type : "")); } if (isRenameAllowed) { @@ -3764,7 +3780,8 @@ static protected boolean needToCopy(Path srcf, Path destf, FileSystem srcFs, Fil */ static protected void copyFiles(HiveConf conf, Path srcf, Path destf, FileSystem fs, boolean isSrcLocal, boolean isAcid, - boolean isOverwrite, List newFiles) throws HiveException { + boolean isOverwrite, List newFiles, boolean isBucketed, + boolean isFullAcidTable) throws HiveException { try { // create the destination if it does not exist if (!fs.exists(destf)) { @@ -3796,7 +3813,11 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf, FileSystem if (isAcid) { moveAcidFiles(srcFs, srcs, destf, newFiles); } else { - copyFiles(conf, fs, srcs, srcFs, destf, isSrcLocal, isOverwrite, newFiles); + // For the ACID non-bucketed case, the file names have to be consistent with those produced by INSERT/UPDATE/DELETE ops, + // i.e., 000000_0, 000001_0_copy_1, etc. Any extension on the source file name is dropped, + // since ACID data files must be ORC. + copyFiles(conf, fs, srcs, srcFs, destf, isSrcLocal, isOverwrite, + newFiles, isFullAcidTable && !isBucketed); } }
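With mvFile now taskId-aware, name collisions resolve differently per branch. A sketch of the outcomes, assuming Utilities.COPY_KEYWORD is the usual "_copy_" (simplified, not the Hive method itself):

    // Sketch of the collision loop in mvFile.
    static String nextName(String name, String ext, int counter, boolean acid) {
      // ACID renames never re-attach the source extension; non-ACID copies keep it.
      return name + "_copy_" + counter + ((!acid && !ext.isEmpty()) ? "." + ext : "");
    }
    // nextName("000000_0", "gz", 1, false) -> "000000_0_copy_1.gz"   (non-ACID)
    // nextName("000000_0", "",   1, true)  -> "000000_0_copy_1"      (ACID)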
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index 5868d4dd56..54f5bab6de 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -159,14 +159,6 @@ private URI initializeFromURI(String fromPath, boolean isLocal) throws IOExcepti throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast, "source contains directory: " + oneSrc.getPath().toString())); } - if(AcidUtils.isAcidTable(table)) { - if(!AcidUtils.originalBucketFilter.accept(oneSrc.getPath())) { - //acid files (e.g. bucket_0000) have ROW_ID embedded in them and so can't be simply - //copied to a table so only allow non-acid files for now - throw new SemanticException(ErrorMsg.ACID_LOAD_DATA_INVALID_FILE_NAME, - oneSrc.getPath().getName(), table.getFullyQualifiedName()); - } - } } } catch (IOException e) { // Has to use full name to make sure it does not conflict with diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java index a9cba456ef..a141ab1691 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnLoadData.java @@ -264,10 +264,10 @@ private void loadDataNonAcid2AcidConversion(boolean isVectorized) throws Excepti //from Load Data into acid converted table {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000024_0000024_0000/000000_0"}, {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000024_0000024_0000/000000_0"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":2}\t2\t2", "t/delta_0000024_0000024_0000/000000_0_copy_1"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":3}\t3\t3", "t/delta_0000024_0000024_0000/000000_0_copy_1"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":4}\t4\t4", "t/delta_0000024_0000024_0000/000000_0_copy_2"}, - {"{\"transactionid\":24,\"bucketid\":536870912,\"rowid\":5}\t5\t5", "t/delta_0000024_0000024_0000/000000_0_copy_2"}, + {"{\"transactionid\":24,\"bucketid\":536936448,\"rowid\":0}\t2\t2", "t/delta_0000024_0000024_0000/000001_0"}, + {"{\"transactionid\":24,\"bucketid\":536936448,\"rowid\":1}\t3\t3", "t/delta_0000024_0000024_0000/000001_0"}, + {"{\"transactionid\":24,\"bucketid\":537001984,\"rowid\":0}\t4\t4", "t/delta_0000024_0000024_0000/000002_0"}, + {"{\"transactionid\":24,\"bucketid\":537001984,\"rowid\":1}\t5\t5", "t/delta_0000024_0000024_0000/000002_0"}, }; checkResult(expected, testQuery, isVectorized, "load data inpath"); @@ -281,7 +281,7 @@ private void loadDataNonAcid2AcidConversion(boolean isVectorized) throws Excepti String[][] expected2 = new String[][] { {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000030/000000_0"}, {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000030/000000_0"}, - {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":2}\t8\t8", "t/base_0000030/000000_0_copy_1"} + {"{\"transactionid\":30,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "t/base_0000030/000001_0"} }; checkResult(expected2, testQuery, isVectorized, "load data inpath overwrite");
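The reworked expectations give each renamed file its own bucket id. The bucketid values follow from the V1 codec layout — version in the top bits, bucket id shifted left 16 (a sketch of the arithmetic, not the BucketCodec API):

    // 1 << 29 = 536870912; each bucket adds 1 << 16 = 65536.
    for (int bucket = 0; bucket <= 2; bucket++) {
      int encoded = (1 << 29) | (bucket << 16);  // statement id 0
      System.out.println(bucket + " -> " + encoded);
    }
    // 0 -> 536870912, 1 -> 536936448, 2 -> 537001984

which is why rows from the second and third loaded files now carry 536936448 and 537001984 instead of all sharing 536870912.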
@@ -293,7 +293,7 @@ private void loadDataNonAcid2AcidConversion(boolean isVectorized) throws Excepti String[][] expected3 = new String[][] { {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":0}\t5\t6", "t/base_0000033/bucket_00000"}, {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":1}\t7\t8", "t/base_0000033/bucket_00000"}, - {"{\"transactionid\":30,\"bucketid\":536870912,\"rowid\":2}\t8\t8", "t/base_0000033/bucket_00000"}, + {"{\"transactionid\":30,\"bucketid\":536936448,\"rowid\":0}\t8\t8", "t/base_0000033/bucket_00001"}, {"{\"transactionid\":33,\"bucketid\":536870912,\"rowid\":0}\t9\t9", "t/base_0000033/bucket_00000"} }; @@ -457,6 +457,9 @@ public void testAbort() throws Exception { */ private void checkResult(String[][] expectedResult, String query, boolean isVectorized, String msg) throws Exception{ List rs = runStatementOnDriver(query); + for (String result : rs) { + LOG.info(result); + } checkExpected(rs, expectedResult, msg + (isVectorized ? " vect" : ""), LOG, !isVectorized); assertVectorized(isVectorized, query); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveCopyFiles.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveCopyFiles.java index c6a4a8926b..a20a2ae3ce 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveCopyFiles.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveCopyFiles.java @@ -83,7 +83,7 @@ public void testRenameNewFilesOnSameFileSystem() throws IOException { FileSystem targetFs = targetPath.getFileSystem(hiveConf); try { - Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, isSourceLocal, NO_ACID, false,null); + Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, isSourceLocal, NO_ACID, false, null, false, false); } catch (HiveException e) { e.printStackTrace(); assertTrue("Hive.copyFiles() threw an unexpected exception.", false); } @@ -107,7 +107,7 @@ public void testRenameExistingFilesOnSameFileSystem() throws IOException { FileSystem targetFs = targetPath.getFileSystem(hiveConf); try { - Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, isSourceLocal, NO_ACID, false, null); + Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, isSourceLocal, NO_ACID, false, null, false, false); } catch (HiveException e) { e.printStackTrace(); assertTrue("Hive.copyFiles() threw an unexpected exception.", false); } @@ -127,7 +127,7 @@ public void testRenameExistingFilesOnSameFileSystem() throws IOException { sourceFolder.newFile("000001_0.gz"); try { - Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, isSourceLocal, NO_ACID, false, null); + Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs, isSourceLocal, NO_ACID, false, null, false, false); } catch (HiveException e) { e.printStackTrace(); assertTrue("Hive.copyFiles() threw an unexpected exception.", false); } @@ -158,7 +158,7 @@ public void testCopyNewFilesOnDifferentFileSystem() throws IOException { Mockito.when(spyTargetFs.getUri()).thenReturn(URI.create("hdfs://" + targetPath.toUri().getPath())); try { - Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs, isSourceLocal, NO_ACID, false, null); + Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs, isSourceLocal, NO_ACID, false, null, false, false); } catch (HiveException e) { e.printStackTrace(); assertTrue("Hive.copyFiles() threw an unexpected exception.", false); } @@ -185,7 +185,7 @@ public void testCopyExistingFilesOnDifferentFileSystem() throws IOException { Mockito.when(spyTargetFs.getUri()).thenReturn(URI.create("hdfs://" + targetPath.toUri().getPath())); try { - Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs,
isSourceLocal, NO_ACID, false, null); + Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs, isSourceLocal, NO_ACID, false, null, false, false); } catch (HiveException e) { e.printStackTrace(); assertTrue("Hive.copyFiles() threw an unexpected exception.", false); @@ -205,7 +205,7 @@ public void testCopyExistingFilesOnDifferentFileSystem() throws IOException { sourceFolder.newFile("000001_0.gz"); try { - Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs, isSourceLocal, NO_ACID, false, null); + Hive.copyFiles(hiveConf, sourcePath, targetPath, spyTargetFs, isSourceLocal, NO_ACID, false, null, false, false); } catch (HiveException e) { e.printStackTrace(); assertTrue("Hive.copyFiles() threw an unexpected exception.", false); diff --git a/ql/src/test/queries/clientnegative/load_data_into_acid.q b/ql/src/test/queries/clientnegative/load_data_into_acid.q deleted file mode 100644 index 2ac5b561ae..0000000000 --- a/ql/src/test/queries/clientnegative/load_data_into_acid.q +++ /dev/null @@ -1,20 +0,0 @@ -set hive.support.concurrency=true; -set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; - -create table acid_ivot( - ctinyint TINYINT, - csmallint SMALLINT, - cint INT, - cbigint BIGINT, - cfloat FLOAT, - cdouble DOUBLE, - cstring1 STRING, - cstring2 STRING, - ctimestamp1 TIMESTAMP, - ctimestamp2 TIMESTAMP, - cboolean1 BOOLEAN, - cboolean2 BOOLEAN) stored as orc TBLPROPERTIES ('transactional'='true'); - -LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_ivot; - - diff --git a/ql/src/test/queries/clientpositive/load_data_acid_rename.q b/ql/src/test/queries/clientpositive/load_data_acid_rename.q new file mode 100644 index 0000000000..b21bc5ef3d --- /dev/null +++ b/ql/src/test/queries/clientpositive/load_data_acid_rename.q @@ -0,0 +1,12 @@ +set hive.mapred.mode=nonstrict; + +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + +drop table if exists acid_rename; +create table acid_rename (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc TBLPROPERTIES ("transactional"="true"); +load data local inpath '../../data/files/orc_split_elim.orc' into table acid_rename; + +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/acid_rename/*/000000_0; + +drop table acid_rename; diff --git a/ql/src/test/queries/clientpositive/smb_mapjoin_7.q b/ql/src/test/queries/clientpositive/smb_mapjoin_7.q index 4a6afb0496..fed931c897 100644 --- a/ql/src/test/queries/clientpositive/smb_mapjoin_7.q +++ b/ql/src/test/queries/clientpositive/smb_mapjoin_7.q @@ -16,8 +16,8 @@ create table smb_join_results(k1 int, v1 string, k2 int, v2 string); create table smb_join_results_empty_bigtable(k1 int, v1 string, k2 int, v2 string); create table normal_join_results(k1 int, v1 string, k2 int, v2 string); -load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1; -load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1; +load data local inpath '../../data/files/empty/000000_0' into table smb_bucket4_1; +load data local inpath '../../data/files/empty/000001_0' into table smb_bucket4_1; insert overwrite table smb_bucket4_2 select * from src; diff --git a/ql/src/test/results/clientnegative/load_data_into_acid.q.out b/ql/src/test/results/clientnegative/load_data_into_acid.q.out deleted file mode 100644 index 46b5cdd2c8..0000000000 --- a/ql/src/test/results/clientnegative/load_data_into_acid.q.out +++ /dev/null @@ -1,33 +0,0 @@ -PREHOOK: query: create table 
acid_ivot( - ctinyint TINYINT, - csmallint SMALLINT, - cint INT, - cbigint BIGINT, - cfloat FLOAT, - cdouble DOUBLE, - cstring1 STRING, - cstring2 STRING, - ctimestamp1 TIMESTAMP, - ctimestamp2 TIMESTAMP, - cboolean1 BOOLEAN, - cboolean2 BOOLEAN) stored as orc TBLPROPERTIES ('transactional'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@acid_ivot -POSTHOOK: query: create table acid_ivot( - ctinyint TINYINT, - csmallint SMALLINT, - cint INT, - cbigint BIGINT, - cfloat FLOAT, - cdouble DOUBLE, - cstring1 STRING, - cstring2 STRING, - ctimestamp1 TIMESTAMP, - ctimestamp2 TIMESTAMP, - cboolean1 BOOLEAN, - cboolean2 BOOLEAN) stored as orc TBLPROPERTIES ('transactional'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@acid_ivot -FAILED: SemanticException [Error 30023]: alltypesorc file name is not valid in Load Data into Acid table default.acid_ivot. Examples of valid names are: 00000_0, 00000_0_copy_1 diff --git a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out index 7a6f8c53a5..4b1313dc93 100644 --- a/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out +++ b/ql/src/test/results/clientpositive/beeline/smb_mapjoin_7.q.out @@ -38,19 +38,19 @@ POSTHOOK: query: create table normal_join_results(k1 int, v1 string, k2 int, v2 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@normal_join_results -PREHOOK: query: load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1 +PREHOOK: query: load data local inpath '../../data/files/empty/000000_0' into table smb_bucket4_1 PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@smb_bucket4_1 -POSTHOOK: query: load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1 +POSTHOOK: query: load data local inpath '../../data/files/empty/000000_0' into table smb_bucket4_1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket4_1 -PREHOOK: query: load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1 +PREHOOK: query: load data local inpath '../../data/files/empty/000001_0' into table smb_bucket4_1 PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@smb_bucket4_1 -POSTHOOK: query: load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1 +POSTHOOK: query: load data local inpath '../../data/files/empty/000001_0' into table smb_bucket4_1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket4_1 diff --git a/ql/src/test/results/clientpositive/llap/load_data_acid_rename.q.out b/ql/src/test/results/clientpositive/llap/load_data_acid_rename.q.out new file mode 100644 index 0000000000..b915cdbd31 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/load_data_acid_rename.q.out @@ -0,0 +1,29 @@ +PREHOOK: query: drop table if exists acid_rename +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists acid_rename +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table acid_rename (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc TBLPROPERTIES ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid_rename +POSTHOOK: query: create table acid_rename (userid bigint, string1 string, subtype double, decimal1 decimal, ts 
timestamp) stored as orc TBLPROPERTIES ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid_rename +PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table acid_rename +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@acid_rename +POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table acid_rename +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@acid_rename +#### A masked pattern was here #### +PREHOOK: query: drop table acid_rename +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@acid_rename +PREHOOK: Output: default@acid_rename +POSTHOOK: query: drop table acid_rename +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@acid_rename +POSTHOOK: Output: default@acid_rename diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out index b71c5b87c1..83033b07c0 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out @@ -38,19 +38,19 @@ POSTHOOK: query: create table normal_join_results(k1 int, v1 string, k2 int, v2 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@normal_join_results -PREHOOK: query: load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1 +PREHOOK: query: load data local inpath '../../data/files/empty/000000_0' into table smb_bucket4_1 PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@smb_bucket4_1 -POSTHOOK: query: load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1 +POSTHOOK: query: load data local inpath '../../data/files/empty/000000_0' into table smb_bucket4_1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket4_1 -PREHOOK: query: load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1 +PREHOOK: query: load data local inpath '../../data/files/empty/000001_0' into table smb_bucket4_1 PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@smb_bucket4_1 -POSTHOOK: query: load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1 +POSTHOOK: query: load data local inpath '../../data/files/empty/000001_0' into table smb_bucket4_1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket4_1 diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out index ac49c02913..610abab91b 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_7.q.out @@ -38,19 +38,19 @@ POSTHOOK: query: create table normal_join_results(k1 int, v1 string, k2 int, v2 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@normal_join_results -PREHOOK: query: load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1 +PREHOOK: query: load data local inpath '../../data/files/empty/000000_0' into table smb_bucket4_1 PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@smb_bucket4_1 -POSTHOOK: query: load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1 +POSTHOOK: query: load data local inpath '../../data/files/empty/000000_0' into table smb_bucket4_1 POSTHOOK: 
type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket4_1 -PREHOOK: query: load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1 +PREHOOK: query: load data local inpath '../../data/files/empty/000001_0' into table smb_bucket4_1 PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@smb_bucket4_1 -POSTHOOK: query: load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1 +POSTHOOK: query: load data local inpath '../../data/files/empty/000001_0' into table smb_bucket4_1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket4_1
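Taken together, the updated protected copyFiles contract is: the two trailing booleans are isBucketed and isFullAcidTable, and the bucket-style rename applies only when the table is full ACID and not bucketed. A sketch of a call site, mirroring the TestHiveCopyFiles updates above:

    // Plain (non-ACID) copy: both new flags false, source file names preserved.
    Hive.copyFiles(hiveConf, sourcePath, targetPath, targetFs,
        /* isSrcLocal */ true, /* isAcid */ false, /* isOverwrite */ false,
        /* newFiles */ null, /* isBucketed */ false, /* isFullAcidTable */ false);
    // With isFullAcidTable = true and isBucketed = false, loaded files are instead
    // renamed 000000_0, 000001_0, ... in sorted source order.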