diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
index 8beef09..699c7bf 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
@@ -103,7 +103,7 @@ protected void setUp() {
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             IgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, false);
+        db.loadTable(hadoopDataFile[i], src, false, false, false);
         i++;
       }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index a190155..559ebb6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -238,13 +238,13 @@ public int execute(DriverContext driverContext) {
           // Get all files from the src directory
           FileStatus[] dirs;
           ArrayList<FileStatus> files;
-          FileSystem fs;
+          FileSystem srcFs; // source filesystem
           try {
-            fs = table.getDataLocation().getFileSystem(conf);
-            dirs = fs.globStatus(tbd.getSourcePath());
+            srcFs = tbd.getSourcePath().getFileSystem(conf);
+            dirs = srcFs.globStatus(tbd.getSourcePath());
             files = new ArrayList<FileStatus>();
             for (int i = 0; (dirs != null && i < dirs.length); i++) {
-              files.addAll(Arrays.asList(fs.listStatus(dirs[i].getPath())));
+              files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath())));
               // We only check one file, so exit the loop when we have at least
               // one.
               if (files.size() > 0) {
@@ -258,7 +258,7 @@ public int execute(DriverContext driverContext) {
         if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECHECKFILEFORMAT)) {
           // Check if the file format of the file matches that of the table.
           boolean flag = HiveFileFormatUtils.checkInputFormat(
-              fs, conf, tbd.getTable().getInputFileFormatClass(), files);
+              srcFs, conf, tbd.getTable().getInputFileFormatClass(), files);
           if (!flag) {
             throw new HiveException(
                 "Wrong file format. Please check the file's format.");
@@ -271,7 +271,7 @@ public int execute(DriverContext driverContext) {
         if (tbd.getPartitionSpec().size() == 0) {
           dc = new DataContainer(table.getTTable());
           db.loadTable(tbd.getSourcePath(), tbd.getTable()
-              .getTableName(), tbd.getReplace(), tbd.getHoldDDLTime());
+              .getTableName(), tbd.getReplace(), tbd.getHoldDDLTime(), work.isSrcLocal());
           if (work.getOutputs() != null) {
             work.getOutputs().add(new WriteEntity(table,
                 (tbd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE :
@@ -400,11 +400,13 @@ public int execute(DriverContext driverContext) {
             db.validatePartitionNameCharacters(partVals);
             db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
                 tbd.getPartitionSpec(), tbd.getReplace(), tbd.getHoldDDLTime(),
-                tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd));
-            Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
+                tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal());
+            Partition partn = db.getPartition(table, tbd.getPartitionSpec(),
+                false);
 
-            if (bucketCols != null || sortCols != null) {
-              updatePartitionBucketSortColumns(table, partn, bucketCols, numBuckets, sortCols);
+            if (bucketCols != null || sortCols != null) {
+              updatePartitionBucketSortColumns(table, partn, bucketCols,
+                  numBuckets, sortCols);
             }
 
             dc = new DataContainer(table.getTTable(), partn.getTPartition());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index e10bdb4..adc2560 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1184,12 +1184,15 @@ public Database getDatabaseCurrent() throws HiveException {
    * @param holdDDLTime if true, force [re]create the partition
    * @param inheritTableSpecs if true, on [re]creating the partition, take the
    *          location/inputformat/outputformat/serde details from table spec
+   * @param isSrcLocal
+   *          If the source directory is LOCAL
    */
   public void loadPartition(Path loadPath, String tableName,
       Map<String, String> partSpec, boolean replace, boolean holdDDLTime,
-      boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir)
-      throws HiveException {
+      boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir,
+      boolean isSrcLocal) throws HiveException {
     Table tbl = getTable(tableName);
+    Path tblDataLocationPath = tbl.getDataLocation();
     try {
       /**
        * Move files before creating the partition since down stream processes
@@ -1209,7 +1212,7 @@ public void loadPartition(Path loadPath, String tableName,
       if (inheritTableSpecs) {
         Path partPath = new Path(tbl.getDataLocation(),
             Warehouse.makePartPath(partSpec));
-        newPartPath = new Path(loadPath.toUri().getScheme(), loadPath.toUri().getAuthority(),
+        newPartPath = new Path(tblDataLocationPath.toUri().getScheme(), tblDataLocationPath.toUri().getAuthority(),
             partPath.toUri().getPath());
 
         if(oldPart != null) {
@@ -1231,10 +1234,11 @@ public void loadPartition(Path loadPath, String tableName,
       }
 
       if (replace) {
-        Hive.replaceFiles(loadPath, newPartPath, oldPartPath, getConf());
+        Hive.replaceFiles(loadPath, newPartPath, oldPartPath, getConf(),
+            isSrcLocal);
       } else {
         FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
-        Hive.copyFiles(conf, loadPath, newPartPath, fs);
+        Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal);
       }
 
       // recreate the partition if it existed before
@@ -1422,7 +1426,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
       // finally load the partition -- move the file to the final table address
       loadPartition(partPath, tableName, fullPartSpec, replace, holdDDLTime, true,
-          listBucketingEnabled);
+          listBucketingEnabled, false);
       LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec);
     }
     return fullPartSpecs;
@@ -1444,15 +1448,16 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @param replace
    *          if true - replace files in the table, otherwise add files to table
    * @param holdDDLTime
+   * @param isSrcLocal
+   *          If the source directory is LOCAL
    */
   public void loadTable(Path loadPath, String tableName, boolean replace,
-      boolean holdDDLTime) throws HiveException {
+      boolean holdDDLTime, boolean isSrcLocal) throws HiveException {
     Table tbl = getTable(tableName);
-
     if (replace) {
-      tbl.replaceFiles(loadPath);
+      tbl.replaceFiles(loadPath, isSrcLocal);
     } else {
-      tbl.copyFiles(loadPath);
+      tbl.copyFiles(loadPath, isSrcLocal);
     }
 
     if (!holdDDLTime) {
@@ -2098,9 +2103,9 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectType objectType,
   }
 
   // for each file or directory in 'srcs', make mapping for every file in src to safe name in dest
-  private static List<List<Path[]>> checkPaths(HiveConf conf,
-      FileSystem fs, FileStatus[] srcs, Path destf,
-      boolean replace) throws HiveException {
+  private static List<List<Path[]>> checkPaths(HiveConf conf, FileSystem fs,
+      FileStatus[] srcs, FileSystem srcFs, Path destf, boolean replace)
+      throws HiveException {
 
     List<List<Path[]>> result = new ArrayList<List<Path[]>>();
     try {
@@ -2112,7 +2117,7 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectType objectType,
       for (FileStatus src : srcs) {
         FileStatus[] items;
         if (src.isDir()) {
-          items = fs.listStatus(src.getPath());
+          items = srcFs.listStatus(src.getPath());
           Arrays.sort(items);
         } else {
           items = new FileStatus[] {src};
@@ -2127,7 +2132,7 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectType objectType,
             // This check is redundant because temp files are removed by
             // execution layer before
            // calling loadTable/Partition. But leaving it in just in case.
-            fs.delete(itemSource, true);
+            srcFs.delete(itemSource, true);
             continue;
           }
@@ -2189,8 +2194,8 @@ private static boolean destExists(List<List<Path[]>> result, Path proposed) {
   //method is called. when the replace value is true, this method works a little different
   //from mv command if the destf is a directory, it replaces the destf instead of moving under
   //the destf. in this case, the replaced destf still preserves the original destf's permission
-  static protected boolean renameFile(HiveConf conf, Path srcf, Path destf, FileSystem fs,
-      boolean replace) throws HiveException {
+  static protected boolean renameFile(HiveConf conf, Path srcf, Path destf,
+      FileSystem fs, boolean replace, boolean isSrcLocal) throws HiveException {
     boolean success = false;
     boolean inheritPerms = HiveConf.getBoolVar(conf,
         HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
@@ -2222,11 +2227,18 @@ static protected boolean renameFile(HiveConf conf, Path srcf, Path destf, FileSy
           }
         }
       }
-      success = fs.rename(srcf, destf);
+      if (!isSrcLocal) {
+        // For NOT local src file, rename the file
+        success = fs.rename(srcf, destf);
+      } else {
+        // For local src file, copy to hdfs
+        fs.copyFromLocalFile(srcf, destf);
+        success = true;
+      }
       LOG.info((replace ? "Replacing src:" : "Renaming src:") + srcf.toString()
           + ";dest: " + destf.toString() + ";Status:" + success);
     } catch (IOException ioe) {
-      throw new HiveException("Unable to move source" + srcf + " to destination " + destf, ioe);
+      throw new HiveException("Unable to move source " + srcf + " to destination " + destf, ioe);
     }
 
     if (success && inheritPerms) {
@@ -2243,8 +2255,8 @@ static protected boolean renameFile(HiveConf conf, Path srcf, Path destf, FileSy
     return success;
   }
 
-  static protected void copyFiles(HiveConf conf, Path srcf, Path destf, FileSystem fs)
-      throws HiveException {
+  static protected void copyFiles(HiveConf conf, Path srcf, Path destf,
+      FileSystem fs, boolean isSrcLocal) throws HiveException {
     boolean inheritPerms = HiveConf.getBoolVar(conf,
         HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
     try {
@@ -2262,8 +2274,10 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf, FileSystem
     }
 
     FileStatus[] srcs;
+    FileSystem srcFs;
     try {
-      srcs = fs.globStatus(srcf);
+      srcFs = srcf.getFileSystem(conf);
+      srcs = srcFs.globStatus(srcf);
     } catch (IOException e) {
       LOG.error(StringUtils.stringifyException(e));
       throw new HiveException("addFiles: filesystem error in check phase", e);
     }
@@ -2274,14 +2288,14 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf, FileSystem
       // srcs = new FileStatus[0]; Why is this needed?
     }
     // check that source and target paths exist
-    List<List<Path[]>> result = checkPaths(conf, fs, srcs, destf, false);
-
+    List<List<Path[]>> result = checkPaths(conf, fs, srcs, srcFs, destf, false);
    // move it, move it
    try {
      for (List<Path[]> sdpairs : result) {
        for (Path[] sdpair : sdpairs) {
-          if (!renameFile(conf, sdpair[0], sdpair[1], fs, false)) {
-            throw new IOException("Cannot move " + sdpair[0] + " to " + sdpair[1]);
+          if (!renameFile(conf, sdpair[0], sdpair[1], fs, false, isSrcLocal)) {
+            throw new IOException("Cannot move " + sdpair[0] + " to "
+                + sdpair[1]);
          }
        }
      }
@@ -2304,18 +2318,22 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf, FileSystem
    *          The directory where the final data needs to go
    * @param oldPath
    *          The directory where the old data location, need to be cleaned up.
+   * @param isSrcLocal
+   *          If the source directory is LOCAL
    */
-  static protected void replaceFiles(Path srcf, Path destf, Path oldPath, HiveConf conf)
-      throws HiveException {
+  static protected void replaceFiles(Path srcf, Path destf, Path oldPath,
+      HiveConf conf, boolean isSrcLocal) throws HiveException {
     try {
-      FileSystem fs = srcf.getFileSystem(conf);
+      FileSystem destFs = destf.getFileSystem(conf);
       boolean inheritPerms = HiveConf.getBoolVar(conf,
           HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
 
       // check if srcf contains nested sub-directories
       FileStatus[] srcs;
+      FileSystem srcFs;
       try {
-        srcs = fs.globStatus(srcf);
+        srcFs = srcf.getFileSystem(conf);
+        srcs = srcFs.globStatus(srcf);
       } catch (IOException e) {
         throw new HiveException("Getting globStatus " + srcf.toString(), e);
       }
@@ -2323,7 +2341,8 @@ static protected void replaceFiles(Path srcf, Path destf, Path oldPath, HiveConf
         LOG.info("No sources specified to move: " + srcf);
         return;
       }
-      List<List<Path[]>> result = checkPaths(conf, fs, srcs, destf, true);
+      List<List<Path[]>> result = checkPaths(conf, destFs, srcs, srcFs, destf,
+          true);
 
       if (oldPath != null) {
         try {
@@ -2344,35 +2363,37 @@ static protected void replaceFiles(Path srcf, Path destf, Path oldPath, HiveConf
       if (srcs.length == 1 && srcs[0].isDir()) {
         // rename can fail if the parent doesn't exist
         Path destfp = destf.getParent();
-        if (!fs.exists(destfp)) {
-          boolean success = fs.mkdirs(destfp);
+        if (!destFs.exists(destfp)) {
+          boolean success = destFs.mkdirs(destfp);
           if (!success) {
             LOG.warn("Error creating directory " + destf.toString());
           }
           if (inheritPerms && success) {
-            fs.setPermission(destfp, fs.getFileStatus(destfp.getParent()).getPermission());
+            destFs.setPermission(destfp, destFs.getFileStatus(destfp.getParent()).getPermission());
           }
         }
 
-        boolean b = renameFile(conf, srcs[0].getPath(), destf, fs, true);
+        boolean b = renameFile(conf, srcs[0].getPath(), destf, destFs, true,
+            isSrcLocal);
         if (!b) {
           throw new HiveException("Unable to move results from " + srcs[0].getPath()
               + " to destination directory: " + destf);
         }
       } else { // srcf is a file or pattern containing wildcards
-        if (!fs.exists(destf)) {
-          boolean success = fs.mkdirs(destf);
+        if (!destFs.exists(destf)) {
+          boolean success = destFs.mkdirs(destf);
           if (!success) {
             LOG.warn("Error creating directory " + destf.toString());
           }
           if (inheritPerms && success) {
-            fs.setPermission(destf, fs.getFileStatus(destf.getParent()).getPermission());
+            destFs.setPermission(destf, destFs.getFileStatus(destf.getParent()).getPermission());
           }
         }
         // srcs must be a list of files -- ensured by LoadSemanticAnalyzer
         for (List<Path[]> sdpairs : result) {
           for (Path[] sdpair : sdpairs) {
-            if (!renameFile(conf, sdpair[0], sdpair[1], fs, true)) {
+            if (!renameFile(conf, sdpair[0], sdpair[1], destFs, true,
+                isSrcLocal)) {
               throw new IOException("Error moving: " + sdpair[0] + " into: " + sdpair[1]);
             }
           }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 8318be1..45ad315 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -655,10 +655,14 @@ public int getNumBuckets() {
    *
    * @param srcf
    *          Source directory
+   * @param isSrcLocal
+   *          If the source directory is LOCAL
    */
-  protected void replaceFiles(Path srcf) throws HiveException {
+  protected void replaceFiles(Path srcf, boolean isSrcLocal)
+      throws HiveException {
     Path tableDest = getPath();
-    Hive.replaceFiles(srcf, tableDest, tableDest, Hive.get().getConf());
+    Hive.replaceFiles(srcf, tableDest, tableDest, Hive.get().getConf(),
+        isSrcLocal);
   }
 
   /**
@@ -666,12 +670,14 @@ protected void replaceFiles(Path srcf) throws HiveException {
    *
    * @param srcf
    *          Files to be moved. Leaf directories or globbed file paths
+   * @param isSrcLocal
+   *          If the source directory is LOCAL
    */
-  protected void copyFiles(Path srcf) throws HiveException {
+  protected void copyFiles(Path srcf, boolean isSrcLocal) throws HiveException {
     FileSystem fs;
     try {
       fs = getDataLocation().getFileSystem(Hive.get().getConf());
-      Hive.copyFiles(Hive.get().getConf(), srcf, getPath(), fs);
+      Hive.copyFiles(Hive.get().getConf(), srcf, getPath(), fs, isSrcLocal);
     } catch (IOException e) {
       throw new HiveException("addFiles: filesystem error in check phase", e);
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index 3dd0f6f..6c53447 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -35,7 +35,6 @@
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.plan.CopyWork;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.StatsWork;
@@ -234,18 +233,6 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
     inputs.add(new ReadEntity(new Path(fromURI), isLocal));
 
     Task<? extends Serializable> rTask = null;
 
-    // create copy work
-    if (isLocal) {
-      // if the local keyword is specified - we will always make a copy. this
-      // might seem redundant in the case
-      // that the hive warehouse is also located in the local file system - but
-      // that's just a test case.
-      String copyURIStr = ctx.getExternalTmpPath(toURI).toString();
-      URI copyURI = URI.create(copyURIStr);
-      rTask = TaskFactory.get(new CopyWork(new Path(fromURI), new Path(copyURI)), conf);
-      fromURI = copyURI;
-    }
-
     // create final load/move work
 
     Map<String, String> partSpec = ts.getPartSpec();
@@ -281,7 +268,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
         Utilities.getTableDesc(ts.tableHandle), partSpec, isOverWrite);
     Task<? extends Serializable> childTask = TaskFactory.get(new MoveWork(getInputs(),
-        getOutputs(), loadTableWork, null, true), conf);
+        getOutputs(), loadTableWork, null, true, isLocal), conf);
     if (rTask != null) {
       rTask.addDependentTask(childTask);
     } else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
index 407450e..e43156f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
@@ -38,6 +38,7 @@
   private LoadMultiFilesDesc loadMultiFilesWork;
 
   private boolean checkFileFormat;
+  private boolean srcLocal;
 
   /**
    * ReadEntitites that are passed to the hooks.
@@ -63,6 +64,16 @@ public MoveWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs) {
 
   public MoveWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
       final LoadTableDesc loadTableWork, final LoadFileDesc loadFileWork,
+      boolean checkFileFormat, boolean srcLocal) {
+    this(inputs, outputs);
+    this.loadTableWork = loadTableWork;
+    this.loadFileWork = loadFileWork;
+    this.checkFileFormat = checkFileFormat;
+    this.srcLocal = srcLocal;
+  }
+
+  public MoveWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+      final LoadTableDesc loadTableWork, final LoadFileDesc loadFileWork,
       boolean checkFileFormat) {
     this(inputs, outputs);
     this.loadTableWork = loadTableWork;
@@ -121,4 +132,12 @@ public void setOutputs(HashSet<WriteEntity> outputs) {
     this.outputs = outputs;
   }
 
+  public boolean isSrcLocal() {
+    return srcLocal;
+  }
+
+  public void setSrcLocal(boolean srcLocal) {
+    this.srcLocal = srcLocal;
+  }
+
 }
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
index 5991aae..91efb58 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
@@ -125,7 +125,7 @@
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             IgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, false);
+        db.loadTable(hadoopDataFile[i], src, false, false, true);
         i++;
       }
diff --git a/ql/src/test/queries/clientpositive/load_local_dir_test.q b/ql/src/test/queries/clientpositive/load_local_dir_test.q
new file mode 100644
index 0000000..863afee
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/load_local_dir_test.q
@@ -0,0 +1,6 @@
+
+create table load_local (id INT);
+
+load data local inpath '../../data/files/ext_test/' into table load_local;
+
+select * from load_local;
diff --git a/ql/src/test/results/clientpositive/input4.q.out b/ql/src/test/results/clientpositive/input4.q.out
index 9b169f9..7fd1645 100644
--- a/ql/src/test/results/clientpositive/input4.q.out
+++ b/ql/src/test/results/clientpositive/input4.q.out
@@ -14,14 +14,9 @@ POSTHOOK: type: LOAD
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-1 depends on stages: Stage-0
-  Stage-2 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-0
-    Copy
-#### A masked pattern was here ####
-
-  Stage: Stage-1
     Move Operator
       tables:
           replace: false
@@ -31,7 +26,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.input4
 
-  Stage: Stage-2
+  Stage: Stage-1
     Stats-Aggr Operator
 
 PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
diff --git a/ql/src/test/results/clientpositive/load_local_dir_test.q.out b/ql/src/test/results/clientpositive/load_local_dir_test.q.out
new file mode 100644
index 0000000..4492255
--- /dev/null
+++ b/ql/src/test/results/clientpositive/load_local_dir_test.q.out
@@ -0,0 +1,29 @@
+PREHOOK: query: create table load_local (id INT)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: create table load_local (id INT)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@load_local
+PREHOOK: query: load data local inpath '../../data/files/ext_test/' into table load_local
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@load_local
+POSTHOOK: query: load data local inpath '../../data/files/ext_test/' into table load_local
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@load_local
+PREHOOK: query: select * from load_local
+PREHOOK: type: QUERY
+PREHOOK: Input: default@load_local
+#### A masked pattern was here ####
+POSTHOOK: query: select * from load_local
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@load_local
+#### A masked pattern was here ####
+1
+2
+3
+4
+5
+6
diff --git a/ql/src/test/results/clientpositive/stats11.q.out b/ql/src/test/results/clientpositive/stats11.q.out
index ce1197e..c3158a7 100644
--- a/ql/src/test/results/clientpositive/stats11.q.out
+++ b/ql/src/test/results/clientpositive/stats11.q.out
@@ -37,14 +37,9 @@ POSTHOOK: type: LOAD
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-1 depends on stages: Stage-0
-  Stage-2 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-0
-    Copy
-#### A masked pattern was here ####
-
-  Stage: Stage-1
     Move Operator
       tables:
          partition:
@@ -56,7 +51,7 @@ STAGE PLANS:
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.srcbucket_mapjoin_part
 
-  Stage: Stage-2
+  Stage: Stage-1
     Stats-Aggr Operator
 
 PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08')
diff --git a/ql/src/test/results/clientpositive/stats3.q.out b/ql/src/test/results/clientpositive/stats3.q.out
index a14e449..d8a087b 100644
--- a/ql/src/test/results/clientpositive/stats3.q.out
+++ b/ql/src/test/results/clientpositive/stats3.q.out
@@ -33,14 +33,9 @@ TOK_LOAD
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
   Stage-1 depends on stages: Stage-0
-  Stage-2 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-0
-    Copy
-#### A masked pattern was here ####
-
-  Stage: Stage-1
     Move Operator
       tables:
           replace: true
@@ -61,7 +56,7 @@ STAGE PLANS:
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.hive_test_src
 
-  Stage: Stage-2
+  Stage: Stage-1
     Stats-Aggr Operator
 
 PREHOOK: query: load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src
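
Note (not part of the patch): the core behavioral change is the isSrcLocal branch added to Hive.renameFile above. A LOAD from a local path can no longer be handled by a rename, because FileSystem.rename does not cross filesystems, so the patch copies the data with copyFromLocalFile instead; a non-local source stays on the destination filesystem and is still renamed. Below is a minimal standalone sketch of that decision using only the Hadoop FileSystem API; the class and method names (MoveVsCopySketch, moveToDest) are illustrative and do not exist in Hive.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MoveVsCopySketch {
  // Mirrors the shape of the new branch in Hive.renameFile: rename when the
  // source already lives on the destination filesystem, copy when it is local.
  static boolean moveToDest(Configuration conf, Path src, Path dest,
      boolean isSrcLocal) throws IOException {
    FileSystem destFs = dest.getFileSystem(conf);
    if (!isSrcLocal) {
      // Source is on the destination filesystem; rename is a cheap metadata
      // operation, which is all the old code path needed.
      return destFs.rename(src, dest);
    }
    // LOAD DATA LOCAL: rename cannot cross filesystems, so copy the bytes up
    // from the local filesystem into the destination filesystem instead.
    destFs.copyFromLocalFile(src, dest);
    return true;
  }
}

This is also why the separate Copy stage disappears from the explain plans in the .q.out updates above: the local copy now happens inside the Move Operator stage rather than as its own Stage-0 Copy task.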