diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java index da2ca7276d..49cb302023 100644 --- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java +++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/TestStreaming.java @@ -20,6 +20,7 @@ import java.io.ByteArrayOutputStream; import java.io.File; +import java.io.FileFilter; import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintStream; @@ -2056,7 +2057,13 @@ public void testErrorHandling() throws Exception { if(!deltaDir.getName().startsWith("delta")) { continue; } - File[] bucketFiles = deltaDir.listFiles(); + File[] bucketFiles = deltaDir.listFiles(new FileFilter() { + @Override + public boolean accept(File pathname) { + String name = pathname.getName(); + return !name.startsWith("_") && !name.startsWith("."); + } + }); for (File bucketFile : bucketFiles) { if(bucketFile.toString().endsWith("length")) { continue; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 98bb938c13..039b79bb78 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Utilities.MissingBucketsContext; import org.apache.hadoop.hive.ql.io.AcidUtils; -import org.apache.hadoop.hive.ql.io.AcidUtils.Operation; import org.apache.hadoop.hive.ql.io.BucketCodec; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveKey; @@ -264,15 +263,7 @@ private void commitOneOutPath(int idx, FileSystem fs, List commitPaths) } FileUtils.mkdir(fs, finalPaths[idx].getParent(), hconf); } - // If we're updating or deleting there may be no file to close. This can happen - // because the where clause strained out all of the records for a given bucket. So - // before attempting the rename below, check if our file exists. If it doesn't, - // then skip the rename. If it does try it. We could just blindly try the rename - // and avoid the extra stat, but that would mask other errors. 
- Operation acidOp = conf.getWriteType(); - boolean needToRename = outPaths[idx] != null && ((acidOp != Operation.UPDATE - && acidOp != Operation.DELETE) || fs.exists(outPaths[idx])); - if (needToRename && outPaths[idx] != null) { + if(outPaths[idx] != null && fs.exists(outPaths[idx])) { if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) { Utilities.FILE_OP_LOGGER.trace("committing " + outPaths[idx] + " to " + finalPaths[idx] + " (" + isMmTable + ")"); diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 553e8bcf4e..adb83dd44f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -18,17 +18,7 @@ package org.apache.hadoop.hive.ql.io; -import java.io.IOException; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.regex.Pattern; - +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -39,14 +29,15 @@ import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.TransactionalValidationListener; import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; -import org.apache.hadoop.hive.metastore.TransactionalValidationListener; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.io.orc.OrcFile; import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; import org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater; import org.apache.hadoop.hive.ql.io.orc.Reader; +import org.apache.hadoop.hive.ql.io.orc.Writer; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.CreateTableDesc; import org.apache.hadoop.hive.shims.HadoopShims; @@ -59,7 +50,17 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; +import java.io.IOException; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.regex.Pattern; import static org.apache.hadoop.hive.ql.exec.Utilities.COPY_KEYWORD; @@ -1605,4 +1606,88 @@ public static boolean isRawFormat(Path baseOrDeltaDir, FileSystem fs) throws IOE } } } + + /** + * Logic related to versioning acid data format. An {@code ACID_FORMAT} file is written to each + * base/delta/delete_delta dir written by a full acid write or compaction. This is the primary + * mechanism for versioning acid data. + * + * Each individual ORC file written stores the current version as a user property in ORC footer. + * All data files produced by Acid write should have this (starting with Hive 3.0), including + * those written by compactor. This is more for sanity checking in case someone moved the files + * around or something like that. 
+ */ + public static final class OrcAcidVersion { + private static final String ACID_VERSION_KEY = "hive.acid.version"; + private static final String ACID_FORMAT = "_orc_acid_version"; + public static final int ORC_ACID_VERSION_DEFAULT = 0; + /** + * 2 is the version of Acid released in Hive 3.0. + */ + public static final int ORC_ACID_VERSION = 2; + /** + * Include current acid version in file footer. + * @param writer - file writer + */ + public static void setAcidVersionInDataFile(Writer writer) { + //so that we know which version wrote the file + ByteBuffer bf = ByteBuffer.allocate(4).putInt(ORC_ACID_VERSION); + bf.rewind(); //don't ask - some ByteBuffer weirdness. w/o this, empty buffer is written + writer.addUserMetadata(ACID_VERSION_KEY, bf); + } + /** + * This is smart enough to handle streaming ingest where there could be a + * {@link OrcAcidUtils#DELTA_SIDE_FILE_SUFFIX} side file. + * @param dataFile - ORC acid data file + * @return version property from file if there, + * {@link #ORC_ACID_VERSION_DEFAULT} otherwise + */ + @VisibleForTesting + public static int getAcidVersionFromDataFile(Path dataFile, FileSystem fs) throws IOException { + FileStatus fileStatus = fs.getFileStatus(dataFile); + Reader orcReader = OrcFile.createReader(dataFile, + OrcFile.readerOptions(fs.getConf()) + .filesystem(fs) + //make sure to check for side file in case streaming ingest died + .maxLength(getLogicalLength(fs, fileStatus))); + if(orcReader.hasMetadataValue(ACID_VERSION_KEY)) { + return orcReader.getMetadataValue(ACID_VERSION_KEY).getInt(); + } + return ORC_ACID_VERSION_DEFAULT; + } + /** + * This creates a version file in {@code deltaOrBaseDir} + * @param deltaOrBaseDir - where to create the version file + */ + public static void writeVersionFile(Path deltaOrBaseDir, FileSystem fs) throws IOException { + Path formatFile = getVersionFilePath(deltaOrBaseDir); + if(!fs.exists(formatFile)) { + try (FSDataOutputStream strm = fs.create(formatFile, false)) { + strm.writeInt(ORC_ACID_VERSION); + } catch (IOException ioe) { + LOG.error("Failed to create " + formatFile + " due to: " + ioe.getMessage(), ioe); + throw ioe; + } + } + } + public static Path getVersionFilePath(Path deltaOrBase) { + return new Path(deltaOrBase, ACID_FORMAT); + } + @VisibleForTesting + public static int getAcidVersionFromMetaFile(Path deltaOrBaseDir, FileSystem fs) + throws IOException { + Path formatFile = getVersionFilePath(deltaOrBaseDir); + if(!fs.exists(formatFile)) { + LOG.debug(formatFile + " not found, returning default: " + ORC_ACID_VERSION_DEFAULT); + return ORC_ACID_VERSION_DEFAULT; + } + try (FSDataInputStream inputStream = fs.open(formatFile)) { + return inputStream.readInt(); + } + catch(IOException ex) { + LOG.error(formatFile + " is unreadable due to: " + ex.getMessage(), ex); + throw ex; + } + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java index f1f638d980..d714bef486 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java @@ -40,19 +40,11 @@ import org.apache.hadoop.hive.serde2.SerDeStats; import org.apache.hadoop.hive.serde2.SerDeUtils; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import 
org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; -import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; -import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.NullWritable; @@ -300,6 +292,7 @@ public RecordUpdater getRecordUpdater(Path path, opts.inspector(options.getInspector()) .callback(watcher); final Writer writer = OrcFile.createWriter(filename, opts); + AcidUtils.OrcAcidVersion.setAcidVersionInDataFile(writer); return new org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter() { @Override public void write(Writable w) throws IOException { diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java index b90ce6ed2f..097e89032b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hive.ql.io.BucketCodec; import org.apache.hadoop.hive.ql.io.RecordIdentifier; import org.apache.hadoop.hive.ql.io.RecordUpdater; -import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.serde2.SerDeStats; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructField; @@ -63,9 +62,6 @@ private static final Logger LOG = LoggerFactory.getLogger(OrcRecordUpdater.class); static final String ACID_KEY_INDEX_NAME = "hive.acid.key.index"; - private static final String ACID_FORMAT = "_orc_acid_version"; - private static final int ORC_ACID_VERSION = 0; - final static int INSERT_OPERATION = 0; final static int UPDATE_OPERATION = 1; @@ -86,6 +82,7 @@ final static long DELTA_STRIPE_SIZE = 16 * 1024 * 1024; private static final Charset UTF8 = Charset.forName("UTF-8"); + private static final CharsetDecoder utf8Decoder = UTF8.newDecoder(); private final AcidOutputFormat.Options options; private final AcidUtils.AcidOperationalProperties acidOperationalProperties; @@ -197,9 +194,9 @@ static StructObjectInspector createEventSchema(ObjectInspector rowInspector) { return new OrcStruct.OrcStructInspector(fields); } /** - * @param path - partition root + * @param partitionRoot - partition root (or table root if not partitioned) */ - OrcRecordUpdater(Path path, + OrcRecordUpdater(Path partitionRoot, AcidOutputFormat.Options options) throws IOException { this.options = options; // Initialize acidOperationalProperties based on table properties, and @@ -227,27 +224,16 @@ static StructObjectInspector createEventSchema(ObjectInspector rowInspector) { } } this.bucket.set(bucketCodec.encode(options)); - this.path = AcidUtils.createFilename(path, options); + this.path = AcidUtils.createFilename(partitionRoot, options); this.deleteEventWriter = null; this.deleteEventPath = null; FileSystem fs = options.getFilesystem(); if (fs == null) { - fs = path.getFileSystem(options.getConfiguration()); 
+ fs = partitionRoot.getFileSystem(options.getConfiguration()); } this.fs = fs; - Path formatFile = new Path(path, ACID_FORMAT); - if(!fs.exists(formatFile)) { - try (FSDataOutputStream strm = fs.create(formatFile, false)) { - strm.writeInt(ORC_ACID_VERSION); - } catch (IOException ioe) { - if (LOG.isDebugEnabled()) { - LOG.debug("Failed to create " + path + "/" + ACID_FORMAT + " with " + - ioe); - } - } - } if (options.getMinimumTransactionId() != options.getMaximumTransactionId() - && !options.isWritingBase()){ + && !options.isWritingBase()) { //throw if file already exists as that should never happen flushLengths = fs.create(OrcAcidUtils.getSideFile(this.path), false, 8, options.getReporter()); @@ -284,7 +270,7 @@ static StructObjectInspector createEventSchema(ObjectInspector rowInspector) { // This writes to a file in directory which starts with "delete_delta_..." // The actual initialization of a writer only happens if any delete events are written //to avoid empty files. - this.deleteEventPath = AcidUtils.createFilename(path, deleteOptions); + this.deleteEventPath = AcidUtils.createFilename(partitionRoot, deleteOptions); /** * HIVE-14514 is not done so we can't clone writerOptions(). So here we create a new * options object to make sure insert and delete writers don't share them (like the @@ -321,7 +307,6 @@ static StructObjectInspector createEventSchema(ObjectInspector rowInspector) { item.setFieldValue(BUCKET, bucket); item.setFieldValue(ROW_ID, rowId); } - @Override public String toString() { return getClass().getName() + "[" + path +"]"; @@ -382,9 +367,7 @@ private void addSimpleEvent(int operation, long currentTransaction, long rowId, item.setFieldValue(OrcRecordUpdater.OPERATION, new IntWritable(operation)); item.setFieldValue(OrcRecordUpdater.ROW, (operation == DELETE_OPERATION ? null : row)); indexBuilder.addKey(operation, originalTransaction, bucket.get(), rowId); - if (writer == null) { - writer = OrcFile.createWriter(path, writerOptions); - } + initWriter(); writer.addRow(item); restoreBucket(currentBucket, operation); } @@ -418,7 +401,9 @@ private void addSplitUpdateEvent(int operation, long currentTransaction, long ro // Initialize an indexBuilder for deleteEvents. (HIVE-17284) deleteEventIndexBuilder = new KeyIndexBuilder("delete"); this.deleteEventWriter = OrcFile.createWriter(deleteEventPath, - deleteWriterOptions.callback(deleteEventIndexBuilder)); + deleteWriterOptions.callback(deleteEventIndexBuilder)); + AcidUtils.OrcAcidVersion.setAcidVersionInDataFile(deleteEventWriter); + AcidUtils.OrcAcidVersion.writeVersionFile(this.deleteEventPath.getParent(), fs); } // A delete/update generates a delete event for the original row. @@ -484,9 +469,7 @@ public void flush() throws IOException { throw new IllegalStateException("Attempting to flush a RecordUpdater on " + path + " with a single transaction."); } - if (writer == null) { - writer = OrcFile.createWriter(path, writerOptions); - } + initWriter(); long len = writer.writeIntermediateFooter(); flushLengths.writeLong(len); OrcInputFormat.SHIMS.hflush(flushLengths); @@ -509,10 +492,8 @@ public void close(boolean abort) throws IOException { writer.close(); // normal close, when there are inserts. } } else { - if (writer == null) { - //so that we create empty bucket files when needed (but see HIVE-17138) - writer = OrcFile.createWriter(path, writerOptions); - } + //so that we create empty bucket files when needed (but see HIVE-17138) + initWriter(); writer.close(); // normal close. 
} if (deleteEventWriter != null) { @@ -533,6 +514,13 @@ public void close(boolean abort) throws IOException { deleteEventWriter = null; writerClosed = true; } + private void initWriter() throws IOException { + if (writer == null) { + writer = OrcFile.createWriter(path, writerOptions); + AcidUtils.OrcAcidVersion.setAcidVersionInDataFile(writer); + AcidUtils.OrcAcidVersion.writeVersionFile(path.getParent(), fs); + } + } @Override public SerDeStats getStats() { @@ -543,9 +531,6 @@ public SerDeStats getStats() { return stats; } - private static final Charset utf8 = Charset.forName("UTF-8"); - private static final CharsetDecoder utf8Decoder = utf8.newDecoder(); - static RecordIdentifier[] parseKeyIndex(Reader reader) { String[] stripes; try { diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 9c3b54f56f..11f6bcae29 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -3641,13 +3641,13 @@ produced by a (optimized) Union All query └── -ext-10000 ├── HIVE_UNION_SUBDIR_1 │   └── 000000_0 - │   ├── _orc_acid_version │   └── delta_0000019_0000019_0001 + │   ├── _orc_acid_version │   └── bucket_00000 ├── HIVE_UNION_SUBDIR_2 │   └── 000000_0 - │   ├── _orc_acid_version │   └── delta_0000019_0000019_0002 + │   ├── _orc_acid_version │   └── bucket_00000 The assumption is that we either have all data in subdirs or root of srcPath but not both. @@ -3706,7 +3706,10 @@ private static void moveAcidFiles(String deltaFileType, PathFilter pathFilter, F try { if (!createdDeltaDirs.contains(deltaDest)) { try { - fs.mkdirs(deltaDest); + if(fs.mkdirs(deltaDest)) { + fs.rename(AcidUtils.OrcAcidVersion.getVersionFilePath(deltaStat.getPath()), + AcidUtils.OrcAcidVersion.getVersionFilePath(deltaDest)); + } createdDeltaDirs.add(deltaDest); } catch (IOException swallowIt) { // Don't worry about this, as it likely just means it's already been created. diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java index 236e585dc8..4bc83aa6d0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java @@ -48,7 +48,6 @@ import org.apache.hadoop.hive.ql.io.AcidInputFormat; import org.apache.hadoop.hive.ql.io.AcidOutputFormat; import org.apache.hadoop.hive.ql.io.AcidUtils; -import org.apache.hadoop.hive.ql.io.AcidUtils.AcidOperationalProperties; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.io.IOConstants; import org.apache.hadoop.hive.ql.io.RecordIdentifier; @@ -935,6 +934,7 @@ public void commitJob(JobContext context) throws IOException { " not found. Assuming 0 splits. Creating " + newDeltaDir); fs.mkdirs(newDeltaDir); createCompactorMarker(conf, newDeltaDir, fs); + AcidUtils.OrcAcidVersion.writeVersionFile(newDeltaDir, fs); return; } FileStatus[] contents = fs.listStatus(tmpLocation);//expect 1 base or delta dir in this list @@ -943,7 +943,12 @@ public void commitJob(JobContext context) throws IOException { for (FileStatus fileStatus : contents) { //newPath is the base/delta dir Path newPath = new Path(finalLocation, fileStatus.getPath().getName()); + /*rename(A, B) has "interesting" behavior if A and B are directories. If B doesn't exist, + * it does the expected operation and everything that was in A is now in B. 
If B exists, + * it will make A a child of B... thus make sure the rename() is done before creating the + * meta files which will create base_x/ (i.e. B)...*/ fs.rename(fileStatus.getPath(), newPath); + AcidUtils.OrcAcidVersion.writeVersionFile(newPath, fs); createCompactorMarker(conf, newPath, fs); } fs.delete(tmpLocation, true); diff --git ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java index 2a1545f1da..39a88bf67f 100644 --- ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java +++ ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java @@ -17,16 +17,20 @@ */ package org.apache.hadoop.hive.ql; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.MetastoreTaskThread; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.LockState; import org.apache.hadoop.hive.metastore.api.LockType; +import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.TxnState; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService; import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; import org.apache.hadoop.hive.metastore.txn.TxnStore; @@ -834,4 +838,60 @@ public void testMoreBucketsThanReducers2() throws Exception { int[][] expected = {{0, -1},{0, -1}, {1, -1}, {1, -1}, {2, -1}, {2, -1}, {3, -1}, {3, -1}}; Assert.assertEquals(stringifyValues(expected), r); } + @Test + public void testVersioning() throws Exception { + hiveConf.set(MetastoreConf.ConfVars.CREATE_TABLES_AS_ACID.getVarname(), "true"); + runStatementOnDriver("drop table if exists T"); + runStatementOnDriver("create table T (a int, b int) stored as orc"); + int[][] data = {{1, 2}}; + //create 1 delta file bucket_00000 + runStatementOnDriver("insert into T" + makeValuesClause(data)); + + //delete the bucket files so now we have empty delta dirs + List rs = runStatementOnDriver("select distinct INPUT__FILE__NAME from T"); + FileSystem fs = FileSystem.get(hiveConf); + Assert.assertTrue(rs != null && rs.size() == 1 && rs.get(0).contains(AcidUtils.DELTA_PREFIX)); + Path filePath = new Path(rs.get(0)); + int version = AcidUtils.OrcAcidVersion.getAcidVersionFromDataFile(filePath, fs); + //check it has expected version marker + Assert.assertEquals("Unexpected version marker in " + filePath, + AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, version); + + //check that delta dir has a version file with expected value + filePath = filePath.getParent(); + Assert.assertTrue(filePath.getName().startsWith(AcidUtils.DELTA_PREFIX)); + int versionFromMetaFile = AcidUtils.OrcAcidVersion + .getAcidVersionFromMetaFile(filePath, fs); + Assert.assertEquals("Unexpected version marker in " + filePath, + AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, versionFromMetaFile); + + runStatementOnDriver("insert into T" + makeValuesClause(data)); + runStatementOnDriver("alter table T compact 'major'"); + TestTxnCommands2.runWorker(hiveConf); + + //check status of compaction job + TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf); + ShowCompactResponse resp = txnHandler.showCompact(new 
ShowCompactRequest()); + Assert.assertEquals("Unexpected number of compactions in history", 1, resp.getCompactsSize()); + Assert.assertEquals("Unexpected 0 compaction state", + TxnStore.CLEANING_RESPONSE, resp.getCompacts().get(0).getState()); + Assert.assertTrue(resp.getCompacts().get(0).getHadoopJobId().startsWith("job_local")); + + rs = runStatementOnDriver("select distinct INPUT__FILE__NAME from T"); + Assert.assertTrue(rs != null && rs.size() == 1 && rs.get(0).contains(AcidUtils.BASE_PREFIX)); + + filePath = new Path(rs.get(0)); + version = AcidUtils.OrcAcidVersion.getAcidVersionFromDataFile(filePath, fs); + //check that files produced by compaction still have the version marker + Assert.assertEquals("Unexpected version marker in " + filePath, + AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, version); + + //check that compacted base dir has a version file with expected value + filePath = filePath.getParent(); + Assert.assertTrue(filePath.getName().startsWith(AcidUtils.BASE_PREFIX)); + versionFromMetaFile = AcidUtils.OrcAcidVersion.getAcidVersionFromMetaFile( + filePath, fs); + Assert.assertEquals("Unexpected version marker in " + filePath, + AcidUtils.OrcAcidVersion.ORC_ACID_VERSION, versionFromMetaFile); + } } diff --git ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index 8a6a056326..37c15a8770 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -2473,14 +2473,14 @@ public void testCombinationInputFormatWithAcid() throws Exception { assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00000", split.getPath().toString()); assertEquals(0, split.getStart()); - assertEquals(648, split.getLength()); + assertEquals(663, split.getLength()); split = (HiveInputFormat.HiveInputSplit) splits[1]; assertEquals("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat", split.inputFormatClassName()); assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00001", split.getPath().toString()); assertEquals(0, split.getStart()); - assertEquals(674, split.getLength()); + assertEquals(690, split.getLength()); CombineHiveInputFormat.CombineHiveInputSplit combineSplit = (CombineHiveInputFormat.CombineHiveInputSplit) splits[2]; assertEquals(BUCKETS, combineSplit.getNumPaths()); diff --git ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java index 063812610e..a1b97d0a9b 100644 --- ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java +++ ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java @@ -307,7 +307,7 @@ public void minorTableWithBase() throws Exception { for (int i = 0; i < stat.length; i++) { if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) { sawNewDelta = true; - FileStatus[] buckets = fs.listStatus(stat[i].getPath()); + FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter); Assert.assertEquals(2, buckets.length); Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]")); Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]")); @@ -316,7 +316,7 @@ public void minorTableWithBase() throws Exception { } if (stat[i].getPath().getName().equals(makeDeleteDeltaDirNameCompacted(21, 24))) { sawNewDelta = true; - FileStatus[] buckets = fs.listStatus(stat[i].getPath()); + FileStatus[] buckets = 
fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter); Assert.assertEquals(2, buckets.length); Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]")); Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]")); @@ -439,7 +439,7 @@ public void minorPartitionWithBase() throws Exception { for (int i = 0; i < stat.length; i++) { if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) { sawNewDelta = true; - FileStatus[] buckets = fs.listStatus(stat[i].getPath()); + FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter); Assert.assertEquals(2, buckets.length); Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]")); Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]")); @@ -447,14 +447,14 @@ public void minorPartitionWithBase() throws Exception { Assert.assertEquals(104L, buckets[1].getLen()); } if (stat[i].getPath().getName().equals(makeDeleteDeltaDirNameCompacted(21, 24))) { - sawNewDelta = true; - FileStatus[] buckets = fs.listStatus(stat[i].getPath()); - Assert.assertEquals(2, buckets.length); - Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]")); - Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]")); - Assert.assertEquals(104L, buckets[0].getLen()); - Assert.assertEquals(104L, buckets[1].getLen()); - } else { + sawNewDelta = true; + FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter); + Assert.assertEquals(2, buckets.length); + Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]")); + Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]")); + Assert.assertEquals(104L, buckets[0].getLen()); + Assert.assertEquals(104L, buckets[1].getLen()); + } else { LOG.debug("This is not the delta file you are looking for " + stat[i].getPath().getName()); } } @@ -491,7 +491,7 @@ public void minorTableNoBase() throws Exception { for (int i = 0; i < stat.length; i++) { if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(1, 4))) { sawNewDelta = true; - FileStatus[] buckets = fs.listStatus(stat[i].getPath()); + FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter); Assert.assertEquals(2, buckets.length); Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]")); Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]")); @@ -500,7 +500,7 @@ public void minorTableNoBase() throws Exception { } if (stat[i].getPath().getName().equals(makeDeleteDeltaDirNameCompacted(1, 4))) { sawNewDelta = true; - FileStatus[] buckets = fs.listStatus(stat[i].getPath()); + FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter); Assert.assertEquals(2, buckets.length); Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]")); Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]")); @@ -843,7 +843,7 @@ public void minorTableLegacy() throws Exception { for (int i = 0; i < stat.length; i++) { if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) { sawNewDelta = true; - FileStatus[] buckets = fs.listStatus(stat[i].getPath()); + FileStatus[] buckets = fs.listStatus(stat[i].getPath(), AcidUtils.hiddenFileFilter); Assert.assertEquals(2, buckets.length); Assert.assertTrue(buckets[0].getPath().getName().matches("bucket_0000[01]")); Assert.assertTrue(buckets[1].getPath().getName().matches("bucket_0000[01]")); diff 
--git ql/src/test/results/clientpositive/acid_nullscan.q.out ql/src/test/results/clientpositive/acid_nullscan.q.out index 7fcc831239..a556591cb1 100644 --- ql/src/test/results/clientpositive/acid_nullscan.q.out +++ ql/src/test/results/clientpositive/acid_nullscan.q.out @@ -42,12 +42,12 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid_vectorized - Statistics: Num rows: 1 Data size: 24510 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 25000 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: false predicate: false (type: boolean) - Statistics: Num rows: 1 Data size: 24510 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 25000 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(a) mode: hash @@ -83,7 +83,7 @@ STAGE PLANS: serialization.ddl struct acid_vectorized { i32 a, string b} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe - totalSize 2451 + totalSize 2500 transactional true transactional_properties default #### A masked pattern was here #### @@ -106,7 +106,7 @@ STAGE PLANS: serialization.ddl struct acid_vectorized { i32 a, string b} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2451 + totalSize 2500 transactional true transactional_properties default #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/acid_table_stats.q.out ql/src/test/results/clientpositive/acid_table_stats.q.out index 74d4c44592..0dba1cbe65 100644 --- ql/src/test/results/clientpositive/acid_table_stats.q.out +++ ql/src/test/results/clientpositive/acid_table_stats.q.out @@ -95,7 +95,7 @@ Partition Parameters: numFiles 2 numRows 0 rawDataSize 0 - totalSize 3950 + totalSize 3981 #### A masked pattern was here #### # Storage Information @@ -210,7 +210,7 @@ Partition Parameters: numFiles 2 numRows 1000 rawDataSize 208000 - totalSize 3950 + totalSize 3981 #### A masked pattern was here #### # Storage Information @@ -261,7 +261,7 @@ Partition Parameters: numFiles 2 numRows 1000 rawDataSize 208000 - totalSize 3950 + totalSize 3981 #### A masked pattern was here #### # Storage Information @@ -386,7 +386,7 @@ Partition Parameters: numFiles 4 numRows 1000 rawDataSize 208000 - totalSize 7904 + totalSize 7966 #### A masked pattern was here #### # Storage Information @@ -433,7 +433,7 @@ Partition Parameters: numFiles 4 numRows 2000 rawDataSize 416000 - totalSize 7904 + totalSize 7966 #### A masked pattern was here #### # Storage Information diff --git ql/src/test/results/clientpositive/autoColumnStats_4.q.out ql/src/test/results/clientpositive/autoColumnStats_4.q.out index b3df04fc9a..82bee313ff 100644 --- ql/src/test/results/clientpositive/autoColumnStats_4.q.out +++ ql/src/test/results/clientpositive/autoColumnStats_4.q.out @@ -197,7 +197,7 @@ Table Parameters: numFiles 2 numRows 0 rawDataSize 0 - totalSize 1798 + totalSize 1834 transactional true transactional_properties default #### A masked pattern was here #### @@ -241,7 +241,7 @@ Table Parameters: numFiles 4 numRows 0 rawDataSize 0 - totalSize 2909 + totalSize 2976 transactional true transactional_properties default #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out index 1195f20114..cbaba3cbd3 100644 --- ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out +++ 
ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out @@ -103,7 +103,7 @@ STAGE PLANS: serialization.ddl struct acidtbldefault { i32 a} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 32572 + totalSize 32875 transactional true transactional_properties default #### A masked pattern was here #### @@ -127,7 +127,7 @@ STAGE PLANS: serialization.ddl struct acidtbldefault { i32 a} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 32572 + totalSize 32875 transactional true transactional_properties default #### A masked pattern was here #### diff --git ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out index aa2dcc7f68..cd3023f9d5 100644 --- ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out +++ ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out @@ -252,7 +252,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 16673 HDFS_BYTES_WRITTEN: 104 - HDFS_READ_OPS: 6 + HDFS_READ_OPS: 7 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -283,7 +283,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -302,7 +302,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 1055 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 4 + HDFS_READ_OPS: 5 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -334,7 +334,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -364,7 +364,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -394,7 +394,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -424,7 +424,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -454,7 +454,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -484,7 +484,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -514,7 +514,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 104 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -544,7 +544,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 
HIVE COUNTERS: @@ -574,7 +574,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 104 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -604,7 +604,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -634,7 +634,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -664,7 +664,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 103 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -694,7 +694,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -713,7 +713,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -732,7 +732,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -759,7 +759,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -786,7 +786,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -805,7 +805,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 5691 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 4 + HDFS_READ_OPS: 5 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -837,7 +837,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -867,7 +867,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -897,7 +897,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -916,7 +916,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 104 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -946,7 +946,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 
Stage-1 HIVE COUNTERS: @@ -973,7 +973,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1003,7 +1003,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1033,7 +1033,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1063,7 +1063,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1093,7 +1093,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1123,7 +1123,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1153,7 +1153,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1183,7 +1183,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1213,7 +1213,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1243,7 +1243,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1270,7 +1270,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1297,7 +1297,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1324,7 +1324,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1354,7 +1354,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1384,7 +1384,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 
HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -1414,7 +1414,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: diff --git ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out index eb54a81d1a..e7a1a5b675 100644 --- ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out +++ ql/src/test/results/clientpositive/llap/orc_llap_counters1.q.out @@ -252,7 +252,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 17728 HDFS_BYTES_WRITTEN: 104 - HDFS_READ_OPS: 7 + HDFS_READ_OPS: 8 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -284,7 +284,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 104 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: diff --git ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out index 590437c708..75a2908fd1 100644 --- ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out +++ ql/src/test/results/clientpositive/llap/orc_ppd_basic.q.out @@ -205,7 +205,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 16673 HDFS_BYTES_WRITTEN: 104 - HDFS_READ_OPS: 6 + HDFS_READ_OPS: 7 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -236,7 +236,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -255,7 +255,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 1055 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 4 + HDFS_READ_OPS: 5 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -287,7 +287,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -317,7 +317,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -347,7 +347,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -377,7 +377,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -407,7 +407,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -437,7 +437,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -467,7 +467,7 @@ 
PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 104 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -497,7 +497,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -527,7 +527,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 104 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -557,7 +557,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -587,7 +587,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 102 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -617,7 +617,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 103 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -647,7 +647,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -666,7 +666,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -685,7 +685,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -712,7 +712,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -739,7 +739,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -758,7 +758,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 5691 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 4 + HDFS_READ_OPS: 5 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -790,7 +790,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -820,7 +820,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -850,7 +850,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ### Stage-1 FILE SYSTEM COUNTERS: HDFS_BYTES_READ: 0 HDFS_BYTES_WRITTEN: 101 - HDFS_READ_OPS: 2 + HDFS_READ_OPS: 3 HDFS_LARGE_READ_OPS: 0 HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: @@ -869,7 +869,7 
@@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -899,7 +899,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -926,7 +926,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -956,7 +956,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -986,7 +986,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1016,7 +1016,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1046,7 +1046,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1076,7 +1076,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1106,7 +1106,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1136,7 +1136,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1166,7 +1166,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1196,7 +1196,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1223,7 +1223,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1250,7 +1250,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1277,7 +1277,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1307,7 +1307,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1337,7 +1337,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1367,7 +1367,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1397,7 +1397,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 4912
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1429,7 +1429,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1751
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1461,7 +1461,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1491,7 +1491,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1522,7 +1522,7 @@ PREHOOK: Output: default@orc_ppd_1
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 10129
    HDFS_BYTES_WRITTEN: 1415
-   HDFS_READ_OPS: 5
+   HDFS_READ_OPS: 6
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 3
 Stage-1 HIVE COUNTERS:
@@ -1552,7 +1552,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1539
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 6
+   HDFS_READ_OPS: 7
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1584,7 +1584,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1614,7 +1614,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1644,7 +1644,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
diff --git ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
index c7c8993d0e..d9572e73fe 100644
--- ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
+++ ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
@@ -205,7 +205,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 17008
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 6
+   HDFS_READ_OPS: 7
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -237,7 +237,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -256,7 +256,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -286,7 +286,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 720
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -318,7 +318,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -348,7 +348,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -382,7 +382,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -412,7 +412,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -431,7 +431,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -461,7 +461,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -491,7 +491,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -521,7 +521,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -555,7 +555,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -585,7 +585,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -604,7 +604,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -634,7 +634,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -664,7 +664,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -694,7 +694,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -728,7 +728,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -758,7 +758,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -777,7 +777,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -807,7 +807,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -837,7 +837,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -867,7 +867,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -901,7 +901,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 16898
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -925,7 +925,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 17728
    HDFS_BYTES_WRITTEN: 104
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -949,7 +949,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 16898
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -973,7 +973,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 17728
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -997,7 +997,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 16898
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1021,7 +1021,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 17728
    HDFS_BYTES_WRITTEN: 102
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1045,7 +1045,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 4912
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1077,7 +1077,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1751
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1113,7 +1113,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 21458
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1137,7 +1137,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 23336
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1165,7 +1165,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 21458
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1189,7 +1189,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 23336
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1213,7 +1213,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 4099
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1245,7 +1245,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1592
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1281,7 +1281,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 20629
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1305,7 +1305,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 22364
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1333,7 +1333,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1363,7 +1363,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1397,7 +1397,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 20629
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1421,7 +1421,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 22364
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1449,7 +1449,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1479,7 +1479,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 0
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 2
+   HDFS_READ_OPS: 3
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1513,7 +1513,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 2183
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1545,7 +1545,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 18747
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1569,7 +1569,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 1217
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 4
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1601,7 +1601,7 @@ PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
    HDFS_BYTES_READ: 20073
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS: