diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 623e7c1..25f9a96 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -488,6 +488,7 @@ minillaplocal.query.files=acid_globallimit.q,\
   explainuser_1.q,\
   explainuser_4.q,\
   groupby2.q,\
+  groupby8_with_counters.q,\
   hybridgrace_hashjoin_1.q,\
   hybridgrace_hashjoin_2.q,\
   infer_bucket_sort_bucketed_table.q,\
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapOperator.java
index 2e0ef74..52b41f5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapOperator.java
@@ -70,7 +70,8 @@
    */
   public static enum Counter {
     DESERIALIZE_ERRORS,
-    RECORDS_IN
+    RECORDS_IN,
+    GROUPBY_FLUSHES
   }

   protected final transient LongWritable deserialize_error_count = new LongWritable();
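Adding GROUPBY_FLUSHES to the Counter enum only names the counter; the value becomes visible because GroupByOperator.closeOp() below publishes a LongWritable under that name into the operator's statsMap, which surfaces as the vertex-level counter printed by PostExecTezSummaryPrinter in the q.out files. A minimal stand-alone sketch of that pattern, with an invented class that is not part of the patch:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.io.LongWritable;

// Hypothetical illustration of the statsMap counter pattern that
// GroupByOperator.closeOp() uses in this patch; only the counter name is real.
public class FlushCounterSketch {
  private final Map<String, LongWritable> statsMap = new HashMap<>();
  private long numHashTableFlushes = 0;

  void onHashTableFlush() {
    numHashTableFlushes++; // incremented once per flushHashTable() call
  }

  void close() {
    // Publish the final count under the enum constant's name; the runtime
    // aggregates statsMap entries into the GROUPBY_FLUSHES counter in q.out.
    statsMap.put("GROUPBY_FLUSHES", new LongWritable(numHashTableFlushes));
  }
}
```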
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
index cddf14f..a80f08f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
@@ -63,6 +63,7 @@
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;

 import javolution.util.FastBitSet;
@@ -146,6 +147,11 @@
   private transient int countAfterReport; // report or forward
   private transient int heartbeatInterval;

+  private transient boolean isTez;
+  private transient boolean isLlap;
+  private transient int numExecutors;
+  private transient long numHashTableFlushes;
+
   /**
    * Total amount of memory allowed for JVM heap.
    */
@@ -391,17 +397,20 @@ protected void initializeOp(Configuration hconf) throws HiveException {
         new KeyWrapperFactory(keyFields, keyObjectInspectors, currentKeyObjectInspectors);

     newKeys = keyWrapperFactory.getKeyWrapper();
-
+    isTez = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez");
+    isLlap = isTez && HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_EXECUTION_MODE).equals("llap");
+    numExecutors = isLlap ? HiveConf.getIntVar(hconf, HiveConf.ConfVars.LLAP_DAEMON_NUM_EXECUTORS) : 1;
     firstRow = true;
     // estimate the number of hash table entries based on the size of each
     // entry. Since the size of a entry
     // is not known, estimate that based on the number of entries
     if (hashAggr) {
-      computeMaxEntriesHashAggr(hconf);
+      computeMaxEntriesHashAggr();
     }
     memoryMXBean = ManagementFactory.getMemoryMXBean();
-    maxMemory = memoryMXBean.getHeapMemoryUsage().getMax();
+    maxMemory = isTez ? getConf().getMaxMemoryAvailable() : memoryMXBean.getHeapMemoryUsage().getMax();
     memoryThreshold = this.getConf().getMemoryThreshold();
+    LOG.info("isTez: {} isLlap: {} numExecutors: {} maxMemory: {}", isTez, isLlap, numExecutors, maxMemory);
   }

   /**
@@ -413,9 +422,14 @@ protected void initializeOp(Configuration hconf) throws HiveException {
    * @return number of entries that can fit in hash table - useful for map-side
    *         aggregation only
    **/
-  private void computeMaxEntriesHashAggr(Configuration hconf) throws HiveException {
+  private void computeMaxEntriesHashAggr() throws HiveException {
     float memoryPercentage = this.getConf().getGroupByMemoryUsage();
-    maxHashTblMemory = (long) (memoryPercentage * Runtime.getRuntime().maxMemory());
+    if (isTez) {
+      maxHashTblMemory = (long) (memoryPercentage * getConf().getMaxMemoryAvailable());
+    } else {
+      maxHashTblMemory = (long) (memoryPercentage * Runtime.getRuntime().maxMemory());
+    }
+    LOG.info("Max hash table memory: {} bytes", maxHashTblMemory);
     estimateRowSize();
   }
@@ -875,6 +889,14 @@ private boolean shouldBeFlushed(KeyWrapper newKeys) {
     if ((numEntriesHashTable == 0) || ((numEntries % NUMROWSESTIMATESIZE) == 0)) {
       //check how much memory left memory
       usedMemory = memoryMXBean.getHeapMemoryUsage().getUsed();
+      // TODO: there is no reliable way to compute the memory used by the executor threads (it is unclear whether
+      // the Tez memory manager can provide it). For now, for LLAP we assume the used memory is divided equally
+      // among all executors.
+      usedMemory = isLlap ? usedMemory / numExecutors : usedMemory;
+      if (isLogInfoEnabled) {
+        LOG.info("usedMemory: {} usedMemPerExecutor: {} maxMemory: {} maxMemPerExecutor: {}",
+            memoryMXBean.getHeapMemoryUsage().getUsed(), usedMemory,
+            memoryMXBean.getHeapMemoryUsage().getMax(), maxMemory);
+      }
       rate = (float) usedMemory / (float) maxMemory;
       if(rate > memoryThreshold){
         return true;
@@ -957,7 +979,7 @@ else if (o instanceof ByteArrayRef){
    * @throws HiveException
    */
   private void flushHashTable(boolean complete) throws HiveException {
-
+    numHashTableFlushes++;
     countAfterReport = 0;

     // Currently, the algorithm flushes 10% of the entries - this can be
@@ -973,7 +995,7 @@
       hashAggregations.clear();
       hashAggregations = null;
       if (isLogInfoEnabled) {
-        LOG.info("Hash Table completed flushed");
+        LOG.info("Hash Table completely flushed");
       }
       return;
     }
@@ -991,9 +1013,9 @@
       iter.remove();
       numDel++;
       if (numDel * 10 >= oldSize) {
-      if (isLogInfoEnabled) {
-        LOG.info("Hash Table flushed: new size = " + hashAggregations.size());
-      }
+        if (isLogInfoEnabled) {
+          LOG.info("Hash Table flushed: new size = " + hashAggregations.size());
+        }
         return;
       }
     }
@@ -1102,6 +1124,7 @@ public void closeOp(boolean abort) throws HiveException {
         throw new HiveException(e);
       }
     }
+    statsMap.put(AbstractMapOperator.Counter.GROUPBY_FLUSHES.name(), new LongWritable(numHashTableFlushes));
     hashAggregations = null;
     super.closeOp(abort);
   }
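The sizing and flush logic above reduces to two small formulas: pick the base budget (the plan-provided maximum on Tez, otherwise the JVM heap) and apply the configured percentage; then, when deciding whether to flush, attribute the measured heap usage evenly to the LLAP executors before comparing it with the per-executor maximum. A stand-alone sketch under the patch's even-division assumption (class and method names are invented, not Hive API):

```java
// Stand-alone sketch of the GroupByOperator memory math above.
final class GroupByMemorySketch {

  // Mirrors computeMaxEntriesHashAggr(): on Tez the budget comes from the plan
  // (already divided per executor by getMemoryAvailableForTasks()), otherwise
  // from the JVM heap.
  static long maxHashTableMemory(boolean isTez, long maxMemoryAvailable, float memoryPercentage) {
    long base = isTez ? maxMemoryAvailable : Runtime.getRuntime().maxMemory();
    return (long) (memoryPercentage * base);
  }

  // Mirrors shouldBeFlushed(): heap usage is attributed evenly to executors
  // (per the TODO, there is no per-thread figure), then compared against the
  // per-executor maximum.
  static boolean shouldFlush(long usedHeap, boolean isLlap, int numExecutors,
      long maxMemory, float memoryThreshold) {
    long used = isLlap ? usedHeap / numExecutors : usedHeap;
    return (float) used / (float) maxMemory > memoryThreshold;
  }
}
```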
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
index a8ed74c..2740659 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java
@@ -330,4 +330,21 @@ public static int countOperatorsUpstream(Operator<?> start,
   }
+
+  public static void setMemoryAvailable(final List<Operator<? extends OperatorDesc>> operators,
+      final long memoryAvailableToTask) {
+    if (operators == null) {
+      return;
+    }
+
+    for (Operator<? extends OperatorDesc> op : operators) {
+      if (op.getConf() != null) {
+        LOG.debug("Setting max memory available for operator {} to {} bytes", op.getName(), memoryAvailableToTask);
+        op.getConf().setMaxMemoryAvailable(memoryAvailableToTask);
+      }
+      if (op.getChildOperators() != null && !op.getChildOperators().isEmpty()) {
+        setMemoryAvailable(op.getChildOperators(), memoryAvailableToTask);
+      }
+    }
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
index 6f36dfb..ce503ee 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
@@ -193,6 +193,7 @@ public Object call() {
       } else {
         mapOp = new MapOperator(runtimeCtx);
       }
+
       // Not synchronizing creation of mapOp with an invocation. Check immediately
       // after creation in case abort has been set.
       // Relying on the regular flow to clean up the actual operator. i.e. If an exception is
@@ -283,6 +284,14 @@ public Object call() {
       mapOp.passExecContext(execContext);
       l4j.info(mapOp.dump(0));

+      // set memory available for operators
+      long memoryAvailableToTask = getMemoryAvailableForTasks(processorContext.getTotalMemoryAvailableToTask());
+      if (mapOp.getConf() != null) {
+        mapOp.getConf().setMaxMemoryAvailable(memoryAvailableToTask);
+        l4j.info("Memory available for operators set to {} bytes", memoryAvailableToTask);
+      }
+      OperatorUtils.setMemoryAvailable(mapOp.getChildOperators(), memoryAvailableToTask);
+
       mapOp.initializeLocalWork(jconf);
       checkAbortCondition();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
index 77c7fa3..bf86c07 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java
@@ -24,6 +24,7 @@
 import java.util.Map.Entry;
 import java.util.concurrent.Callable;

+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.ObjectCache;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.tez.TezProcessor.TezKVOutputCollector;
@@ -161,4 +162,25 @@ protected void checkAbortCondition() throws InterruptedException {
       throw new InterruptedException("Processing thread aborted. Interrupt state: " + interruptState);
     }
   }
+
+  protected long getMemoryAvailableForTasks(final long totalMemoryAvailableToTask) {
+    long memoryAvailableToTask = totalMemoryAvailableToTask;
+    final String execEngine = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE);
+    final String execMode = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVE_EXECUTION_MODE);
+    if (execEngine.equals("tez") && execMode.equals("llap")) {
+      final int numExecutors = HiveConf.getIntVar(jconf, HiveConf.ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
+      final boolean llapIo = HiveConf.getBoolVar(jconf, HiveConf.ConfVars.LLAP_IO_ENABLED, false);
+      final boolean llapIoCacheMode = HiveConf.getVar(jconf, HiveConf.ConfVars.LLAP_IO_MEMORY_MODE).equals("cache");
+      // the IO cache shares the daemon heap with the executors only when the allocator is not direct (off-heap)
+      final boolean onHeapCache = !HiveConf.getBoolVar(jconf, HiveConf.ConfVars.LLAP_ALLOCATOR_DIRECT);
+      final boolean offsetCacheMemory = llapIo && llapIoCacheMode && onHeapCache;
+      if (offsetCacheMemory) {
+        final long cacheMemory = HiveConf.getSizeVar(jconf, HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE);
+        memoryAvailableToTask -= cacheMemory;
+        l4j.info("llapIo: {} llapIoCacheMode: {} onHeapCache: {} cacheMemory: {} memoryAvailableToTask: {} "
+            + "numExecutors: {}", llapIo, llapIoCacheMode, onHeapCache, cacheMemory, memoryAvailableToTask,
+            numExecutors);
+      }
+      memoryAvailableToTask = memoryAvailableToTask / numExecutors;
+    }
+    return memoryAvailableToTask;
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java
index e4c13fb..3ff1c25 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java
@@ -199,6 +199,15 @@ void init(
     checkAbortCondition();

     reducer = reduceWork.getReducer();
+
+    // set memory available for operators
+    long memoryAvailableToTask = getMemoryAvailableForTasks(processorContext.getTotalMemoryAvailableToTask());
+    if (reducer.getConf() != null) {
+      reducer.getConf().setMaxMemoryAvailable(memoryAvailableToTask);
+      l4j.info("Memory available for operators set to {} bytes", memoryAvailableToTask);
+    }
+    OperatorUtils.setMemoryAvailable(reducer.getChildOperators(), memoryAvailableToTask);
+
     // initialize reduce operator tree
     try {
       l4j.info(reducer.dump(0));
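To make the arithmetic in getMemoryAvailableForTasks() concrete, take invented figures: a 32 GiB memory grant from the processor context, an 8 GiB LLAP IO cache running in "cache" mode on an on-heap allocator, and 12 executors. The cache is subtracted only because it shares the heap with the executors, and the remainder is split evenly: (32 - 8) / 12 = 2 GiB per task.

```java
// Worked example of the per-task division above; all figures are invented.
long totalMemory = 32L << 30;  // total memory reported by the processor context
long cacheMemory = 8L << 30;   // hive.llap.io.memory.size, on-heap in this example
int numExecutors = 12;         // hive.llap.daemon.num.executors
long perTask = (totalMemory - cacheMemory) / numExecutors;  // 2 GiB per executor
```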
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java
index e217bdf..7df9d07 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java
@@ -33,6 +33,7 @@
   protected transient OpTraits opTraits;
   protected transient Map<String, Object> opProps;
   protected long memNeeded = 0;
+  protected long memAvailable = 0;
   protected String runtimeStatsTmpDir;

   @Override
@@ -93,6 +94,16 @@ public void setMemoryNeeded(long memNeeded) {
     this.memNeeded = memNeeded;
   }

+  @Override
+  public long getMaxMemoryAvailable() {
+    return memAvailable;
+  }
+
+  @Override
+  public void setMaxMemoryAvailable(final long memoryAvailable) {
+    this.memAvailable = memoryAvailable;
+  }
+
   public String getRuntimeStatsTmpDir() {
     return runtimeStatsTmpDir;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java
index ad620c2..850576c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java
@@ -30,6 +30,8 @@
   public Map<String, Object> getOpProps();
   public long getMemoryNeeded();
   public void setMemoryNeeded(long memoryNeeded);
+  public long getMaxMemoryAvailable();
+  public void setMaxMemoryAvailable(long memoryAvailable);
   public String getRuntimeStatsTmpDir();
   public void setRuntimeStatsTmpDir(String runtimeStatsTmpDir);
 }
diff --git a/ql/src/test/queries/clientpositive/groupby8_with_counters.q b/ql/src/test/queries/clientpositive/groupby8_with_counters.q
new file mode 100644
index 0000000..9698e09
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/groupby8_with_counters.q
@@ -0,0 +1,34 @@
+set hive.map.aggr=true;
+set hive.groupby.skewindata=true;
+SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
+
+-- SORT_QUERY_RESULTS
+
+CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE;
+CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE;
+
+EXPLAIN
+FROM SRC
+INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+
+FROM SRC
+INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+
+SELECT DEST1.* FROM DEST1;
+SELECT DEST2.* FROM DEST2;
+
+set hive.multigroupby.singlereducer=false;
+
+EXPLAIN
+FROM SRC
+INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+
+FROM SRC
+INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
+INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
+
+SELECT DEST1.* FROM DEST1;
+SELECT DEST2.* FROM DEST2;
diff --git a/ql/src/test/results/clientpositive/llap/groupby8_with_counters.q.out b/ql/src/test/results/clientpositive/llap/groupby8_with_counters.q.out new file mode 100644 index 0000000..f28edc2 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/groupby8_with_counters.q.out @@ -0,0 +1,1624 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DEST1 +PREHOOK: query: CREATE TABLE DEST2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DEST2 +PREHOOK: query: EXPLAIN +FROM SRC +INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +PREHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-3 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-3 + Stage-4 depends on stages: Stage-0 + Stage-1 depends on stages: Stage-3 + Stage-5 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Map 1 (SIMPLE_EDGE) + Reducer 5 <- Reducer 4 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string), value (type:
string) + outputColumnNames: key, value + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(DISTINCT substr(value, 5)) + keys: key (type: string), substr(value, 5) (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: key, value + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(DISTINCT substr(value, 5)) + keys: key (type: string), substr(value, 5) (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: count(DISTINCT KEY._col1:0._col0) + keys: KEY._col0 (type: string) + mode: partials + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: bigint) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: final + outputColumnNames: _col0, _col1 + Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 205 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 205 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + Reducer 4 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: count(DISTINCT KEY._col1:0._col0) + keys: KEY._col0 (type: string) + mode: partials + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: bigint) + Reducer 5 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: 
final + outputColumnNames: _col0, _col1 + Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 205 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 205 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest2 + + Stage: Stage-3 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-4 + Stats-Aggr Operator + + Stage: Stage-1 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest2 + + Stage: Stage-5 + Stats-Aggr Operator + +PREHOOK: query: FROM SRC +INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest2 +Stage-2 FILE SYSTEM COUNTERS: +Stage-2 HIVE COUNTERS: + CREATED_FILES: 2 + DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 1000 + RECORDS_IN_Map_1: 500 + RECORDS_OUT_1_default.dest1: 309 + RECORDS_OUT_2_default.dest2: 309 + RECORDS_OUT_INTERMEDIATE_Map_1: 1000 + RECORDS_OUT_INTERMEDIATE_Reducer_2: 309 + RECORDS_OUT_INTERMEDIATE_Reducer_4: 309 +PREHOOK: query: SELECT DEST1.* FROM DEST1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +0 1 +10 1 +100 1 +103 1 +104 1 +105 1 +11 1 +111 1 +113 1 +114 1 +116 1 +118 1 +119 1 +12 1 +120 1 +125 1 +126 1 +128 1 +129 1 +131 1 +133 1 +134 1 +136 1 +137 1 +138 1 +143 1 +145 1 +146 1 +149 1 +15 1 +150 1 +152 1 +153 1 +155 1 +156 1 +157 1 +158 1 +160 1 +162 1 +163 1 +164 1 +165 1 +166 1 +167 1 +168 1 +169 1 +17 1 +170 1 +172 1 +174 1 +175 1 +176 1 +177 1 +178 1 +179 1 +18 1 +180 1 +181 1 +183 1 +186 1 +187 1 +189 1 +19 1 +190 1 +191 1 +192 1 +193 1 +194 1 +195 1 +196 1 +197 1 +199 1 +2 1 +20 1 +200 1 +201 1 +202 1 +203 1 +205 1 +207 1 +208 1 +209 1 +213 1 +214 1 +216 1 +217 1 +218 1 +219 1 +221 1 +222 1 +223 1 +224 1 +226 1 +228 1 +229 1 +230 1 +233 1 +235 1 +237 1 +238 1 +239 1 +24 1 +241 1 +242 1 +244 1 +247 1 +248 1 +249 1 +252 1 +255 1 +256 1 +257 1 +258 1 +26 1 +260 1 +262 1 +263 1 +265 1 +266 1 +27 1 +272 1 +273 1 +274 1 +275 1 +277 1 +278 1 +28 1 +280 1 +281 1 +282 1 +283 1 +284 1 +285 1 +286 1 +287 1 +288 1 +289 1 +291 1 +292 1 +296 1 +298 1 +30 1 +302 1 +305 1 +306 1 +307 1 +308 1 +309 1 +310 1 +311 1 +315 1 +316 1 +317 1 +318 1 +321 1 +322 1 +323 1 +325 1 +327 1 +33 1 +331 1 +332 1 +333 1 +335 1 +336 1 +338 1 +339 1 +34 1 +341 1 +342 1 +344 1 +345 1 +348 1 +35 1 +351 1 +353 1 +356 1 +360 1 +362 1 +364 1 +365 1 +366 1 +367 1 +368 1 +369 1 +37 1 +373 1 +374 1 +375 1 +377 1 +378 1 +379 1 +382 1 +384 1 +386 1 +389 1 
+392 1 +393 1 +394 1 +395 1 +396 1 +397 1 +399 1 +4 1 +400 1 +401 1 +402 1 +403 1 +404 1 +406 1 +407 1 +409 1 +41 1 +411 1 +413 1 +414 1 +417 1 +418 1 +419 1 +42 1 +421 1 +424 1 +427 1 +429 1 +43 1 +430 1 +431 1 +432 1 +435 1 +436 1 +437 1 +438 1 +439 1 +44 1 +443 1 +444 1 +446 1 +448 1 +449 1 +452 1 +453 1 +454 1 +455 1 +457 1 +458 1 +459 1 +460 1 +462 1 +463 1 +466 1 +467 1 +468 1 +469 1 +47 1 +470 1 +472 1 +475 1 +477 1 +478 1 +479 1 +480 1 +481 1 +482 1 +483 1 +484 1 +485 1 +487 1 +489 1 +490 1 +491 1 +492 1 +493 1 +494 1 +495 1 +496 1 +497 1 +498 1 +5 1 +51 1 +53 1 +54 1 +57 1 +58 1 +64 1 +65 1 +66 1 +67 1 +69 1 +70 1 +72 1 +74 1 +76 1 +77 1 +78 1 +8 1 +80 1 +82 1 +83 1 +84 1 +85 1 +86 1 +87 1 +9 1 +90 1 +92 1 +95 1 +96 1 +97 1 +98 1 +PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest2 +#### A masked pattern was here #### +0 1 +10 1 +100 1 +103 1 +104 1 +105 1 +11 1 +111 1 +113 1 +114 1 +116 1 +118 1 +119 1 +12 1 +120 1 +125 1 +126 1 +128 1 +129 1 +131 1 +133 1 +134 1 +136 1 +137 1 +138 1 +143 1 +145 1 +146 1 +149 1 +15 1 +150 1 +152 1 +153 1 +155 1 +156 1 +157 1 +158 1 +160 1 +162 1 +163 1 +164 1 +165 1 +166 1 +167 1 +168 1 +169 1 +17 1 +170 1 +172 1 +174 1 +175 1 +176 1 +177 1 +178 1 +179 1 +18 1 +180 1 +181 1 +183 1 +186 1 +187 1 +189 1 +19 1 +190 1 +191 1 +192 1 +193 1 +194 1 +195 1 +196 1 +197 1 +199 1 +2 1 +20 1 +200 1 +201 1 +202 1 +203 1 +205 1 +207 1 +208 1 +209 1 +213 1 +214 1 +216 1 +217 1 +218 1 +219 1 +221 1 +222 1 +223 1 +224 1 +226 1 +228 1 +229 1 +230 1 +233 1 +235 1 +237 1 +238 1 +239 1 +24 1 +241 1 +242 1 +244 1 +247 1 +248 1 +249 1 +252 1 +255 1 +256 1 +257 1 +258 1 +26 1 +260 1 +262 1 +263 1 +265 1 +266 1 +27 1 +272 1 +273 1 +274 1 +275 1 +277 1 +278 1 +28 1 +280 1 +281 1 +282 1 +283 1 +284 1 +285 1 +286 1 +287 1 +288 1 +289 1 +291 1 +292 1 +296 1 +298 1 +30 1 +302 1 +305 1 +306 1 +307 1 +308 1 +309 1 +310 1 +311 1 +315 1 +316 1 +317 1 +318 1 +321 1 +322 1 +323 1 +325 1 +327 1 +33 1 +331 1 +332 1 +333 1 +335 1 +336 1 +338 1 +339 1 +34 1 +341 1 +342 1 +344 1 +345 1 +348 1 +35 1 +351 1 +353 1 +356 1 +360 1 +362 1 +364 1 +365 1 +366 1 +367 1 +368 1 +369 1 +37 1 +373 1 +374 1 +375 1 +377 1 +378 1 +379 1 +382 1 +384 1 +386 1 +389 1 +392 1 +393 1 +394 1 +395 1 +396 1 +397 1 +399 1 +4 1 +400 1 +401 1 +402 1 +403 1 +404 1 +406 1 +407 1 +409 1 +41 1 +411 1 +413 1 +414 1 +417 1 +418 1 +419 1 +42 1 +421 1 +424 1 +427 1 +429 1 +43 1 +430 1 +431 1 +432 1 +435 1 +436 1 +437 1 +438 1 +439 1 +44 1 +443 1 +444 1 +446 1 +448 1 +449 1 +452 1 +453 1 +454 1 +455 1 +457 1 +458 1 +459 1 +460 1 +462 1 +463 1 +466 1 +467 1 +468 1 +469 1 +47 1 +470 1 +472 1 +475 1 +477 1 +478 1 +479 1 +480 1 +481 1 +482 1 +483 1 +484 1 +485 1 +487 1 +489 1 +490 1 +491 1 +492 1 +493 1 +494 1 +495 1 +496 1 +497 1 +498 1 +5 1 +51 1 +53 1 +54 1 +57 1 +58 1 +64 1 +65 1 +66 1 +67 1 +69 1 +70 1 +72 1 +74 1 +76 1 +77 1 +78 1 +8 1 +80 1 +82 1 +83 1 +84 1 +85 1 +86 1 +87 1 +9 1 +90 1 +92 1 +95 1 +96 1 +97 1 +98 1 +PREHOOK: query: EXPLAIN +FROM SRC +INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +PREHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-3 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-3 + Stage-4 depends on stages: Stage-0 + Stage-1 depends on stages: Stage-3 + Stage-5 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Tez +#### A masked pattern was here #### + Edges: 
+ Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Map 1 (SIMPLE_EDGE) + Reducer 5 <- Reducer 4 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: key, value + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(DISTINCT substr(value, 5)) + keys: key (type: string), substr(value, 5) (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: key, value + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(DISTINCT substr(value, 5)) + keys: key (type: string), substr(value, 5) (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 250 Data size: 69750 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: count(DISTINCT KEY._col1:0._col0) + keys: KEY._col0 (type: string) + mode: partials + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: bigint) + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: final + outputColumnNames: _col0, _col1 + Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 205 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 205 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + Reducer 4 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: count(DISTINCT KEY._col1:0._col0) + keys: KEY._col0 (type: string) + mode: partials + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + Reduce 
Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: bigint) + Reducer 5 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: final + outputColumnNames: _col0, _col1 + Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 205 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 205 Data size: 2460 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest2 + + Stage: Stage-3 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-4 + Stats-Aggr Operator + + Stage: Stage-1 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest2 + + Stage: Stage-5 + Stats-Aggr Operator + +PREHOOK: query: FROM SRC +INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest2 +Stage-2 FILE SYSTEM COUNTERS: +Stage-2 HIVE COUNTERS: + CREATED_FILES: 2 + DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 1000 + RECORDS_IN_Map_1: 500 + RECORDS_OUT_1_default.dest1: 309 + RECORDS_OUT_2_default.dest2: 309 + RECORDS_OUT_INTERMEDIATE_Map_1: 1000 + RECORDS_OUT_INTERMEDIATE_Reducer_2: 309 + RECORDS_OUT_INTERMEDIATE_Reducer_4: 309 +PREHOOK: query: SELECT DEST1.* FROM DEST1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +0 1 +10 1 +100 1 +103 1 +104 1 +105 1 +11 1 +111 1 +113 1 +114 1 +116 1 +118 1 +119 1 +12 1 +120 1 +125 1 +126 1 +128 1 +129 1 +131 1 +133 1 +134 1 +136 1 +137 1 +138 1 +143 1 +145 1 +146 1 +149 1 +15 1 +150 1 +152 1 +153 1 +155 1 +156 1 +157 1 +158 1 +160 1 +162 1 +163 1 +164 1 +165 1 +166 1 +167 1 +168 1 +169 1 +17 1 +170 1 +172 1 +174 1 +175 1 +176 1 +177 1 +178 1 +179 1 +18 1 +180 1 +181 1 +183 1 +186 1 +187 1 +189 1 +19 1 +190 1 +191 1 +192 1 +193 1 +194 1 +195 1 +196 1 +197 1 +199 1 +2 1 +20 1 +200 1 +201 1 +202 1 +203 1 +205 1 +207 1 +208 1 +209 1 +213 1 +214 1 +216 1 +217 1 +218 1 +219 1 +221 1 +222 1 +223 1 +224 1 +226 1 +228 1 +229 1 +230 1 +233 1 +235 1 +237 1 +238 1 +239 1 +24 1 +241 1 +242 1 +244 1 +247 1 +248 1 +249 1 +252 1 +255 1 +256 1 +257 1 +258 1 +26 1 +260 1 +262 1 +263 1 +265 1 +266 1 +27 1 +272 1 +273 1 +274 1 +275 1 +277 1 +278 1 +28 1 +280 1 +281 1 +282 1 +283 1 +284 1 +285 1 +286 1 +287 1 +288 1 
+289 1 +291 1 +292 1 +296 1 +298 1 +30 1 +302 1 +305 1 +306 1 +307 1 +308 1 +309 1 +310 1 +311 1 +315 1 +316 1 +317 1 +318 1 +321 1 +322 1 +323 1 +325 1 +327 1 +33 1 +331 1 +332 1 +333 1 +335 1 +336 1 +338 1 +339 1 +34 1 +341 1 +342 1 +344 1 +345 1 +348 1 +35 1 +351 1 +353 1 +356 1 +360 1 +362 1 +364 1 +365 1 +366 1 +367 1 +368 1 +369 1 +37 1 +373 1 +374 1 +375 1 +377 1 +378 1 +379 1 +382 1 +384 1 +386 1 +389 1 +392 1 +393 1 +394 1 +395 1 +396 1 +397 1 +399 1 +4 1 +400 1 +401 1 +402 1 +403 1 +404 1 +406 1 +407 1 +409 1 +41 1 +411 1 +413 1 +414 1 +417 1 +418 1 +419 1 +42 1 +421 1 +424 1 +427 1 +429 1 +43 1 +430 1 +431 1 +432 1 +435 1 +436 1 +437 1 +438 1 +439 1 +44 1 +443 1 +444 1 +446 1 +448 1 +449 1 +452 1 +453 1 +454 1 +455 1 +457 1 +458 1 +459 1 +460 1 +462 1 +463 1 +466 1 +467 1 +468 1 +469 1 +47 1 +470 1 +472 1 +475 1 +477 1 +478 1 +479 1 +480 1 +481 1 +482 1 +483 1 +484 1 +485 1 +487 1 +489 1 +490 1 +491 1 +492 1 +493 1 +494 1 +495 1 +496 1 +497 1 +498 1 +5 1 +51 1 +53 1 +54 1 +57 1 +58 1 +64 1 +65 1 +66 1 +67 1 +69 1 +70 1 +72 1 +74 1 +76 1 +77 1 +78 1 +8 1 +80 1 +82 1 +83 1 +84 1 +85 1 +86 1 +87 1 +9 1 +90 1 +92 1 +95 1 +96 1 +97 1 +98 1 +PREHOOK: query: SELECT DEST2.* FROM DEST2 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest2 +#### A masked pattern was here #### +0 1 +10 1 +100 1 +103 1 +104 1 +105 1 +11 1 +111 1 +113 1 +114 1 +116 1 +118 1 +119 1 +12 1 +120 1 +125 1 +126 1 +128 1 +129 1 +131 1 +133 1 +134 1 +136 1 +137 1 +138 1 +143 1 +145 1 +146 1 +149 1 +15 1 +150 1 +152 1 +153 1 +155 1 +156 1 +157 1 +158 1 +160 1 +162 1 +163 1 +164 1 +165 1 +166 1 +167 1 +168 1 +169 1 +17 1 +170 1 +172 1 +174 1 +175 1 +176 1 +177 1 +178 1 +179 1 +18 1 +180 1 +181 1 +183 1 +186 1 +187 1 +189 1 +19 1 +190 1 +191 1 +192 1 +193 1 +194 1 +195 1 +196 1 +197 1 +199 1 +2 1 +20 1 +200 1 +201 1 +202 1 +203 1 +205 1 +207 1 +208 1 +209 1 +213 1 +214 1 +216 1 +217 1 +218 1 +219 1 +221 1 +222 1 +223 1 +224 1 +226 1 +228 1 +229 1 +230 1 +233 1 +235 1 +237 1 +238 1 +239 1 +24 1 +241 1 +242 1 +244 1 +247 1 +248 1 +249 1 +252 1 +255 1 +256 1 +257 1 +258 1 +26 1 +260 1 +262 1 +263 1 +265 1 +266 1 +27 1 +272 1 +273 1 +274 1 +275 1 +277 1 +278 1 +28 1 +280 1 +281 1 +282 1 +283 1 +284 1 +285 1 +286 1 +287 1 +288 1 +289 1 +291 1 +292 1 +296 1 +298 1 +30 1 +302 1 +305 1 +306 1 +307 1 +308 1 +309 1 +310 1 +311 1 +315 1 +316 1 +317 1 +318 1 +321 1 +322 1 +323 1 +325 1 +327 1 +33 1 +331 1 +332 1 +333 1 +335 1 +336 1 +338 1 +339 1 +34 1 +341 1 +342 1 +344 1 +345 1 +348 1 +35 1 +351 1 +353 1 +356 1 +360 1 +362 1 +364 1 +365 1 +366 1 +367 1 +368 1 +369 1 +37 1 +373 1 +374 1 +375 1 +377 1 +378 1 +379 1 +382 1 +384 1 +386 1 +389 1 +392 1 +393 1 +394 1 +395 1 +396 1 +397 1 +399 1 +4 1 +400 1 +401 1 +402 1 +403 1 +404 1 +406 1 +407 1 +409 1 +41 1 +411 1 +413 1 +414 1 +417 1 +418 1 +419 1 +42 1 +421 1 +424 1 +427 1 +429 1 +43 1 +430 1 +431 1 +432 1 +435 1 +436 1 +437 1 +438 1 +439 1 +44 1 +443 1 +444 1 +446 1 +448 1 +449 1 +452 1 +453 1 +454 1 +455 1 +457 1 +458 1 +459 1 +460 1 +462 1 +463 1 +466 1 +467 1 +468 1 +469 1 +47 1 +470 1 +472 1 +475 1 +477 1 +478 1 +479 1 +480 1 +481 1 +482 1 +483 1 +484 1 +485 1 +487 1 +489 1 +490 1 +491 1 +492 1 +493 1 +494 1 +495 1 +496 1 +497 1 +498 1 +5 1 +51 1 +53 1 +54 1 +57 1 +58 1 +64 1 +65 1 +66 1 +67 1 +69 1 +70 1 +72 1 +74 1 +76 1 +77 1 +78 1 +8 1 +80 1 +82 1 +83 1 +84 1 +85 1 +86 1 +87 1 +9 1 +90 1 +92 1 +95 1 +96 1 +97 1 +98 1 diff --git a/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out b/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out 
index bd81989..0fb6361 100644 --- a/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out @@ -229,6 +229,7 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 0 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 RECORDS_OUT_INTERMEDIATE_Map_1: 1 @@ -255,6 +256,7 @@ Stage-1 FILE SYSTEM COUNTERS: HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: CREATED_FILES: 1 + GROUPBY_FLUSHES: 0 RECORDS_OUT_0: 1 0 PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group) @@ -271,9 +273,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 8 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 8 Stage-1 LLAP IO COUNTERS: CACHE_HIT_BYTES: 249 CACHE_MISS_BYTES: 0 @@ -297,9 +300,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 8 RECORDS_IN_Map_1: 1000 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 8 Stage-1 LLAP IO COUNTERS: CACHE_HIT_BYTES: 249 CACHE_MISS_BYTES: 0 @@ -323,9 +327,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 18 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 18 Stage-1 LLAP IO COUNTERS: CACHE_HIT_BYTES: 249 CACHE_MISS_BYTES: 0 @@ -349,9 +354,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 18 RECORDS_IN_Map_1: 2000 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 18 Stage-1 LLAP IO COUNTERS: CACHE_HIT_BYTES: 249 CACHE_MISS_BYTES: 0 @@ -379,6 +385,7 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 0 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 RECORDS_OUT_INTERMEDIATE_Map_1: 1 @@ -398,6 +405,7 @@ Stage-1 FILE SYSTEM COUNTERS: HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: CREATED_FILES: 1 + GROUPBY_FLUSHES: 0 RECORDS_OUT_0: 1 0 PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group) @@ -414,9 +422,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 8 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 8 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 8 @@ -434,9 +443,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 8 RECORDS_IN_Map_1: 1000 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 8 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 8 @@ -454,9 +464,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 18 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 18 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 18 @@ -474,9 +485,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 18 RECORDS_IN_Map_1: 2000 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 18 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 18 @@ -498,6 +510,7 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 
+ GROUPBY_FLUSHES: 0 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 RECORDS_OUT_INTERMEDIATE_Map_1: 1 @@ -517,6 +530,7 @@ Stage-1 FILE SYSTEM COUNTERS: HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: CREATED_FILES: 1 + GROUPBY_FLUSHES: 0 RECORDS_OUT_0: 1 0 PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group) @@ -533,9 +547,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 8 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 8 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 8 @@ -553,9 +568,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 8 RECORDS_IN_Map_1: 1000 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 8 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 8 @@ -573,9 +589,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 18 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 18 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 18 @@ -593,9 +610,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 18 RECORDS_IN_Map_1: 2000 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 18 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 18 @@ -617,6 +635,7 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 0 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 RECORDS_OUT_INTERMEDIATE_Map_1: 1 @@ -636,6 +655,7 @@ Stage-1 FILE SYSTEM COUNTERS: HDFS_WRITE_OPS: 2 Stage-1 HIVE COUNTERS: CREATED_FILES: 1 + GROUPBY_FLUSHES: 0 RECORDS_OUT_0: 1 0 PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group) @@ -652,9 +672,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 8 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 8 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 8 @@ -672,9 +693,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 8 RECORDS_IN_Map_1: 1000 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 8 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 8 @@ -692,9 +714,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 18 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 18 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 18 @@ -712,9 +735,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 18 RECORDS_IN_Map_1: 2000 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 18 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 18 @@ -736,9 +760,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 1566 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 1566 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 1566 @@ -756,9 +781,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 1566 RECORDS_IN_Map_1: 2100 
RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 1566 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 1566 @@ -776,9 +802,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 8 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 8 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 8 @@ -796,9 +823,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 8 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 8 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 8 @@ -816,9 +844,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 18 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 18 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 18 @@ -836,9 +865,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 18 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 18 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 18 @@ -856,9 +886,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 2 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 2 Stage-1 LLAP IO COUNTERS: ALLOCATED_BYTES: 262144 ALLOCATED_USED_BYTES: 8400 @@ -884,9 +915,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 2 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 2 Stage-1 LLAP IO COUNTERS: CACHE_HIT_BYTES: 4809 CACHE_MISS_BYTES: 0 @@ -913,9 +945,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 2 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 2 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 2 @@ -932,9 +965,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 2 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 2 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 2 @@ -955,9 +989,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 2 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 2 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 2 @@ -974,9 +1009,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 2 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 2 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 2 @@ -994,9 +1030,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 6 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 6 Stage-1 LLAP IO COUNTERS: ALLOCATED_BYTES: 786432 ALLOCATED_USED_BYTES: 11299 @@ -1022,9 +1059,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE 
COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 6 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 6 Stage-1 LLAP IO COUNTERS: CACHE_HIT_BYTES: 3980 CACHE_MISS_BYTES: 0 @@ -1051,9 +1089,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 6 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 6 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 6 @@ -1070,9 +1109,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 6 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 6 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 6 @@ -1093,9 +1133,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 6 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 6 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 6 @@ -1112,9 +1153,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 6 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 6 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 6 @@ -1135,9 +1177,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 6 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 6 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 6 @@ -1154,9 +1197,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 6 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 6 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 6 @@ -1177,9 +1221,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 6 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 6 Stage-1 LLAP IO COUNTERS: CACHE_HIT_BYTES: 3980 CACHE_MISS_BYTES: 0 @@ -1202,9 +1247,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 6 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 6 Stage-1 LLAP IO COUNTERS: CACHE_HIT_BYTES: 3980 CACHE_MISS_BYTES: 0 @@ -1232,9 +1278,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 4 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 4 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 4 @@ -1251,9 +1298,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 4 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 4 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 4 @@ -1270,9 +1318,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 4 RECORDS_IN_Map_1: 1000 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 4 Stage-1 LLAP IO COUNTERS: 
METADATA_CACHE_HIT: 1 4 @@ -1289,9 +1338,10 @@ Stage-1 FILE SYSTEM COUNTERS: Stage-1 HIVE COUNTERS: CREATED_FILES: 1 DESERIALIZE_ERRORS: 0 + GROUPBY_FLUSHES: 4 RECORDS_IN_Map_1: 2100 RECORDS_OUT_0: 1 - RECORDS_OUT_INTERMEDIATE_Map_1: 1 + RECORDS_OUT_INTERMEDIATE_Map_1: 4 Stage-1 LLAP IO COUNTERS: METADATA_CACHE_HIT: 1 4