diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties
index c710b0b..d807fcf 100644
--- itests/src/test/resources/testconfiguration.properties
+++ itests/src/test/resources/testconfiguration.properties
@@ -353,6 +353,7 @@ minitez.query.files=bucket_map_join_tez1.q,\
   tez_union_group_by.q,\
   tez_smb_main.q,\
   tez_smb_1.q,\
+  tez_smb_fail.q,\
   vectorized_dynamic_partition_pruning.q,\
   tez_multi_union.q,\
   tez_join.q,\
diff --git pom.xml pom.xml
index d2a5d52..f27872f 100644
--- pom.xml
+++ pom.xml
@@ -160,7 +160,7 @@
     1.0.1
     1.7.5
     4.0.4
-    0.5.2
+    0.5.4
     2.2.0
     1.4.0
     2.10
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
index 24af765..296a92d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
@@ -143,7 +143,13 @@ public CommonMergeJoinOperator() {

     for (byte pos = 0; pos < order.length; pos++) {
       if (pos != posBigTable) {
-        fetchDone[pos] = false;
+        if ((parentOperators != null) && (parentOperators.isEmpty() == false)
+            && (parentOperators.get(pos) instanceof TezDummyStoreOperator)) {
+          TezDummyStoreOperator dummyStoreOp = (TezDummyStoreOperator) parentOperators.get(pos);
+          fetchDone[pos] = dummyStoreOp.getFetchDone();
+        } else {
+          fetchDone[pos] = false;
+        }
       }
       foundNextKeyGroup[pos] = false;
     }
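Note on the CommonMergeJoinOperator hunk above: the old code reset fetchDone[pos] to false for every non-big-table side, so the merge join always tried to fetch from that input, even when the upstream operator already knew there was nothing to read. The change seeds the flag from the parent TezDummyStoreOperator instead. Below is a minimal, self-contained sketch of that seeding pattern; the classes are hypothetical stand-ins for illustration, not Hive's operators.

// Illustrative only: seed per-input "done" flags from parents that already
// know their input is empty, so the merge loop never waits on such an input.
import java.util.Arrays;
import java.util.List;

public class SeedFetchDoneSketch {

  // Hypothetical stand-in for a parent operator that may know its input is empty.
  static class DummySource {
    private final boolean empty;
    DummySource(boolean empty) { this.empty = empty; }
    boolean getFetchDone() { return empty; }
  }

  public static void main(String[] args) {
    int bigTablePos = 0;
    List<DummySource> parents =
        Arrays.asList(null /* big-table side */, new DummySource(true), new DummySource(false));

    boolean[] fetchDone = new boolean[parents.size()];
    for (int pos = 0; pos < parents.size(); pos++) {
      if (pos == bigTablePos) {
        continue; // the big table is streamed; it is never pre-marked as done
      }
      DummySource parent = parents.get(pos);
      // Start the input out as exhausted only when its parent reports so.
      fetchDone[pos] = (parent != null) && parent.getFetchDone();
    }
    System.out.println(Arrays.toString(fetchDone)); // prints [false, true, false]
  }
}

The only point of the sketch is the seeding step: an input whose parent reports "nothing to fetch" starts out marked as exhausted instead of being polled.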
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
index d5ea96a..f85d7ad 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.ql.exec.MapOperator.MapOpCtx;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
 import org.apache.hadoop.hive.ql.exec.tez.MapRecordProcessor;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
@@ -55,6 +56,7 @@
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
@@ -330,6 +332,24 @@ else if (partRawRowObjectInspector.equals(tblRawRowObjectInspector)) {
     return tableDescOI;
   }

+  public void initEmptyInputChildren(List<Operator<? extends OperatorDesc>> children, Configuration hconf)
+      throws SerDeException, Exception {
+    setChildOperators(children);
+    for (Operator<? extends OperatorDesc> child : children) {
+      TableScanOperator tsOp = (TableScanOperator) child;
+      StructObjectInspector soi = null;
+      PartitionDesc partDesc = conf.getAliasToPartnInfo().get(tsOp.getConf().getAlias());
+      Deserializer serde = partDesc.getTableDesc().getDeserializer();
+      partDesc.setProperties(partDesc.getProperties());
+      MapOpCtx opCtx = new MapOpCtx(tsOp.getConf().getAlias(), child, partDesc);
+      StructObjectInspector tableRowOI = (StructObjectInspector) serde.getObjectInspector();
+      initObjectInspector(hconf, opCtx, tableRowOI);
+      soi = opCtx.rowObjectInspector;
+      child.getParentOperators().add(this);
+      childrenOpToOI.put(child, soi);
+    }
+  }
+
   public void setChildren(Configuration hconf) throws Exception {

     List<Operator<? extends OperatorDesc>> children =
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java
index 6bd156b..e9f65be 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java
@@ -27,6 +27,7 @@
  *
  */
 public class TezDummyStoreOperator extends DummyStoreOperator {
+  private boolean fetchDone = false;

   /**
    * Unlike the MR counterpoint, on Tez we want processOp to forward
@@ -37,4 +38,12 @@ public void process(Object row, int tag) throws HiveException {
     super.process(row, tag);
     forward(result.o, outputObjInspector);
   }
+
+  public boolean getFetchDone() {
+    return fetchDone;
+  }
+
+  public void setFetchDone(boolean fetchDone) {
+    this.fetchDone = fetchDone;
+  }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
index 446916c..cbea27c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java
@@ -336,6 +336,7 @@ private void processAllSideEvents(String inputName,
         + " multi mr inputs. " + bucketToTaskMap);

     Integer[] numSplitsForTask = new Integer[taskCount];
+    Arrays.fill(numSplitsForTask, 0);

     Multimap bucketToSerializedSplitMap = LinkedListMultimap.create();
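Note on the CustomPartitionVertex hunk above: numSplitsForTask is an Integer[] rather than an int[], and a newly allocated boxed array starts out full of nulls, so unboxing or incrementing an untouched slot later throws a NullPointerException; Arrays.fill(numSplitsForTask, 0) presumably gives every task a safe zero default even when it receives no splits (as happens with an empty input). A standalone illustration in plain Java, with no Hive types:

import java.util.Arrays;

public class IntegerArrayDefaultSketch {
  public static void main(String[] args) {
    Integer[] withoutFill = new Integer[3]; // boxed array: [null, null, null]
    try {
      withoutFill[1]++;                     // unboxes null -> NullPointerException
    } catch (NullPointerException e) {
      System.out.println("NPE when incrementing an unfilled slot");
    }

    Integer[] withFill = new Integer[3];
    Arrays.fill(withFill, 0);               // [0, 0, 0]
    withFill[1]++;                          // fine once the slots hold real zeros
    System.out.println(Arrays.toString(withFill)); // prints [0, 1, 0]
  }
}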
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
index f606ec0..b95966e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java
@@ -42,6 +42,7 @@
 import org.apache.hadoop.hive.ql.exec.ObjectCacheFactory;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorUtils;
+import org.apache.hadoop.hive.ql.exec.TezDummyStoreOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats;
 import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext;
@@ -170,9 +171,19 @@ void init(MRTaskReporter mrReporter,
           l4j.info("Input name is " + mergeMapWork.getName());
           jconf.set(Utilities.INPUT_NAME, mergeMapWork.getName());
           mergeMapOp.initialize(jconf, null);
-          mergeMapOp.setChildren(jconf);
+          boolean skipRead = mergeMapOp.getConf().getPathToAliases().isEmpty();
+          if (skipRead) {
+            List<Operator<? extends OperatorDesc>> children = new ArrayList<Operator<? extends OperatorDesc>>();
+            children.addAll(mergeMapOp.getConf().getAliasToWork().values());
+            mergeMapOp.initEmptyInputChildren(children, jconf);
+          } else {
+            mergeMapOp.setChildren(jconf);
+          }

           DummyStoreOperator dummyOp = getJoinParentOp(mergeMapOp);
+          if (dummyOp instanceof TezDummyStoreOperator) {
+            ((TezDummyStoreOperator) dummyOp).setFetchDone(skipRead);
+          }
           connectOps.put(mergeMapWork.getTag(), dummyOp);

           mergeMapOp.passExecContext(new ExecMapperContext(jconf));
@@ -252,8 +263,10 @@ private void initializeMapRecordSources() throws Exception {
       MultiMRInput multiMRInput = multiMRInputMap.get(inputName);
       Collection<KeyValueReader> kvReaders = multiMRInput.getKeyValueReaders();
       l4j.debug("There are " + kvReaders.size() + " key-value readers for input " + inputName);
-      reader = getKeyValueReader(kvReaders, mapOp);
-      sources[tag].init(jconf, mapOp, reader);
+      if (kvReaders.size() > 0) {
+        reader = getKeyValueReader(kvReaders, mapOp);
+        sources[tag].init(jconf, mapOp, reader);
+      }
     }
     ((TezContext) MapredContext.get()).setRecordSources(sources);
   }
diff --git ql/src/test/queries/clientpositive/tez_smb_fail.q ql/src/test/queries/clientpositive/tez_smb_fail.q
new file mode 100644
index 0000000..db0a8df
--- /dev/null
+++ ql/src/test/queries/clientpositive/tez_smb_fail.q
@@ -0,0 +1,42 @@
+set hive.explain.user=false;
+set hive.join.emit.interval=2;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ;
+
+CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+
+set hive.enforce.bucketing=true;
+set hive.enforce.sorting = true;
+set hive.optimize.bucketingsorting=false;
+insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part;
+
+CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin;
+
+set hive.auto.convert.sortmerge.join = true;
+
+set hive.auto.convert.join.noconditionaltask.size=500;
+CREATE TABLE empty(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+
+explain
+select count(*) from tab s1 join empty s3 on s1.key=s3.key;
+
+select count(*) from tab s1 join empty s3 on s1.key=s3.key;
+
+explain
+select * from tab s1 left outer join empty s3 on s1.key=s3.key;
+
+select * from tab s1 left outer join empty s3 on s1.key=s3.key;
diff --git ql/src/test/results/clientpositive/tez/tez_smb_fail.q.out ql/src/test/results/clientpositive/tez/tez_smb_fail.q.out
new file mode 100644
index 0000000..73f1d63
--- /dev/null
+++ ql/src/test/results/clientpositive/tez/tez_smb_fail.q.out
@@ -0,0 +1,508 @@
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS
TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin +PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab_part +POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab_part +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin_part +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load 
data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_part +PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_part +POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab +POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab +PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin +PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin +POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE empty(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@empty +POSTHOOK: query: CREATE TABLE empty(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: 
default@empty +PREHOOK: query: explain +select count(*) from tab s1 join empty s3 on s1.key=s3.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from tab s1 join empty s3 on s1.key=s3.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: s3 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Map Operator Tree: + TableScan + alias: s1 + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Merge Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: int) + 1 key (type: int) + Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from tab s1 join empty s3 on s1.key=s3.key +PREHOOK: type: QUERY +PREHOOK: Input: default@empty +PREHOOK: Input: default@tab +PREHOOK: Input: default@tab@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from tab s1 join empty s3 on s1.key=s3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@empty +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +#### A masked pattern was here #### +0 +PREHOOK: query: explain +select * from tab s1 left outer join empty s3 on s1.key=s3.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from tab s1 left outer join empty s3 on s1.key=s3.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: s3 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string), ds (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Map Operator Tree: + TableScan + alias: s1 + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: 
string), ds (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Merge Join Operator + condition map: + Left Outer Join0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 266 Data size: 2822 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 266 Data size: 2822 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from tab s1 left outer join empty s3 on s1.key=s3.key +PREHOOK: type: QUERY +PREHOOK: Input: default@empty +PREHOOK: Input: default@tab +PREHOOK: Input: default@tab@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select * from tab s1 left outer join empty s3 on s1.key=s3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@empty +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +#### A masked pattern was here #### +0 val_0 2008-04-08 NULL NULL NULL +0 val_0 2008-04-08 NULL NULL NULL +0 val_0 2008-04-08 NULL NULL NULL +2 val_2 2008-04-08 NULL NULL NULL +4 val_4 2008-04-08 NULL NULL NULL +8 val_8 2008-04-08 NULL NULL NULL +20 val_20 2008-04-08 NULL NULL NULL +24 val_24 2008-04-08 NULL NULL NULL +24 val_24 2008-04-08 NULL NULL NULL +26 val_26 2008-04-08 NULL NULL NULL +26 val_26 2008-04-08 NULL NULL NULL +28 val_28 2008-04-08 NULL NULL NULL +42 val_42 2008-04-08 NULL NULL NULL +42 val_42 2008-04-08 NULL NULL NULL +44 val_44 2008-04-08 NULL NULL NULL +64 val_64 2008-04-08 NULL NULL NULL +66 val_66 2008-04-08 NULL NULL NULL +80 val_80 2008-04-08 NULL NULL NULL +82 val_82 2008-04-08 NULL NULL NULL +84 val_84 2008-04-08 NULL NULL NULL +84 val_84 2008-04-08 NULL NULL NULL +86 val_86 2008-04-08 NULL NULL NULL +114 val_114 2008-04-08 NULL NULL NULL +116 val_116 2008-04-08 NULL NULL NULL +118 val_118 2008-04-08 NULL NULL NULL +118 val_118 2008-04-08 NULL NULL NULL +134 val_134 2008-04-08 NULL NULL NULL +134 val_134 2008-04-08 NULL NULL NULL +136 val_136 2008-04-08 NULL NULL NULL +138 val_138 2008-04-08 NULL NULL NULL +138 val_138 2008-04-08 NULL NULL NULL +138 val_138 2008-04-08 NULL NULL NULL +138 val_138 2008-04-08 NULL NULL NULL +150 val_150 2008-04-08 NULL NULL NULL +152 val_152 2008-04-08 NULL NULL NULL +152 val_152 2008-04-08 NULL NULL NULL +156 val_156 2008-04-08 NULL NULL NULL +158 val_158 2008-04-08 NULL NULL NULL +170 val_170 2008-04-08 NULL NULL NULL +172 val_172 2008-04-08 NULL NULL NULL +172 val_172 2008-04-08 NULL NULL NULL +174 val_174 2008-04-08 NULL NULL NULL +174 val_174 2008-04-08 NULL NULL NULL +176 val_176 2008-04-08 NULL NULL NULL +176 val_176 2008-04-08 NULL NULL NULL +178 val_178 2008-04-08 NULL NULL NULL +190 val_190 2008-04-08 NULL NULL NULL +192 val_192 2008-04-08 NULL NULL NULL +194 val_194 2008-04-08 NULL NULL NULL +196 val_196 2008-04-08 NULL NULL NULL +200 val_200 2008-04-08 NULL NULL NULL +200 val_200 2008-04-08 NULL NULL NULL +202 val_202 2008-04-08 NULL NULL NULL +208 val_208 2008-04-08 NULL NULL NULL +208 val_208 2008-04-08 NULL NULL NULL +208 val_208 2008-04-08 NULL NULL NULL +222 val_222 2008-04-08 NULL NULL NULL +224 val_224 2008-04-08 NULL NULL NULL +224 val_224 2008-04-08 NULL NULL 
NULL +226 val_226 2008-04-08 NULL NULL NULL +228 val_228 2008-04-08 NULL NULL NULL +242 val_242 2008-04-08 NULL NULL NULL +242 val_242 2008-04-08 NULL NULL NULL +244 val_244 2008-04-08 NULL NULL NULL +248 val_248 2008-04-08 NULL NULL NULL +260 val_260 2008-04-08 NULL NULL NULL +262 val_262 2008-04-08 NULL NULL NULL +266 val_266 2008-04-08 NULL NULL NULL +280 val_280 2008-04-08 NULL NULL NULL +280 val_280 2008-04-08 NULL NULL NULL +282 val_282 2008-04-08 NULL NULL NULL +282 val_282 2008-04-08 NULL NULL NULL +284 val_284 2008-04-08 NULL NULL NULL +286 val_286 2008-04-08 NULL NULL NULL +288 val_288 2008-04-08 NULL NULL NULL +288 val_288 2008-04-08 NULL NULL NULL +310 val_310 2008-04-08 NULL NULL NULL +316 val_316 2008-04-08 NULL NULL NULL +316 val_316 2008-04-08 NULL NULL NULL +316 val_316 2008-04-08 NULL NULL NULL +318 val_318 2008-04-08 NULL NULL NULL +318 val_318 2008-04-08 NULL NULL NULL +318 val_318 2008-04-08 NULL NULL NULL +332 val_332 2008-04-08 NULL NULL NULL +336 val_336 2008-04-08 NULL NULL NULL +338 val_338 2008-04-08 NULL NULL NULL +356 val_356 2008-04-08 NULL NULL NULL +374 val_374 2008-04-08 NULL NULL NULL +378 val_378 2008-04-08 NULL NULL NULL +392 val_392 2008-04-08 NULL NULL NULL +394 val_394 2008-04-08 NULL NULL NULL +396 val_396 2008-04-08 NULL NULL NULL +396 val_396 2008-04-08 NULL NULL NULL +396 val_396 2008-04-08 NULL NULL NULL +400 val_400 2008-04-08 NULL NULL NULL +402 val_402 2008-04-08 NULL NULL NULL +404 val_404 2008-04-08 NULL NULL NULL +404 val_404 2008-04-08 NULL NULL NULL +406 val_406 2008-04-08 NULL NULL NULL +406 val_406 2008-04-08 NULL NULL NULL +406 val_406 2008-04-08 NULL NULL NULL +406 val_406 2008-04-08 NULL NULL NULL +424 val_424 2008-04-08 NULL NULL NULL +424 val_424 2008-04-08 NULL NULL NULL +444 val_444 2008-04-08 NULL NULL NULL +446 val_446 2008-04-08 NULL NULL NULL +448 val_448 2008-04-08 NULL NULL NULL +460 val_460 2008-04-08 NULL NULL NULL +462 val_462 2008-04-08 NULL NULL NULL +462 val_462 2008-04-08 NULL NULL NULL +466 val_466 2008-04-08 NULL NULL NULL +466 val_466 2008-04-08 NULL NULL NULL +466 val_466 2008-04-08 NULL NULL NULL +468 val_468 2008-04-08 NULL NULL NULL +468 val_468 2008-04-08 NULL NULL NULL +468 val_468 2008-04-08 NULL NULL NULL +468 val_468 2008-04-08 NULL NULL NULL +480 val_480 2008-04-08 NULL NULL NULL +480 val_480 2008-04-08 NULL NULL NULL +480 val_480 2008-04-08 NULL NULL NULL +482 val_482 2008-04-08 NULL NULL NULL +484 val_484 2008-04-08 NULL NULL NULL +11 val_11 2008-04-08 NULL NULL NULL +15 val_15 2008-04-08 NULL NULL NULL +15 val_15 2008-04-08 NULL NULL NULL +17 val_17 2008-04-08 NULL NULL NULL +19 val_19 2008-04-08 NULL NULL NULL +33 val_33 2008-04-08 NULL NULL NULL +35 val_35 2008-04-08 NULL NULL NULL +35 val_35 2008-04-08 NULL NULL NULL +35 val_35 2008-04-08 NULL NULL NULL +37 val_37 2008-04-08 NULL NULL NULL +37 val_37 2008-04-08 NULL NULL NULL +51 val_51 2008-04-08 NULL NULL NULL +51 val_51 2008-04-08 NULL NULL NULL +53 val_53 2008-04-08 NULL NULL NULL +57 val_57 2008-04-08 NULL NULL NULL +77 val_77 2008-04-08 NULL NULL NULL +95 val_95 2008-04-08 NULL NULL NULL +95 val_95 2008-04-08 NULL NULL NULL +97 val_97 2008-04-08 NULL NULL NULL +97 val_97 2008-04-08 NULL NULL NULL +103 val_103 2008-04-08 NULL NULL NULL +103 val_103 2008-04-08 NULL NULL NULL +105 val_105 2008-04-08 NULL NULL NULL +125 val_125 2008-04-08 NULL NULL NULL +125 val_125 2008-04-08 NULL NULL NULL +129 val_129 2008-04-08 NULL NULL NULL +129 val_129 2008-04-08 NULL NULL NULL +143 val_143 2008-04-08 NULL NULL NULL +145 val_145 2008-04-08 NULL NULL NULL 
+149 val_149 2008-04-08 NULL NULL NULL +149 val_149 2008-04-08 NULL NULL NULL +163 val_163 2008-04-08 NULL NULL NULL +165 val_165 2008-04-08 NULL NULL NULL +165 val_165 2008-04-08 NULL NULL NULL +167 val_167 2008-04-08 NULL NULL NULL +167 val_167 2008-04-08 NULL NULL NULL +167 val_167 2008-04-08 NULL NULL NULL +169 val_169 2008-04-08 NULL NULL NULL +169 val_169 2008-04-08 NULL NULL NULL +169 val_169 2008-04-08 NULL NULL NULL +169 val_169 2008-04-08 NULL NULL NULL +181 val_181 2008-04-08 NULL NULL NULL +183 val_183 2008-04-08 NULL NULL NULL +187 val_187 2008-04-08 NULL NULL NULL +187 val_187 2008-04-08 NULL NULL NULL +187 val_187 2008-04-08 NULL NULL NULL +189 val_189 2008-04-08 NULL NULL NULL +213 val_213 2008-04-08 NULL NULL NULL +213 val_213 2008-04-08 NULL NULL NULL +217 val_217 2008-04-08 NULL NULL NULL +217 val_217 2008-04-08 NULL NULL NULL +219 val_219 2008-04-08 NULL NULL NULL +219 val_219 2008-04-08 NULL NULL NULL +233 val_233 2008-04-08 NULL NULL NULL +233 val_233 2008-04-08 NULL NULL NULL +235 val_235 2008-04-08 NULL NULL NULL +237 val_237 2008-04-08 NULL NULL NULL +237 val_237 2008-04-08 NULL NULL NULL +239 val_239 2008-04-08 NULL NULL NULL +239 val_239 2008-04-08 NULL NULL NULL +255 val_255 2008-04-08 NULL NULL NULL +255 val_255 2008-04-08 NULL NULL NULL +257 val_257 2008-04-08 NULL NULL NULL +273 val_273 2008-04-08 NULL NULL NULL +273 val_273 2008-04-08 NULL NULL NULL +273 val_273 2008-04-08 NULL NULL NULL +275 val_275 2008-04-08 NULL NULL NULL +277 val_277 2008-04-08 NULL NULL NULL +277 val_277 2008-04-08 NULL NULL NULL +277 val_277 2008-04-08 NULL NULL NULL +277 val_277 2008-04-08 NULL NULL NULL +291 val_291 2008-04-08 NULL NULL NULL +305 val_305 2008-04-08 NULL NULL NULL +307 val_307 2008-04-08 NULL NULL NULL +307 val_307 2008-04-08 NULL NULL NULL +309 val_309 2008-04-08 NULL NULL NULL +309 val_309 2008-04-08 NULL NULL NULL +321 val_321 2008-04-08 NULL NULL NULL +321 val_321 2008-04-08 NULL NULL NULL +323 val_323 2008-04-08 NULL NULL NULL +325 val_325 2008-04-08 NULL NULL NULL +325 val_325 2008-04-08 NULL NULL NULL +327 val_327 2008-04-08 NULL NULL NULL +327 val_327 2008-04-08 NULL NULL NULL +327 val_327 2008-04-08 NULL NULL NULL +341 val_341 2008-04-08 NULL NULL NULL +345 val_345 2008-04-08 NULL NULL NULL +365 val_365 2008-04-08 NULL NULL NULL +367 val_367 2008-04-08 NULL NULL NULL +367 val_367 2008-04-08 NULL NULL NULL +369 val_369 2008-04-08 NULL NULL NULL +369 val_369 2008-04-08 NULL NULL NULL +369 val_369 2008-04-08 NULL NULL NULL +389 val_389 2008-04-08 NULL NULL NULL +411 val_411 2008-04-08 NULL NULL NULL +413 val_413 2008-04-08 NULL NULL NULL +413 val_413 2008-04-08 NULL NULL NULL +417 val_417 2008-04-08 NULL NULL NULL +417 val_417 2008-04-08 NULL NULL NULL +417 val_417 2008-04-08 NULL NULL NULL +419 val_419 2008-04-08 NULL NULL NULL +431 val_431 2008-04-08 NULL NULL NULL +431 val_431 2008-04-08 NULL NULL NULL +431 val_431 2008-04-08 NULL NULL NULL +435 val_435 2008-04-08 NULL NULL NULL +437 val_437 2008-04-08 NULL NULL NULL +439 val_439 2008-04-08 NULL NULL NULL +439 val_439 2008-04-08 NULL NULL NULL +453 val_453 2008-04-08 NULL NULL NULL +455 val_455 2008-04-08 NULL NULL NULL +457 val_457 2008-04-08 NULL NULL NULL +459 val_459 2008-04-08 NULL NULL NULL +459 val_459 2008-04-08 NULL NULL NULL +475 val_475 2008-04-08 NULL NULL NULL +477 val_477 2008-04-08 NULL NULL NULL +479 val_479 2008-04-08 NULL NULL NULL +491 val_491 2008-04-08 NULL NULL NULL +493 val_493 2008-04-08 NULL NULL NULL +495 val_495 2008-04-08 NULL NULL NULL +497 val_497 2008-04-08 NULL NULL NULL
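A closing note for readers skimming the new golden file: both result blocks follow from ordinary join semantics against an empty table. The inner join can produce no matches, hence the single "0" from count(*), while the left outer join keeps every row of tab and pads the columns of empty with NULLs, which is the shape of the "<key> val_<key> 2008-04-08 NULL NULL NULL" rows above. A tiny standalone sketch of those two behaviours in plain Java (nothing Hive-specific, illustrative data only):

public class EmptyJoinSemanticsSketch {
  public static void main(String[] args) {
    int[] leftKeys = {0, 2, 4};
    String[] leftVals = {"val_0", "val_2", "val_4"};
    int[] rightKeys = {}; // the "empty" table: no rows at all

    // Inner join: with no rows on the right there can be no matches,
    // so the expected output is zero rows.
    int innerRows = 0;
    for (int leftKey : leftKeys) {
      for (int rightKey : rightKeys) {
        if (leftKey == rightKey) {
          innerRows++;
        }
      }
    }
    System.out.println("inner join row count: " + innerRows); // prints 0

    // Left outer join: every left row is emitted once; when there is no
    // match, the right-hand columns are padded with NULLs.
    for (int i = 0; i < leftKeys.length; i++) {
      boolean matched = false;
      for (int rightKey : rightKeys) {
        if (leftKeys[i] == rightKey) {
          matched = true;
          System.out.println(leftKeys[i] + "\t" + leftVals[i] + "\t" + rightKey);
        }
      }
      if (!matched) {
        System.out.println(leftKeys[i] + "\t" + leftVals[i] + "\tNULL\tNULL\tNULL");
      }
    }
  }
}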