diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Attr.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Attr.java
index e3b9eef..5d355d2 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Attr.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Attr.java
@@ -18,9 +18,9 @@ package org.apache.hadoop.hive.common.jsonexplain.tez;
-public class Attr implements Comparable {
-  String name;
-  String value;
+public final class Attr implements Comparable {
+  public final String name;
+  public final String value;
   public Attr(String name, String value) {
     super();
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
index c7a4ed6..d341cb1 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Connection.java
@@ -18,9 +18,9 @@ package org.apache.hadoop.hive.common.jsonexplain.tez;
-public class Connection {
-  public String type;
-  public Vertex from;
+public final class Connection {
+  public final String type;
+  public final Vertex from;
   public Connection(String type, Vertex from) {
     super();
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
index fb12f70..9ecba7c 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Op.java
@@ -18,7 +18,6 @@ package org.apache.hadoop.hive.common.jsonexplain.tez;
-import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -28,22 +27,24 @@ import org.json.JSONException;
 import org.json.JSONObject;
-public class Op {
-  String name;
-  String operatorId;
-  Op parent;
-  List children;
-  List attrs;
+public final class Op {
+  public final String name;
+  //tezJsonParser
+  public final TezJsonParser parser;
+  public final String operatorId;
+  public Op parent;
+  public final List children;
+  public final List attrs;
   // the jsonObject for this operator
-  JSONObject opObject;
+  public final JSONObject opObject;
   // the vertex that this operator belongs to
-  Vertex vertex;
+  public final Vertex vertex;
   // the vertex that this operator output to if this operator is a
   // ReduceOutputOperator
-  String outputVertexName;
+  public final String outputVertexName;
   public Op(String name, String id, String outputVertexName, List children, List attrs,
-      JSONObject opObject, Vertex vertex) throws JSONException {
+      JSONObject opObject, Vertex vertex, TezJsonParser tezJsonParser) throws JSONException {
     super();
     this.name = name;
     this.operatorId = id;
@@ -52,6 +53,7 @@ public Op(String name, String id, String outputVertexName, List children, Li
     this.attrs = attrs;
     this.opObject = opObject;
     this.vertex = vertex;
+    this.parser = tezJsonParser;
   }
   private void inlineJoinOp() throws Exception {
@@ -73,7 +75,7 @@ private void inlineJoinOp() throws Exception {
       }
     }
     if (c != null) {
-      TezJsonParser.addInline(this, c);
+      parser.addInline(this, c);
     }
   }
   // update the attrs
@@ -96,14 +98,12 @@ private void inlineJoinOp() throws Exception {
      }
    }
    // inline merge join operator in a self-join
-    else if (this.name.equals("Merge Join Operator")) {
+    else {
      if (this.vertex != null) {
        for (Vertex v : this.vertex.mergeJoinDummyVertexs) {
-          TezJsonParser.addInline(this, new Connection(null, v));
+          parser.addInline(this, new Connection(null, v));
        }
      }
-    } else {
-      throw new Exception("Unknown join operator");
    }
  }
@@ -123,23 +123,23 @@ private String getNameWithOpId() {
    * operator so that we can decide the corresponding indent.
    * @throws Exception
    */
-  public void print(PrintStream out, List indentFlag, boolean branchOfJoinOp)
+  public void print(Printer printer, List indentFlag, boolean branchOfJoinOp)
       throws Exception {
    // print name
-    if (TezJsonParser.printSet.contains(this)) {
-      out.println(TezJsonParser.prefixString(indentFlag) + " Please refer to the previous "
+    if (parser.printSet.contains(this)) {
+      printer.println(TezJsonParser.prefixString(indentFlag) + " Please refer to the previous "
          + this.getNameWithOpId());
      return;
    }
-    TezJsonParser.printSet.add(this);
+    parser.printSet.add(this);
    if (!branchOfJoinOp) {
-      out.println(TezJsonParser.prefixString(indentFlag) + this.getNameWithOpId());
+      printer.println(TezJsonParser.prefixString(indentFlag) + this.getNameWithOpId());
    } else {
-      out.println(TezJsonParser.prefixString(indentFlag, "|<-") + this.getNameWithOpId());
+      printer.println(TezJsonParser.prefixString(indentFlag, "|<-") + this.getNameWithOpId());
    }
    branchOfJoinOp = false;
-    // if this operator is a join operator
-    if (this.name.contains("Join")) {
+    // if this operator is a Map Join Operator or a Merge Join Operator
+    if (this.name.equals("Map Join Operator") || this.name.equals("Merge Join Operator")) {
      inlineJoinOp();
      branchOfJoinOp = true;
    }
@@ -149,7 +149,7 @@ public void print(PrintStream out, List indentFlag, boolean branchOfJoi
    if (this.parent == null) {
      if (this.vertex != null) {
        for (Connection connection : this.vertex.parentConnections) {
-          if (!TezJsonParser.isInline(connection.from)) {
+          if (!parser.isInline(connection.from)) {
            noninlined.add(connection);
          }
        }
@@ -167,12 +167,12 @@ public void print(PrintStream out, List indentFlag, boolean branchOfJoi
    }
    Collections.sort(attrs);
    for (Attr attr : attrs) {
-      out.println(TezJsonParser.prefixString(attFlag) + attr.toString());
+      printer.println(TezJsonParser.prefixString(attFlag) + attr.toString());
    }
    // print inline vertex
-    if (TezJsonParser.inlineMap.containsKey(this)) {
-      for (int index = 0; index < TezJsonParser.inlineMap.get(this).size(); index++) {
-        Connection connection = TezJsonParser.inlineMap.get(this).get(index);
+    if (parser.inlineMap.containsKey(this)) {
+      for (int index = 0; index < parser.inlineMap.get(this).size(); index++) {
+        Connection connection = parser.inlineMap.get(this).get(index);
        List vertexFlag = new ArrayList<>();
        vertexFlag.addAll(indentFlag);
        if (branchOfJoinOp) {
@@ -185,7 +185,7 @@ public void print(PrintStream out, List indentFlag, boolean branchOfJoi
        else {
          vertexFlag.add(false);
        }
-        connection.from.print(out, vertexFlag, connection.type, this.vertex);
+        connection.from.print(printer, vertexFlag, connection.type, this.vertex);
      }
    }
    // print parent op, i.e., where data comes from
@@ -193,7 +193,7 @@ public void print(PrintStream out, List indentFlag, boolean branchOfJoi
      List parentFlag = new ArrayList<>();
      parentFlag.addAll(indentFlag);
      parentFlag.add(false);
-      this.parent.print(out, parentFlag, branchOfJoinOp);
+      this.parent.print(printer, parentFlag, branchOfJoinOp);
    }
    // print next vertex
    else {
@@ -206,7 +206,7 @@ public void print(PrintStream out, List indentFlag, boolean branchOfJoi
        } else {
          vertexFlag.add(false);
        }
-        v.print(out, vertexFlag, noninlined.get(index).type, this.vertex);
+        v.print(printer, vertexFlag, noninlined.get(index).type, this.vertex);
      }
    }
  }
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
new file mode 100644
index 0000000..d3c91d6
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Printer.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.jsonexplain.tez;
+
+public final class Printer {
+  public static final String lineSeparator = System.getProperty("line.separator");;
+  private final StringBuilder builder = new StringBuilder();
+
+  public void print(String string) {
+    builder.append(string);
+  }
+
+  public void println(String string) {
+    builder.append(string);
+    builder.append(lineSeparator);
+  }
+
+  public void println() {
+    builder.append(lineSeparator);
+  }
+
+  public String toString() {
+    return builder.toString();
+  }
+}
diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
index 10e0a0c..8fbb838 100644
--- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
+++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Stage.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hive.common.jsonexplain.tez;
 import java.io.IOException;
-import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -34,30 +33,33 @@ import org.json.JSONException;
 import org.json.JSONObject;
-public class Stage {
-  String name;
+public final class Stage {
+  //external name is used to show at the console
+  String externalName;
+  //internal name is used to track the stages
+  public final String internalName;
+  //tezJsonParser
+  public final TezJsonParser parser;
   // upstream stages, e.g., root stage
-  List parentStages;
+  public final List parentStages = new ArrayList<>();
   // downstream stages.
-  List childStages;
-  Map vertexs;
-  List attrs;
+  public final List childStages = new ArrayList<>();
+  public final Map vertexs =new LinkedHashMap<>();
+  public final List attrs = new ArrayList<>();
   LinkedHashMap> tezStageDependency;
   // some stage may contain only a single operator, e.g., create table operator,
   // fetch operator.
Op op; - public Stage(String name) { + public Stage(String name, TezJsonParser tezJsonParser) { super(); - this.name = name; - parentStages = new ArrayList<>(); - childStages = new ArrayList<>(); - attrs = new ArrayList<>(); - vertexs = new LinkedHashMap<>(); + internalName = name; + externalName = name; + parser = tezJsonParser; } public void addDependency(JSONObject object, Map stages) throws JSONException { - if (!object.has("ROOT STAGE")) { + if (object.has("DEPENDENT STAGES")) { String names = object.getString("DEPENDENT STAGES"); for (String name : names.split(",")) { Stage parent = stages.get(name.trim()); @@ -65,6 +67,16 @@ public void addDependency(JSONObject object, Map stages) throws J parent.childStages.add(this); } } + if (object.has("CONDITIONAL CHILD TASKS")) { + String names = object.getString("CONDITIONAL CHILD TASKS"); + this.externalName = this.internalName + "(CONDITIONAL CHILD TASKS: " + names + ")"; + for (String name : names.split(",")) { + Stage child = stages.get(name.trim()); + child.externalName = child.internalName + "(CONDITIONAL)"; + child.parentStages.add(this); + this.childStages.add(child); + } + } } /** @@ -83,7 +95,7 @@ public void extractVertex(JSONObject object) throws Exception { JSONObject edges = tez.getJSONObject("Edges:"); // iterate for the first time to get all the vertices for (String to : JSONObject.getNames(edges)) { - vertexs.put(to, new Vertex(to, vertices.getJSONObject(to))); + vertexs.put(to, new Vertex(to, vertices.getJSONObject(to), parser)); } // iterate for the second time to get all the vertex dependency for (String to : JSONObject.getNames(edges)) { @@ -95,7 +107,7 @@ public void extractVertex(JSONObject object) throws Exception { String parent = obj.getString("parent"); Vertex parentVertex = vertexs.get(parent); if (parentVertex == null) { - parentVertex = new Vertex(parent, vertices.getJSONObject(parent)); + parentVertex = new Vertex(parent, vertices.getJSONObject(parent), parser); vertexs.put(parent, parentVertex); } String type = obj.getString("type"); @@ -117,7 +129,7 @@ public void extractVertex(JSONObject object) throws Exception { String parent = obj.getString("parent"); Vertex parentVertex = vertexs.get(parent); if (parentVertex == null) { - parentVertex = new Vertex(parent, vertices.getJSONObject(parent)); + parentVertex = new Vertex(parent, vertices.getJSONObject(parent), parser); vertexs.put(parent, parentVertex); } String type = obj.getString("type"); @@ -135,7 +147,7 @@ public void extractVertex(JSONObject object) throws Exception { } } else { for (String vertexName : JSONObject.getNames(vertices)) { - vertexs.put(vertexName, new Vertex(vertexName, vertices.getJSONObject(vertexName))); + vertexs.put(vertexName, new Vertex(vertexName, vertices.getJSONObject(vertexName), parser)); } } // The opTree in vertex is extracted @@ -147,11 +159,13 @@ public void extractVertex(JSONObject object) throws Exception { } } else { String[] names = JSONObject.getNames(object); - for (String name : names) { - if (name.contains("Operator")) { - this.op = extractOp(name, object.getJSONObject(name)); - } else { - attrs.add(new Attr(name, object.get(name).toString())); + if (names != null) { + for (String name : names) { + if (name.contains("Operator")) { + this.op = extractOp(name, object.getJSONObject(name)); + } else { + attrs.add(new Attr(name, object.get(name).toString())); + } } } } @@ -185,7 +199,7 @@ Op extractOp(String opName, JSONObject opObj) throws JSONException, JsonParseExc if (name.equals("Processor Tree:")) { JSONObject object 
= new JSONObject(); object.put(name, attrObj); - v = new Vertex(null, object); + v = new Vertex(null, object, parser); v.extractOpTree(); } else { for (String attrName : JSONObject.getNames(attrObj)) { @@ -194,13 +208,13 @@ Op extractOp(String opName, JSONObject opObj) throws JSONException, JsonParseExc } } } else { - throw new Exception("Unsupported object in " + this.name); + throw new Exception("Unsupported object in " + this.internalName); } } } - Op op = new Op(opName, null, null, null, attrs, null, v); + Op op = new Op(opName, null, null, null, attrs, null, v, parser); if (v != null) { - TezJsonParser.addInline(op, new Connection(null, v)); + parser.addInline(op, new Connection(null, v)); } return op; } @@ -217,37 +231,37 @@ private boolean isPrintable(Object val) { return false; } - public void print(PrintStream out, List indentFlag) throws JSONException, Exception { + public void print(Printer printer, List indentFlag) throws JSONException, Exception { // print stagename - if (TezJsonParser.printSet.contains(this)) { - out.println(TezJsonParser.prefixString(indentFlag) + " Please refer to the previous " - + this.name); + if (parser.printSet.contains(this)) { + printer.println(TezJsonParser.prefixString(indentFlag) + " Please refer to the previous " + + externalName); return; } - TezJsonParser.printSet.add(this); - out.println(TezJsonParser.prefixString(indentFlag) + this.name); + parser.printSet.add(this); + printer.println(TezJsonParser.prefixString(indentFlag) + externalName); // print vertexes List nextIndentFlag = new ArrayList<>(); nextIndentFlag.addAll(indentFlag); nextIndentFlag.add(false); for (Vertex candidate : this.vertexs.values()) { - if (!TezJsonParser.isInline(candidate) && candidate.children.isEmpty()) { - candidate.print(out, nextIndentFlag, null, null); + if (!parser.isInline(candidate) && candidate.children.isEmpty()) { + candidate.print(printer, nextIndentFlag, null, null); } } if (!attrs.isEmpty()) { Collections.sort(attrs); for (Attr attr : attrs) { - out.println(TezJsonParser.prefixString(nextIndentFlag) + attr.toString()); + printer.println(TezJsonParser.prefixString(nextIndentFlag) + attr.toString()); } } if (op != null) { - op.print(out, nextIndentFlag, false); + op.print(printer, nextIndentFlag, false); } nextIndentFlag.add(false); // print dependent stages for (Stage stage : this.parentStages) { - stage.print(out, nextIndentFlag); + stage.print(printer, nextIndentFlag); } } } diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java index 43ddff3..180fdab 100644 --- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java +++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/TezJsonParser.java @@ -36,28 +36,28 @@ import org.json.JSONException; import org.json.JSONObject; -public class TezJsonParser implements JsonParser { - JSONObject inputObject; - Map stages; - PrintStream outputStream; +public final class TezJsonParser implements JsonParser { + public final Map stages = new HashMap();; protected final Log LOG; // the object that has been printed. - public static Set printSet = new HashSet<>(); - // the vertex that should be inlined. - public static Map> inlineMap = new HashMap<>(); + public final Set printSet = new HashSet<>(); + // the vertex that should be inlined. 
+ public final Map> inlineMap = new HashMap<>(); + public TezJsonParser() { super(); LOG = LogFactory.getLog(this.getClass().getName()); } - public void extractStagesAndPlans() throws JSONException, JsonParseException, - JsonMappingException, Exception, IOException { + + public void extractStagesAndPlans(JSONObject inputObject) throws JSONException, + JsonParseException, JsonMappingException, Exception, IOException { // extract stages - this.stages = new HashMap(); JSONObject dependency = inputObject.getJSONObject("STAGE DEPENDENCIES"); if (dependency.length() > 0) { // iterate for the first time to get all the names of stages. for (String stageName : JSONObject.getNames(dependency)) { - this.stages.put(stageName, new Stage(stageName)); + this.stages.put(stageName, new Stage(stageName, this)); } // iterate for the second time to get all the dependency. for (String stageName : JSONObject.getNames(dependency)) { @@ -77,8 +77,8 @@ public void extractStagesAndPlans() throws JSONException, JsonParseException, /** * @param indentFlag - * help to generate correct indent - * @return + * help to generate correct indent + * @return */ public static String prefixString(List indentFlag) { StringBuilder sb = new StringBuilder(); @@ -94,7 +94,7 @@ public static String prefixString(List indentFlag) { /** * @param indentFlag * @param tail - * help to generate correct indent with a specific tail + * help to generate correct indent with a specific tail * @return */ public static String prefixString(List indentFlag, String tail) { @@ -111,19 +111,18 @@ public static String prefixString(List indentFlag, String tail) { @Override public void print(JSONObject inputObject, PrintStream outputStream) throws Exception { - LOG.info("JsonParser is parsing\n" + inputObject.toString()); - this.inputObject = inputObject; - this.outputStream = outputStream; - this.extractStagesAndPlans(); + LOG.info("JsonParser is parsing:" + inputObject.toString()); + this.extractStagesAndPlans(inputObject); + Printer printer = new Printer(); // print out the cbo info if (inputObject.has("cboInfo")) { - outputStream.println(inputObject.getString("cboInfo")); - outputStream.println(); + printer.println(inputObject.getString("cboInfo")); + printer.println(); } // print out the vertex dependency in root stage for (Stage candidate : this.stages.values()) { if (candidate.tezStageDependency != null && candidate.tezStageDependency.size() > 0) { - outputStream.println("Vertex dependency in root stage"); + printer.println("Vertex dependency in root stage"); for (Entry> entry : candidate.tezStageDependency.entrySet()) { StringBuilder sb = new StringBuilder(); sb.append(entry.getKey().name); @@ -137,21 +136,22 @@ public void print(JSONObject inputObject, PrintStream outputStream) throws Excep } sb.append(connection.from.name + " (" + connection.type + ")"); } - outputStream.println(sb.toString()); + printer.println(sb.toString()); } - outputStream.println(); + printer.println(); } } List indentFlag = new ArrayList<>(); // print out all the stages that have no childStages. 
for (Stage candidate : this.stages.values()) { if (candidate.childStages.isEmpty()) { - candidate.print(outputStream, indentFlag); + candidate.print(printer, indentFlag); } } + outputStream.println(printer.toString()); } - public static void addInline(Op op, Connection connection) { + public void addInline(Op op, Connection connection) { List list = inlineMap.get(op); if (list == null) { list = new ArrayList<>(); @@ -161,10 +161,11 @@ public static void addInline(Op op, Connection connection) { list.add(connection); } } - public static boolean isInline(Vertex v) { - for(List list : inlineMap.values()){ + + public boolean isInline(Vertex v) { + for (List list : inlineMap.values()) { for (Connection connection : list) { - if(connection.from.equals(v)){ + if (connection.from.equals(v)) { return true; } } diff --git a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java index 9b3405e..4d7b228 100644 --- a/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java +++ b/common/src/java/org/apache/hadoop/hive/common/jsonexplain/tez/Vertex.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.common.jsonexplain.tez; import java.io.IOException; -import java.io.PrintStream; import java.util.ArrayList; import java.util.List; @@ -29,28 +28,30 @@ import org.json.JSONException; import org.json.JSONObject; -public class Vertex { - public String name; +public final class Vertex { + public final String name; + //tezJsonParser + public final TezJsonParser parser; // vertex's parent connections. - public List parentConnections; + public final List parentConnections = new ArrayList<>(); // vertex's children vertex. - public List children; + public final List children = new ArrayList<>(); // the jsonObject for this vertex - public JSONObject vertexObject; + public final JSONObject vertexObject; // whether this vertex is a union vertex public boolean union; // whether this vertex is dummy (which does not really exists but is created), // e.g., a dummy vertex for a mergejoin branch public boolean dummy; // the rootOps in this vertex - public List rootOps; + public final List rootOps = new ArrayList<>(); // we create a dummy vertex for a mergejoin branch for a self join if this // vertex is a mergejoin - public List mergeJoinDummyVertexs; + public final List mergeJoinDummyVertexs = new ArrayList<>(); // whether this vertex has multiple reduce operators - boolean hasMultiReduceOp; + public boolean hasMultiReduceOp = false; - public Vertex(String name, JSONObject vertexObject) { + public Vertex(String name, JSONObject vertexObject, TezJsonParser tezJsonParser) { super(); this.name = name; if (this.name != null && this.name.contains("Union")) { @@ -60,11 +61,7 @@ public Vertex(String name, JSONObject vertexObject) { } this.dummy = false; this.vertexObject = vertexObject; - this.parentConnections = new ArrayList<>(); - this.children = new ArrayList<>(); - this.rootOps = new ArrayList<>(); - this.mergeJoinDummyVertexs = new ArrayList<>(); - this.hasMultiReduceOp = false; + parser = tezJsonParser; } public void addDependency(Connection connection) throws JSONException { @@ -88,20 +85,26 @@ public void extractOpTree() throws JSONException, JsonParseException, JsonMappin extractOp(vertexObject.getJSONArray(key).getJSONObject(0)); } else if (key.equals("Reduce Operator Tree:") || key.equals("Processor Tree:")) { extractOp(vertexObject.getJSONObject(key)); - } - // this is the case when we have a map-side SMB 
join - // one input of the join is treated as a dummy vertex - else if (key.equals("Join:")) { + } else if (key.equals("Join:")) { + // this is the case when we have a map-side SMB join + // one input of the join is treated as a dummy vertex JSONArray array = vertexObject.getJSONArray(key); for (int index = 0; index < array.length(); index++) { JSONObject mpOpTree = array.getJSONObject(index); - Vertex v = new Vertex("", mpOpTree); + Vertex v = new Vertex("", mpOpTree, parser); v.extractOpTree(); v.dummy = true; mergeJoinDummyVertexs.add(v); } + } else if (key.equals("Merge File Operator")) { + JSONObject opTree = vertexObject.getJSONObject(key); + if (opTree.has("Map Operator Tree:")) { + extractOp(opTree.getJSONArray("Map Operator Tree:").getJSONObject(0)); + } else { + throw new Exception("Merge File Operator does not have a Map Operator Tree"); + } } else { - throw new Exception("unsupported operator tree in vertex " + this.name); + throw new Exception("Unsupported operator tree in vertex " + this.name); } } } @@ -159,7 +162,7 @@ Op extractOp(JSONObject operator) throws JSONException, JsonParseException, Json } } } - Op op = new Op(opName, id, outputVertexName, children, attrs, operator, this); + Op op = new Op(opName, id, outputVertexName, children, attrs, operator, this, parser); if (!children.isEmpty()) { for (Op child : children) { child.parent = op; @@ -171,24 +174,24 @@ Op extractOp(JSONObject operator) throws JSONException, JsonParseException, Json } } - public void print(PrintStream out, List indentFlag, String type, Vertex callingVertex) + public void print(Printer printer, List indentFlag, String type, Vertex callingVertex) throws JSONException, Exception { // print vertexname - if (TezJsonParser.printSet.contains(this) && !hasMultiReduceOp) { + if (parser.printSet.contains(this) && !hasMultiReduceOp) { if (type != null) { - out.println(TezJsonParser.prefixString(indentFlag, "|<-") + printer.println(TezJsonParser.prefixString(indentFlag, "|<-") + " Please refer to the previous " + this.name + " [" + type + "]"); } else { - out.println(TezJsonParser.prefixString(indentFlag, "|<-") + printer.println(TezJsonParser.prefixString(indentFlag, "|<-") + " Please refer to the previous " + this.name); } return; } - TezJsonParser.printSet.add(this); + parser.printSet.add(this); if (type != null) { - out.println(TezJsonParser.prefixString(indentFlag, "|<-") + this.name + " [" + type + "]"); + printer.println(TezJsonParser.prefixString(indentFlag, "|<-") + this.name + " [" + type + "]"); } else if (this.name != null) { - out.println(TezJsonParser.prefixString(indentFlag) + this.name); + printer.println(TezJsonParser.prefixString(indentFlag) + this.name); } // print operators if (hasMultiReduceOp && !callingVertex.union) { @@ -200,7 +203,7 @@ public void print(PrintStream out, List indentFlag, String type, Vertex } } if (choose != null) { - choose.print(out, indentFlag, false); + choose.print(printer, indentFlag, false); } else { throw new Exception("Can not find the right reduce output operator for vertex " + this.name); } @@ -208,9 +211,9 @@ public void print(PrintStream out, List indentFlag, String type, Vertex for (Op op : this.rootOps) { // dummy vertex is treated as a branch of a join operator if (this.dummy) { - op.print(out, indentFlag, true); + op.print(printer, indentFlag, true); } else { - op.print(out, indentFlag, false); + op.print(printer, indentFlag, false); } } } @@ -225,7 +228,7 @@ public void print(PrintStream out, List indentFlag, String type, Vertex } else { 
unionFlag.add(false); } - connection.from.print(out, unionFlag, connection.type, this); + connection.from.print(printer, unionFlag, connection.type, this); } } } diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 27f68df..503d718 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1705,7 +1705,7 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { HIVE_LOG_EXPLAIN_OUTPUT("hive.log.explain.output", false, "Whether to log explain output for every query.\n" + "When enabled, will log EXPLAIN EXTENDED output for the query at INFO log4j log level."), - HIVE_EXPLAIN_USER("hive.explain.user", false, + HIVE_EXPLAIN_USER("hive.explain.user", true, "Whether to show explain result at user level.\n" + "When enabled, will log EXPLAIN output for the query at user level."), diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties index b9f39fb..9e8ea65 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -321,6 +321,7 @@ minitez.query.files=bucket_map_join_tez1.q,\ dynamic_partition_pruning_2.q,\ explainuser_1.q,\ explainuser_2.q,\ + explainuser_3.q,\ hybridgrace_hashjoin_1.q,\ hybridgrace_hashjoin_2.q,\ mapjoin_decimal.q,\ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java index 35c4cfc..c6b49bf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java @@ -39,6 +39,8 @@ import java.util.Set; import java.util.TreeMap; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.jsonexplain.JsonParser; import org.apache.hadoop.hive.common.jsonexplain.JsonParserFactory; @@ -49,7 +51,6 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.optimizer.physical.StageIDsRearranger; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.plan.Explain.Level; import org.apache.hadoop.hive.ql.plan.ExplainWork; @@ -76,9 +77,11 @@ public static final String EXPL_COLUMN_NAME = "Explain"; private final Set> visitedOps = new HashSet>(); private boolean isLogical = false; + protected final Log LOG; public ExplainTask() { super(); + LOG = LogFactory.getLog(this.getClass().getName()); } /* @@ -288,28 +291,29 @@ public int execute(DriverContext driverContext) { JSONObject jsonDependencies = getJSONDependencies(work); out.print(jsonDependencies); } else { - if (work.getDependency()) { - JSONObject jsonDependencies = getJSONDependencies(work); - out.print(jsonDependencies); + if (work.isUserLevelExplain()) { + // Because of the implementation of the JsonParserFactory, we are sure + // that we can get a TezJsonParser. + JsonParser jsonParser = JsonParserFactory.getParser(conf); + work.setFormatted(true); + JSONObject jsonPlan = getJSONPlan(out, work); + if (work.getCboInfo() != null) { + jsonPlan.put("cboInfo", work.getCboInfo()); + } + try { + jsonParser.print(jsonPlan, out); + } catch (Exception e) { + // if there is anything wrong happen, we bail out. 
+ LOG.error("Running explain user level has problem: " + e.toString() + + ". Falling back to normal explain"); + work.setFormatted(false); + work.setUserLevelExplain(false); + jsonPlan = getJSONPlan(out, work); + } } else { - if (work.isUserLevelExplain()) { - JsonParser jsonParser = JsonParserFactory.getParser(conf); - if (jsonParser != null) { - work.setFormatted(true); - JSONObject jsonPlan = getJSONPlan(out, work); - if (work.getCboInfo() != null) { - jsonPlan.put("cboInfo", work.getCboInfo()); - } - jsonParser.print(jsonPlan, out); - } else { - throw new SemanticException( - "Hive UserLevelExplain only supports tez engine right now."); - } - } else { - JSONObject jsonPlan = getJSONPlan(out, work); - if (work.isFormatted()) { - out.print(jsonPlan); - } + JSONObject jsonPlan = getJSONPlan(out, work); + if (work.isFormatted()) { + out.print(jsonPlan); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java index 3fbc8de..66d1546 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java @@ -91,8 +91,13 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { pCtx = ((SemanticAnalyzer)sem).getParseContext(); } - boolean userLevelExplain = !extended && !formatted && !dependency && !logical && !authorize - && HiveConf.getBoolVar(ctx.getConf(), HiveConf.ConfVars.HIVE_EXPLAIN_USER); + boolean userLevelExplain = !extended + && !formatted + && !dependency + && !logical + && !authorize + && (HiveConf.getBoolVar(ctx.getConf(), HiveConf.ConfVars.HIVE_EXPLAIN_USER) && HiveConf + .getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")); ExplainWork work = new ExplainWork(ctx.getResFile(), pCtx, tasks, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java index 7a561e6..58120d3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java @@ -21,11 +21,13 @@ import java.io.Serializable; import java.util.List; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + /** * ConditionalWork. 
* */ -@Explain(displayName = "Conditional Operator") +@Explain(displayName = "Conditional Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public class ConditionalWork implements Serializable { private static final long serialVersionUID = 1L; List listWorks; diff --git a/ql/src/test/queries/clientpositive/auto_join0.q b/ql/src/test/queries/clientpositive/auto_join0.q index 008f9e3..24647fc 100644 --- a/ql/src/test/queries/clientpositive/auto_join0.q +++ b/ql/src/test/queries/clientpositive/auto_join0.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.join = true; -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/auto_join1.q b/ql/src/test/queries/clientpositive/auto_join1.q index 7414005..3aec73f 100644 --- a/ql/src/test/queries/clientpositive/auto_join1.q +++ b/ql/src/test/queries/clientpositive/auto_join1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.join =true; -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/auto_join21.q b/ql/src/test/queries/clientpositive/auto_join21.q index 17e8a88..10ac490 100644 --- a/ql/src/test/queries/clientpositive/auto_join21.q +++ b/ql/src/test/queries/clientpositive/auto_join21.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.join = true; -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/auto_join29.q b/ql/src/test/queries/clientpositive/auto_join29.q index c9eb9b0..f991540 100644 --- a/ql/src/test/queries/clientpositive/auto_join29.q +++ b/ql/src/test/queries/clientpositive/auto_join29.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.join = true; -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/auto_join30.q b/ql/src/test/queries/clientpositive/auto_join30.q index 9e31f0f..7ac3c0e 100644 --- a/ql/src/test/queries/clientpositive/auto_join30.q +++ b/ql/src/test/queries/clientpositive/auto_join30.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.join = true; explain diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q index 83e1cec..c07dd23 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_10.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.enforce.bucketing = true; set hive.enforce.sorting = true; set hive.exec.reducers.max = 1; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q index e92504a..f35fec1 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.enforce.bucketing = true; set hive.enforce.sorting = true; set hive.exec.reducers.max = 1; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q index 4cda4df..eabeff0 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_14.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.enforce.bucketing = true; set hive.enforce.sorting = true; set hive.exec.reducers.max = 1; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q index c7bcae6..a553d93 100644 --- 
a/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_15.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.enforce.bucketing = true; set hive.enforce.sorting = true; set hive.exec.reducers.max = 1; diff --git a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q index e3766e7..9eb85d3 100644 --- a/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q +++ b/ql/src/test/queries/clientpositive/auto_sortmerge_join_9.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.enforce.bucketing = true; set hive.enforce.sorting = true; set hive.exec.reducers.max = 1; diff --git a/ql/src/test/queries/clientpositive/bucket2.q b/ql/src/test/queries/clientpositive/bucket2.q index f9f1627..ecd7e53 100644 --- a/ql/src/test/queries/clientpositive/bucket2.q +++ b/ql/src/test/queries/clientpositive/bucket2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.enforce.bucketing = true; set hive.exec.reducers.max = 1; diff --git a/ql/src/test/queries/clientpositive/bucket3.q b/ql/src/test/queries/clientpositive/bucket3.q index b0f89c8..7b7a9c3 100644 --- a/ql/src/test/queries/clientpositive/bucket3.q +++ b/ql/src/test/queries/clientpositive/bucket3.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.enforce.bucketing = true; set hive.exec.reducers.max = 1; diff --git a/ql/src/test/queries/clientpositive/bucket4.q b/ql/src/test/queries/clientpositive/bucket4.q index 2b3f805..1b49c7a 100644 --- a/ql/src/test/queries/clientpositive/bucket4.q +++ b/ql/src/test/queries/clientpositive/bucket4.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing = true; set hive.enforce.sorting = true; diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q index 42e26a8..4a7d63e 100644 --- a/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q +++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=10000; diff --git a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q index a3588ec..2f968bd 100644 --- a/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q +++ b/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=10000; diff --git a/ql/src/test/queries/clientpositive/correlationoptimizer1.q b/ql/src/test/queries/clientpositive/correlationoptimizer1.q index 0596f96..51d2c10 100644 --- a/ql/src/test/queries/clientpositive/correlationoptimizer1.q +++ b/ql/src/test/queries/clientpositive/correlationoptimizer1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.join=false; set hive.optimize.correlation=false; -- This query has a GroupByOperator folling JoinOperator and they share the same keys. 
diff --git a/ql/src/test/queries/clientpositive/count.q b/ql/src/test/queries/clientpositive/count.q index 18721e5..ded8be8 100644 --- a/ql/src/test/queries/clientpositive/count.q +++ b/ql/src/test/queries/clientpositive/count.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS create table abcd (a int, b int, c int, d int); LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd; diff --git a/ql/src/test/queries/clientpositive/cross_join.q b/ql/src/test/queries/clientpositive/cross_join.q index 1f888dd..8eb949e 100644 --- a/ql/src/test/queries/clientpositive/cross_join.q +++ b/ql/src/test/queries/clientpositive/cross_join.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- current explain select src.key from src join src src2; -- ansi cross join diff --git a/ql/src/test/queries/clientpositive/cross_product_check_1.q b/ql/src/test/queries/clientpositive/cross_product_check_1.q index 17a8833..e39912b 100644 --- a/ql/src/test/queries/clientpositive/cross_product_check_1.q +++ b/ql/src/test/queries/clientpositive/cross_product_check_1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS create table A as diff --git a/ql/src/test/queries/clientpositive/cross_product_check_2.q b/ql/src/test/queries/clientpositive/cross_product_check_2.q index de6b7f2..d7d251f 100644 --- a/ql/src/test/queries/clientpositive/cross_product_check_2.q +++ b/ql/src/test/queries/clientpositive/cross_product_check_2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS create table A as diff --git a/ql/src/test/queries/clientpositive/ctas.q b/ql/src/test/queries/clientpositive/ctas.q index 3435d03..edd1f6a 100644 --- a/ql/src/test/queries/clientpositive/ctas.q +++ b/ql/src/test/queries/clientpositive/ctas.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q index 7baca1a..d7f9ac8 100644 --- a/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q +++ b/ql/src/test/queries/clientpositive/disable_merge_for_bucketing.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing = true; set hive.exec.reducers.max = 1; diff --git a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q index 376e893..67c4740 100644 --- a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q +++ b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.optimize.ppd=true; set hive.ppd.remove.duplicatefilters=true; set hive.tez.dynamic.partition.pruning=true; diff --git a/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q b/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q index a4e84b1..4a9532d 100644 --- a/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q +++ b/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.optimize.ppd=true; set hive.ppd.remove.duplicatefilters=true; set hive.tez.dynamic.partition.pruning=true; diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q index 78816ae..8001081 100644 --- 
a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.optimize.sort.dynamic.partition=true; set hive.exec.dynamic.partition=true; set hive.exec.max.dynamic.partitions=1000; diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q index e459583..f842efe 100644 --- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.optimize.sort.dynamic.partition=true; set hive.exec.dynamic.partition=true; set hive.exec.max.dynamic.partitions=1000; diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q index 58319e3..5a504ec 100644 --- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.optimize.sort.dynamic.partition=true; set hive.exec.dynamic.partition=true; set hive.exec.max.dynamic.partitions=1000; diff --git a/ql/src/test/queries/clientpositive/explainuser_3.q b/ql/src/test/queries/clientpositive/explainuser_3.q new file mode 100644 index 0000000..16237bb --- /dev/null +++ b/ql/src/test/queries/clientpositive/explainuser_3.q @@ -0,0 +1,115 @@ +set hive.explain.user=true; + +explain select key, value +FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol; + +explain show tables; + +explain create database newDB location "/tmp/"; + +create database newDB location "/tmp/"; + +explain describe database extended newDB; + +describe database extended newDB; + +explain use newDB; + +use newDB; + +create table tab (name string); + +explain alter table tab rename to newName; + +explain drop table tab; + +drop table tab; + +explain use default; + +use default; + +drop database newDB; + +explain analyze table src compute statistics; + +explain analyze table src compute statistics for columns; + +explain +CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x)); + +CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x)); + +EXPLAIN SELECT SIGMOID(2) FROM src LIMIT 1; +explain DROP TEMPORARY MACRO SIGMOID; +DROP TEMPORARY MACRO SIGMOID; + +explain create table src_autho_test as select * from src; +create table src_autho_test as select * from src; + +set hive.security.authorization.enabled=true; + +explain grant select on table src_autho_test to user hive_test_user; +grant select on table src_autho_test to user hive_test_user; + +explain show grant user hive_test_user on table src_autho_test; +explain show grant user hive_test_user on table src_autho_test(key); + +select key from src_autho_test order by key limit 20; + +explain revoke select on table src_autho_test from user hive_test_user; + +explain grant select(key) on table src_autho_test to user hive_test_user; + +explain revoke select(key) on table src_autho_test from user hive_test_user; + +explain +create role sRc_roLE; + +create role sRc_roLE; + +explain +grant role sRc_roLE to user hive_test_user; + +grant role sRc_roLE to user hive_test_user; + +explain show role grant user hive_test_user; + +explain drop role sRc_roLE; +drop role sRc_roLE; + +set hive.security.authorization.enabled=false; +drop table src_autho_test; + +explain drop view v; + 
+explain create view v as with cte as (select * from src order by key limit 5) +select * from cte; + +explain with cte as (select * from src order by key limit 5) +select * from cte; + +create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; + +load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5; + +SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; +SET mapred.min.split.size=1000; +SET mapred.max.split.size=50000; +SET hive.optimize.index.filter=true; +set hive.merge.orcfile.stripe.level=false; +set hive.merge.tezfiles=false; +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; +set hive.compute.splits.in.am=true; +set tez.grouping.min-size=1000; +set tez.grouping.max-size=50000; + +set hive.merge.orcfile.stripe.level=true; +set hive.merge.tezfiles=true; +set hive.merge.mapfiles=true; +set hive.merge.mapredfiles=true; + +explain insert overwrite table orc_merge5 select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; + +drop table orc_merge5; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/groupby1.q b/ql/src/test/queries/clientpositive/groupby1.q index 15f776f..df69cbd 100755 --- a/ql/src/test/queries/clientpositive/groupby1.q +++ b/ql/src/test/queries/clientpositive/groupby1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.map.aggr=false; set hive.groupby.skewindata=true; diff --git a/ql/src/test/queries/clientpositive/groupby2.q b/ql/src/test/queries/clientpositive/groupby2.q index 2bf0d7a..1966ee7 100755 --- a/ql/src/test/queries/clientpositive/groupby2.q +++ b/ql/src/test/queries/clientpositive/groupby2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.map.aggr=false; set hive.groupby.skewindata=true; diff --git a/ql/src/test/queries/clientpositive/groupby3.q b/ql/src/test/queries/clientpositive/groupby3.q index 8f24584..a9b4039 100755 --- a/ql/src/test/queries/clientpositive/groupby3.q +++ b/ql/src/test/queries/clientpositive/groupby3.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.map.aggr=false; set hive.groupby.skewindata=true; diff --git a/ql/src/test/queries/clientpositive/having.q b/ql/src/test/queries/clientpositive/having.q index 6abc8ae..fdba5cd 100644 --- a/ql/src/test/queries/clientpositive/having.q +++ b/ql/src/test/queries/clientpositive/having.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3; SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3; diff --git a/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_1.q b/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_1.q index c7d925e..f98dfa9 100644 --- a/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_1.q +++ b/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- Hybrid Grace Hash Join -- Test basic functionalities: -- 1. 
Various cases when hash partitions spill diff --git a/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q b/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q index b3ee414..d6a5250 100644 --- a/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q +++ b/ql/src/test/queries/clientpositive/hybridgrace_hashjoin_2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- Hybrid Grace Hash Join -- Test n-way join SELECT 1; diff --git a/ql/src/test/queries/clientpositive/insert_into1.q b/ql/src/test/queries/clientpositive/insert_into1.q index 7271a07..1b7db5c 100644 --- a/ql/src/test/queries/clientpositive/insert_into1.q +++ b/ql/src/test/queries/clientpositive/insert_into1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.compute.query.using.stats=true; -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/insert_into2.q b/ql/src/test/queries/clientpositive/insert_into2.q index a53f7f4..7183c75 100644 --- a/ql/src/test/queries/clientpositive/insert_into2.q +++ b/ql/src/test/queries/clientpositive/insert_into2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.compute.query.using.stats=true; DROP TABLE insert_into2; CREATE TABLE insert_into2 (key int, value string) diff --git a/ql/src/test/queries/clientpositive/join0.q b/ql/src/test/queries/clientpositive/join0.q index 5d8356b..6ef6843 100644 --- a/ql/src/test/queries/clientpositive/join0.q +++ b/ql/src/test/queries/clientpositive/join0.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- JAVA_VERSION_SPECIFIC_OUTPUT -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/join1.q b/ql/src/test/queries/clientpositive/join1.q index a388683..de97e8c 100644 --- a/ql/src/test/queries/clientpositive/join1.q +++ b/ql/src/test/queries/clientpositive/join1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/join_nullsafe.q b/ql/src/test/queries/clientpositive/join_nullsafe.q index 46bbadd..d6eda77 100644 --- a/ql/src/test/queries/clientpositive/join_nullsafe.q +++ b/ql/src/test/queries/clientpositive/join_nullsafe.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS CREATE TABLE myinput1(key int, value int); diff --git a/ql/src/test/queries/clientpositive/limit_pushdown.q b/ql/src/test/queries/clientpositive/limit_pushdown.q index 3940564..74030e3 100644 --- a/ql/src/test/queries/clientpositive/limit_pushdown.q +++ b/ql/src/test/queries/clientpositive/limit_pushdown.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.limit.pushdown.memory.usage=0.3f; set hive.optimize.reducededuplication.min.reducer=1; diff --git a/ql/src/test/queries/clientpositive/load_dyn_part1.q b/ql/src/test/queries/clientpositive/load_dyn_part1.q index df1ed31..68323ab 100644 --- a/ql/src/test/queries/clientpositive/load_dyn_part1.q +++ b/ql/src/test/queries/clientpositive/load_dyn_part1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS show partitions srcpart; diff --git a/ql/src/test/queries/clientpositive/load_dyn_part2.q b/ql/src/test/queries/clientpositive/load_dyn_part2.q index eb4e2d5..e804971 100644 --- a/ql/src/test/queries/clientpositive/load_dyn_part2.q +++ b/ql/src/test/queries/clientpositive/load_dyn_part2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS create table if not exists nzhang_part_bucket (key string, value string) diff --git a/ql/src/test/queries/clientpositive/load_dyn_part3.q 
b/ql/src/test/queries/clientpositive/load_dyn_part3.q index 4fb3860..07423fd 100644 --- a/ql/src/test/queries/clientpositive/load_dyn_part3.q +++ b/ql/src/test/queries/clientpositive/load_dyn_part3.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS show partitions srcpart; diff --git a/ql/src/test/queries/clientpositive/lvj_mapjoin.q b/ql/src/test/queries/clientpositive/lvj_mapjoin.q index 4a391b4..b726e2a 100644 --- a/ql/src/test/queries/clientpositive/lvj_mapjoin.q +++ b/ql/src/test/queries/clientpositive/lvj_mapjoin.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS drop table sour1; diff --git a/ql/src/test/queries/clientpositive/mapjoin_decimal.q b/ql/src/test/queries/clientpositive/mapjoin_decimal.q index 7299808..105195b 100644 --- a/ql/src/test/queries/clientpositive/mapjoin_decimal.q +++ b/ql/src/test/queries/clientpositive/mapjoin_decimal.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=10000000; diff --git a/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q b/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q index 7f66ff2..9723b3a 100644 --- a/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q +++ b/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=10000; diff --git a/ql/src/test/queries/clientpositive/mapreduce1.q b/ql/src/test/queries/clientpositive/mapreduce1.q index 83328f1..e2b314f 100644 --- a/ql/src/test/queries/clientpositive/mapreduce1.q +++ b/ql/src/test/queries/clientpositive/mapreduce1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; EXPLAIN diff --git a/ql/src/test/queries/clientpositive/mapreduce2.q b/ql/src/test/queries/clientpositive/mapreduce2.q index 7539d3f..0398b49 100644 --- a/ql/src/test/queries/clientpositive/mapreduce2.q +++ b/ql/src/test/queries/clientpositive/mapreduce2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; CREATE TABLE dest1(key INT, ten INT, one INT, value STRING) STORED AS TEXTFILE; EXPLAIN diff --git a/ql/src/test/queries/clientpositive/merge1.q b/ql/src/test/queries/clientpositive/merge1.q index 3000262..847a50b 100644 --- a/ql/src/test/queries/clientpositive/merge1.q +++ b/ql/src/test/queries/clientpositive/merge1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.merge.mapredfiles=true; set hive.merge.sparkfiles=true; diff --git a/ql/src/test/queries/clientpositive/merge2.q b/ql/src/test/queries/clientpositive/merge2.q index b0f01ce..c36a909 100644 --- a/ql/src/test/queries/clientpositive/merge2.q +++ b/ql/src/test/queries/clientpositive/merge2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.merge.mapfiles=true; set hive.merge.mapredfiles=true; set hive.merge.sparkfiles=true; diff --git a/ql/src/test/queries/clientpositive/mergejoin.q b/ql/src/test/queries/clientpositive/mergejoin.q index f89f413..7550e09 100644 --- a/ql/src/test/queries/clientpositive/mergejoin.q +++ b/ql/src/test/queries/clientpositive/mergejoin.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.join.emit.interval=100000; set hive.optimize.ppd=true; set hive.ppd.remove.duplicatefilters=true; diff --git a/ql/src/test/queries/clientpositive/metadata_only_queries.q 
b/ql/src/test/queries/clientpositive/metadata_only_queries.q index c7ae739..56f3a78 100644 --- a/ql/src/test/queries/clientpositive/metadata_only_queries.q +++ b/ql/src/test/queries/clientpositive/metadata_only_queries.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.stats.dbclass=fs; set hive.compute.query.using.stats=true; set hive.stats.autogather=true; diff --git a/ql/src/test/queries/clientpositive/mrr.q b/ql/src/test/queries/clientpositive/mrr.q index bd379d2..6960547 100644 --- a/ql/src/test/queries/clientpositive/mrr.q +++ b/ql/src/test/queries/clientpositive/mrr.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- simple query with multiple reduce stages -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/orc_merge1.q b/ql/src/test/queries/clientpositive/orc_merge1.q index 1c0bf41..a8ac85b 100644 --- a/ql/src/test/queries/clientpositive/orc_merge1.q +++ b/ql/src/test/queries/clientpositive/orc_merge1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.merge.orcfile.stripe.level=false; set hive.exec.dynamic.partition=true; set hive.exec.dynamic.partition.mode=nonstrict; diff --git a/ql/src/test/queries/clientpositive/orc_merge2.q b/ql/src/test/queries/clientpositive/orc_merge2.q index 9ffc4bc..44ef280 100644 --- a/ql/src/test/queries/clientpositive/orc_merge2.q +++ b/ql/src/test/queries/clientpositive/orc_merge2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.merge.orcfile.stripe.level=true; set hive.exec.dynamic.partition=true; set hive.exec.dynamic.partition.mode=nonstrict; diff --git a/ql/src/test/queries/clientpositive/orc_merge3.q b/ql/src/test/queries/clientpositive/orc_merge3.q index 444ea65..9722e6d 100644 --- a/ql/src/test/queries/clientpositive/orc_merge3.q +++ b/ql/src/test/queries/clientpositive/orc_merge3.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.merge.orcfile.stripe.level=true; DROP TABLE orcfile_merge3a; diff --git a/ql/src/test/queries/clientpositive/orc_merge4.q b/ql/src/test/queries/clientpositive/orc_merge4.q index 769d4d3..3b50465 100644 --- a/ql/src/test/queries/clientpositive/orc_merge4.q +++ b/ql/src/test/queries/clientpositive/orc_merge4.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.merge.orcfile.stripe.level=true; DROP TABLE orcfile_merge3a; diff --git a/ql/src/test/queries/clientpositive/orc_merge5.q b/ql/src/test/queries/clientpositive/orc_merge5.q index 9682347..3d32875 100644 --- a/ql/src/test/queries/clientpositive/orc_merge5.q +++ b/ql/src/test/queries/clientpositive/orc_merge5.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; diff --git a/ql/src/test/queries/clientpositive/orc_merge6.q b/ql/src/test/queries/clientpositive/orc_merge6.q index cddc73a..6bdaa9e 100644 --- a/ql/src/test/queries/clientpositive/orc_merge6.q +++ b/ql/src/test/queries/clientpositive/orc_merge6.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS -- orc file merge tests for static partitions diff --git a/ql/src/test/queries/clientpositive/orc_merge7.q b/ql/src/test/queries/clientpositive/orc_merge7.q index 7e0d352..7a351c6 100644 --- a/ql/src/test/queries/clientpositive/orc_merge7.q +++ b/ql/src/test/queries/clientpositive/orc_merge7.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS -- orc merge file tests for dynamic partition case diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q 
b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q index 0348948..dd58524 100644 --- a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q +++ b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc; diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat2.q b/ql/src/test/queries/clientpositive/orc_merge_incompat2.q index 5deec3c..a8f8842 100644 --- a/ql/src/test/queries/clientpositive/orc_merge_incompat2.q +++ b/ql/src/test/queries/clientpositive/orc_merge_incompat2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS -- orc merge file tests for dynamic partition case diff --git a/ql/src/test/queries/clientpositive/parallel.q b/ql/src/test/queries/clientpositive/parallel.q index 7cd4015..b8c0445 100644 --- a/ql/src/test/queries/clientpositive/parallel.q +++ b/ql/src/test/queries/clientpositive/parallel.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set mapred.job.name='test_parallel'; set hive.exec.parallel=true; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; diff --git a/ql/src/test/queries/clientpositive/ptf.q b/ql/src/test/queries/clientpositive/ptf.q index 2599317..b5b271b 100644 --- a/ql/src/test/queries/clientpositive/ptf.q +++ b/ql/src/test/queries/clientpositive/ptf.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS --1. test1 diff --git a/ql/src/test/queries/clientpositive/ptf_matchpath.q b/ql/src/test/queries/clientpositive/ptf_matchpath.q index 6487135..7b67a34 100644 --- a/ql/src/test/queries/clientpositive/ptf_matchpath.q +++ b/ql/src/test/queries/clientpositive/ptf_matchpath.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; DROP TABLE flights_tiny; create table flights_tiny ( diff --git a/ql/src/test/queries/clientpositive/ptf_streaming.q b/ql/src/test/queries/clientpositive/ptf_streaming.q index 7795365..aa70a5c 100644 --- a/ql/src/test/queries/clientpositive/ptf_streaming.q +++ b/ql/src/test/queries/clientpositive/ptf_streaming.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS --1. 
test1 diff --git a/ql/src/test/queries/clientpositive/script_pipe.q b/ql/src/test/queries/clientpositive/script_pipe.q index ae2fd2e..95a08dd 100644 --- a/ql/src/test/queries/clientpositive/script_pipe.q +++ b/ql/src/test/queries/clientpositive/script_pipe.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.exec.script.allow.partial.consumption = true; -- Tests exception in ScriptOperator.close() by passing to the operator a small amount of data EXPLAIN SELECT TRANSFORM(*) USING 'true' AS a, b, c FROM (SELECT * FROM src LIMIT 1) tmp; diff --git a/ql/src/test/queries/clientpositive/selectDistinctStar.q b/ql/src/test/queries/clientpositive/selectDistinctStar.q index c1c1c0b..244824f 100644 --- a/ql/src/test/queries/clientpositive/selectDistinctStar.q +++ b/ql/src/test/queries/clientpositive/selectDistinctStar.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS explain select distinct src.* from src; diff --git a/ql/src/test/queries/clientpositive/select_dummy_source.q b/ql/src/test/queries/clientpositive/select_dummy_source.q index 25a1a81..915534a 100644 --- a/ql/src/test/queries/clientpositive/select_dummy_source.q +++ b/ql/src/test/queries/clientpositive/select_dummy_source.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; explain select 'a', 100; select 'a', 100; diff --git a/ql/src/test/queries/clientpositive/skewjoin.q b/ql/src/test/queries/clientpositive/skewjoin.q index 6d43ea8..5c8b326 100644 --- a/ql/src/test/queries/clientpositive/skewjoin.q +++ b/ql/src/test/queries/clientpositive/skewjoin.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.optimize.skewjoin = true; set hive.skewjoin.key = 2; diff --git a/ql/src/test/queries/clientpositive/stats_noscan_1.q b/ql/src/test/queries/clientpositive/stats_noscan_1.q index 02b4c2a..2681f77 100644 --- a/ql/src/test/queries/clientpositive/stats_noscan_1.q +++ b/ql/src/test/queries/clientpositive/stats_noscan_1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set datanucleus.cache.collections=false; set hive.stats.autogather=false; set hive.exec.dynamic.partition=true; diff --git a/ql/src/test/queries/clientpositive/stats_only_null.q b/ql/src/test/queries/clientpositive/stats_only_null.q index 5f89499..b2bd4dc 100644 --- a/ql/src/test/queries/clientpositive/stats_only_null.q +++ b/ql/src/test/queries/clientpositive/stats_only_null.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.stats.dbclass=fs; set hive.compute.query.using.stats=true; set hive.stats.autogather=true; diff --git a/ql/src/test/queries/clientpositive/subquery_exists.q b/ql/src/test/queries/clientpositive/subquery_exists.q index 103663e..720e360 100644 --- a/ql/src/test/queries/clientpositive/subquery_exists.q +++ b/ql/src/test/queries/clientpositive/subquery_exists.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS -- no agg, corr diff --git a/ql/src/test/queries/clientpositive/subquery_in.q b/ql/src/test/queries/clientpositive/subquery_in.q index 7090975..e81872f 100644 --- a/ql/src/test/queries/clientpositive/subquery_in.q +++ b/ql/src/test/queries/clientpositive/subquery_in.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS -- non agg, non corr diff --git a/ql/src/test/queries/clientpositive/temp_table.q b/ql/src/test/queries/clientpositive/temp_table.q index ac0acb1..e587f3f 100644 --- a/ql/src/test/queries/clientpositive/temp_table.q +++ b/ql/src/test/queries/clientpositive/temp_table.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; EXPLAIN CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 
= 0; CREATE TEMPORARY TABLE foo AS SELECT * FROM src WHERE key % 2 = 0; diff --git a/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q b/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q index 996ea88..a06bb82 100644 --- a/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q +++ b/ql/src/test/queries/clientpositive/tez_bmj_schema_evolution.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.enforce.bucketing=true; set hive.enforce.sorting = true; set hive.optimize.bucketingsorting=false; diff --git a/ql/src/test/queries/clientpositive/tez_dml.q b/ql/src/test/queries/clientpositive/tez_dml.q index d8480cc..10c8854 100644 --- a/ql/src/test/queries/clientpositive/tez_dml.q +++ b/ql/src/test/queries/clientpositive/tez_dml.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.exec.dynamic.partition.mode=nonstrict; -- CTAS diff --git a/ql/src/test/queries/clientpositive/tez_join.q b/ql/src/test/queries/clientpositive/tez_join.q index 0ec1881..d590959 100644 --- a/ql/src/test/queries/clientpositive/tez_join.q +++ b/ql/src/test/queries/clientpositive/tez_join.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.sortmerge.join = true; create table t1( diff --git a/ql/src/test/queries/clientpositive/tez_join_hash.q b/ql/src/test/queries/clientpositive/tez_join_hash.q index 73cd53e..09a1d8b 100644 --- a/ql/src/test/queries/clientpositive/tez_join_hash.q +++ b/ql/src/test/queries/clientpositive/tez_join_hash.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS create table orc_src (key string, value string) STORED AS ORC; diff --git a/ql/src/test/queries/clientpositive/tez_join_tests.q b/ql/src/test/queries/clientpositive/tez_join_tests.q index 8b65049..ff42f13 100644 --- a/ql/src/test/queries/clientpositive/tez_join_tests.q +++ b/ql/src/test/queries/clientpositive/tez_join_tests.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS explain diff --git a/ql/src/test/queries/clientpositive/tez_joins_explain.q b/ql/src/test/queries/clientpositive/tez_joins_explain.q index 4b6b0ca..3187e79 100644 --- a/ql/src/test/queries/clientpositive/tez_joins_explain.q +++ b/ql/src/test/queries/clientpositive/tez_joins_explain.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS explain select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key; diff --git a/ql/src/test/queries/clientpositive/tez_smb_1.q b/ql/src/test/queries/clientpositive/tez_smb_1.q index 62a415b..03a1fea 100644 --- a/ql/src/test/queries/clientpositive/tez_smb_1.q +++ b/ql/src/test/queries/clientpositive/tez_smb_1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.auto.convert.join=true; set hive.join.emit.interval=2; set hive.auto.convert.join.noconditionaltask=true; diff --git a/ql/src/test/queries/clientpositive/tez_smb_main.q b/ql/src/test/queries/clientpositive/tez_smb_main.q index 1802709..dff5112 100644 --- a/ql/src/test/queries/clientpositive/tez_smb_main.q +++ b/ql/src/test/queries/clientpositive/tez_smb_main.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.join.emit.interval=2; explain select * from src a join src1 b on a.key = b.key; diff --git a/ql/src/test/queries/clientpositive/tez_union.q b/ql/src/test/queries/clientpositive/tez_union.q index 96f58b2..2211f77 100644 --- a/ql/src/test/queries/clientpositive/tez_union.q +++ b/ql/src/test/queries/clientpositive/tez_union.q @@ -1,3 +1,4 @@ +set 
hive.explain.user=false; set hive.auto.convert.join=true; explain diff --git a/ql/src/test/queries/clientpositive/tez_union2.q b/ql/src/test/queries/clientpositive/tez_union2.q index af7b51c..22eab01 100644 --- a/ql/src/test/queries/clientpositive/tez_union2.q +++ b/ql/src/test/queries/clientpositive/tez_union2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; explain SELECT key, value FROM ( diff --git a/ql/src/test/queries/clientpositive/tez_union_dynamic_partition.q b/ql/src/test/queries/clientpositive/tez_union_dynamic_partition.q index 1c44a6c..e022d81 100644 --- a/ql/src/test/queries/clientpositive/tez_union_dynamic_partition.q +++ b/ql/src/test/queries/clientpositive/tez_union_dynamic_partition.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; create table dummy(i int); insert into table dummy values (1); select * from dummy; diff --git a/ql/src/test/queries/clientpositive/tez_union_group_by.q b/ql/src/test/queries/clientpositive/tez_union_group_by.q index 4a58474..200f38d 100644 --- a/ql/src/test/queries/clientpositive/tez_union_group_by.q +++ b/ql/src/test/queries/clientpositive/tez_union_group_by.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; CREATE TABLE x ( u bigint, diff --git a/ql/src/test/queries/clientpositive/tez_union_multiinsert.q b/ql/src/test/queries/clientpositive/tez_union_multiinsert.q index f7c11a3..67a9ac5 100644 --- a/ql/src/test/queries/clientpositive/tez_union_multiinsert.q +++ b/ql/src/test/queries/clientpositive/tez_union_multiinsert.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE; diff --git a/ql/src/test/queries/clientpositive/transform1.q b/ql/src/test/queries/clientpositive/transform1.q index 15e42e7..f2d23b9 100644 --- a/ql/src/test/queries/clientpositive/transform1.q +++ b/ql/src/test/queries/clientpositive/transform1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.entity.capture.transform=true; create table transform1_t1(a string, b string); diff --git a/ql/src/test/queries/clientpositive/union2.q b/ql/src/test/queries/clientpositive/union2.q index 272fbce..58765c7 100644 --- a/ql/src/test/queries/clientpositive/union2.q +++ b/ql/src/test/queries/clientpositive/union2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_BEFORE_DIFF -- union case: both subqueries are map-reduce jobs on same input, followed by reduce sink diff --git a/ql/src/test/queries/clientpositive/union3.q b/ql/src/test/queries/clientpositive/union3.q index add4ea6..35b298b 100644 --- a/ql/src/test/queries/clientpositive/union3.q +++ b/ql/src/test/queries/clientpositive/union3.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_QUERY_RESULTS explain diff --git a/ql/src/test/queries/clientpositive/union4.q b/ql/src/test/queries/clientpositive/union4.q index b9219d9..5e4f6fd 100644 --- a/ql/src/test/queries/clientpositive/union4.q +++ b/ql/src/test/queries/clientpositive/union4.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.map.aggr = true; -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/union5.q b/ql/src/test/queries/clientpositive/union5.q index ee2ef64..b495d01 100644 --- a/ql/src/test/queries/clientpositive/union5.q +++ b/ql/src/test/queries/clientpositive/union5.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.map.aggr = true; -- SORT_BEFORE_DIFF -- union case: both subqueries are map-reduce jobs on same input, followed by reduce sink diff --git a/ql/src/test/queries/clientpositive/union6.q b/ql/src/test/queries/clientpositive/union6.q index 
085e150..87347b1 100644 --- a/ql/src/test/queries/clientpositive/union6.q +++ b/ql/src/test/queries/clientpositive/union6.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.map.aggr = true; -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/union7.q b/ql/src/test/queries/clientpositive/union7.q index 67767e7..872b09e 100644 --- a/ql/src/test/queries/clientpositive/union7.q +++ b/ql/src/test/queries/clientpositive/union7.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.map.aggr = true; -- SORT_BEFORE_DIFF diff --git a/ql/src/test/queries/clientpositive/union8.q b/ql/src/test/queries/clientpositive/union8.q index 366de73..bc5f3b1 100644 --- a/ql/src/test/queries/clientpositive/union8.q +++ b/ql/src/test/queries/clientpositive/union8.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_BEFORE_DIFF -- union case: all subqueries are a map-only jobs, 3 way union, same input for all sub-queries, followed by filesink diff --git a/ql/src/test/queries/clientpositive/union9.q b/ql/src/test/queries/clientpositive/union9.q index 4cc3a96..064cf10 100644 --- a/ql/src/test/queries/clientpositive/union9.q +++ b/ql/src/test/queries/clientpositive/union9.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- SORT_BEFORE_DIFF -- union case: all subqueries are a map-only jobs, 3 way union, same input for all sub-queries, followed by reducesink diff --git a/ql/src/test/queries/clientpositive/unionDistinct_1.q b/ql/src/test/queries/clientpositive/unionDistinct_1.q index 61917d7..984ce2e 100644 --- a/ql/src/test/queries/clientpositive/unionDistinct_1.q +++ b/ql/src/test/queries/clientpositive/unionDistinct_1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- union10.q diff --git a/ql/src/test/queries/clientpositive/vector_aggregate_9.q b/ql/src/test/queries/clientpositive/vector_aggregate_9.q index 85bcc5a..ce6f0ff 100644 --- a/ql/src/test/queries/clientpositive/vector_aggregate_9.q +++ b/ql/src/test/queries/clientpositive/vector_aggregate_9.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; create table vectortab2k( diff --git a/ql/src/test/queries/clientpositive/vector_between_in.q b/ql/src/test/queries/clientpositive/vector_between_in.q index 1bc6611..28df728 100644 --- a/ql/src/test/queries/clientpositive/vector_between_in.q +++ b/ql/src/test/queries/clientpositive/vector_between_in.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; CREATE TABLE decimal_date_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, CAST(CAST((CAST(cint AS BIGINT) *ctinyint) AS TIMESTAMP) AS DATE) AS cdate FROM alltypesorc ORDER BY cdate; diff --git a/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q b/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q index 3bdfd8c..fd9bf6f 100644 --- a/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q +++ b/ql/src/test/queries/clientpositive/vector_binary_join_groupby.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.auto.convert.join=true; SET hive.auto.convert.join.noconditionaltask=true; SET hive.auto.convert.join.noconditionaltask.size=1000000000; diff --git a/ql/src/test/queries/clientpositive/vector_bucket.q b/ql/src/test/queries/clientpositive/vector_bucket.q index 19a09c4..9360ce0 100644 --- a/ql/src/test/queries/clientpositive/vector_bucket.q +++ b/ql/src/test/queries/clientpositive/vector_bucket.q @@ -1,3 +1,4 @@ +set 
hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.support.concurrency=true; set hive.enforce.bucketing=true; diff --git a/ql/src/test/queries/clientpositive/vector_cast_constant.q b/ql/src/test/queries/clientpositive/vector_cast_constant.q index 890fcb4..d15e87b 100644 --- a/ql/src/test/queries/clientpositive/vector_cast_constant.q +++ b/ql/src/test/queries/clientpositive/vector_cast_constant.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; -- JAVA_VERSION_SPECIFIC_OUTPUT diff --git a/ql/src/test/queries/clientpositive/vector_char_2.q b/ql/src/test/queries/clientpositive/vector_char_2.q index 0828ca1..efa3154 100644 --- a/ql/src/test/queries/clientpositive/vector_char_2.q +++ b/ql/src/test/queries/clientpositive/vector_char_2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; drop table char_2; diff --git a/ql/src/test/queries/clientpositive/vector_char_4.q b/ql/src/test/queries/clientpositive/vector_char_4.q index c824456..06f1d2b 100644 --- a/ql/src/test/queries/clientpositive/vector_char_4.q +++ b/ql/src/test/queries/clientpositive/vector_char_4.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; drop table if exists vectortab2k; diff --git a/ql/src/test/queries/clientpositive/vector_char_mapjoin1.q b/ql/src/test/queries/clientpositive/vector_char_mapjoin1.q index 76e9de8..a31e7b9 100644 --- a/ql/src/test/queries/clientpositive/vector_char_mapjoin1.q +++ b/ql/src/test/queries/clientpositive/vector_char_mapjoin1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; SET hive.auto.convert.join=true; diff --git a/ql/src/test/queries/clientpositive/vector_char_simple.q b/ql/src/test/queries/clientpositive/vector_char_simple.q index 858fe16..a921140 100644 --- a/ql/src/test/queries/clientpositive/vector_char_simple.q +++ b/ql/src/test/queries/clientpositive/vector_char_simple.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; drop table char_2; diff --git a/ql/src/test/queries/clientpositive/vector_coalesce.q b/ql/src/test/queries/clientpositive/vector_coalesce.q index 052ab71..eea0f48 100644 --- a/ql/src/test/queries/clientpositive/vector_coalesce.q +++ b/ql/src/test/queries/clientpositive/vector_coalesce.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; EXPLAIN SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint) FROM alltypesorc diff --git a/ql/src/test/queries/clientpositive/vector_coalesce_2.q b/ql/src/test/queries/clientpositive/vector_coalesce_2.q index b2c46dc..1dee7d4 100644 --- a/ql/src/test/queries/clientpositive/vector_coalesce_2.q +++ b/ql/src/test/queries/clientpositive/vector_coalesce_2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=false; set hive.fetch.task.conversion=none; diff --git a/ql/src/test/queries/clientpositive/vector_count_distinct.q b/ql/src/test/queries/clientpositive/vector_count_distinct.q index c1aae08..985c32e 100644 --- a/ql/src/test/queries/clientpositive/vector_count_distinct.q +++ b/ql/src/test/queries/clientpositive/vector_count_distinct.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; create table web_sales_txt diff --git a/ql/src/test/queries/clientpositive/vector_data_types.q 
b/ql/src/test/queries/clientpositive/vector_data_types.q index c7a9c4c..06a650c 100644 --- a/ql/src/test/queries/clientpositive/vector_data_types.q +++ b/ql/src/test/queries/clientpositive/vector_data_types.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; DROP TABLE over1k; DROP TABLE over1korc; diff --git a/ql/src/test/queries/clientpositive/vector_date_1.q b/ql/src/test/queries/clientpositive/vector_date_1.q index 908c082..165f86a 100644 --- a/ql/src/test/queries/clientpositive/vector_date_1.q +++ b/ql/src/test/queries/clientpositive/vector_date_1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_1.q b/ql/src/test/queries/clientpositive/vector_decimal_1.q index 9646094..f6578cd 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_1.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_10_0.q b/ql/src/test/queries/clientpositive/vector_decimal_10_0.q index e0d5b98..32898e6 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_10_0.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_10_0.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_2.q b/ql/src/test/queries/clientpositive/vector_decimal_2.q index 9f29092..79d602a 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_2.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q b/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q index ee83f5b..552a564 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; CREATE TABLE decimal_vgby STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, diff --git a/ql/src/test/queries/clientpositive/vector_decimal_cast.q b/ql/src/test/queries/clientpositive/vector_decimal_cast.q index ea7a5b8..eb0e75c 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_cast.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_cast.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; EXPLAIN SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_expressions.q b/ql/src/test/queries/clientpositive/vector_decimal_expressions.q index a74b17b..5c232c8 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_expressions.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_expressions.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; CREATE TABLE decimal_test STORED AS ORC AS SELECT cdouble, CAST 
(((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc; SET hive.vectorized.execution.enabled=true; EXPLAIN SELECT cdecimal1 + cdecimal2, cdecimal1 - (2*cdecimal2), ((cdecimal1+2.34)/cdecimal2), (cdecimal1 * (cdecimal2/3.4)), cdecimal1 % 10, CAST(cdecimal1 AS INT), CAST(cdecimal2 AS SMALLINT), CAST(cdecimal2 AS TINYINT), CAST(cdecimal1 AS BIGINT), CAST (cdecimal1 AS BOOLEAN), CAST(cdecimal2 AS DOUBLE), CAST(cdecimal1 AS FLOAT), CAST(cdecimal2 AS STRING), CAST(cdecimal1 AS TIMESTAMP) FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL LIMIT 10; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q b/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q index 0c07b47..16c2ed0 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.auto.convert.join=true; SET hive.auto.convert.join.noconditionaltask=true; SET hive.auto.convert.join.noconditionaltask.size=1000000000; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q b/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q index b8fa7cd..74c7490 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc; SET hive.vectorized.execution.enabled=true; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_precision.q b/ql/src/test/queries/clientpositive/vector_decimal_precision.q index 586cd8a..c7f449a 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_precision.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_precision.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_round.q b/ql/src/test/queries/clientpositive/vector_decimal_round.q index 2b7a005..7ebc75b 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_round.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_round.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_round_2.q b/ql/src/test/queries/clientpositive/vector_decimal_round_2.q index e5f10d5..23be1fc 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_round_2.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_round_2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_udf.q b/ql/src/test/queries/clientpositive/vector_decimal_udf.q index 072abf2..a8ee2e8 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_udf.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_udf.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_decimal_udf2.q 
b/ql/src/test/queries/clientpositive/vector_decimal_udf2.q index 8fb0e15..0259626 100644 --- a/ql/src/test/queries/clientpositive/vector_decimal_udf2.q +++ b/ql/src/test/queries/clientpositive/vector_decimal_udf2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_distinct_2.q b/ql/src/test/queries/clientpositive/vector_distinct_2.q index 1a577de..07945e3 100644 --- a/ql/src/test/queries/clientpositive/vector_distinct_2.q +++ b/ql/src/test/queries/clientpositive/vector_distinct_2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/vector_elt.q b/ql/src/test/queries/clientpositive/vector_elt.q index 1430a17..e11b35f 100644 --- a/ql/src/test/queries/clientpositive/vector_elt.q +++ b/ql/src/test/queries/clientpositive/vector_elt.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; EXPLAIN SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) diff --git a/ql/src/test/queries/clientpositive/vector_groupby_3.q b/ql/src/test/queries/clientpositive/vector_groupby_3.q index df0f74b..014f9e1 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_3.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_3.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/vector_groupby_reduce.q b/ql/src/test/queries/clientpositive/vector_groupby_reduce.q index 5da2089..a6b6b1b 100644 --- a/ql/src/test/queries/clientpositive/vector_groupby_reduce.q +++ b/ql/src/test/queries/clientpositive/vector_groupby_reduce.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; create table store_sales_txt diff --git a/ql/src/test/queries/clientpositive/vector_grouping_sets.q b/ql/src/test/queries/clientpositive/vector_grouping_sets.q index 9449ec4..09ba6b6 100644 --- a/ql/src/test/queries/clientpositive/vector_grouping_sets.q +++ b/ql/src/test/queries/clientpositive/vector_grouping_sets.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; diff --git a/ql/src/test/queries/clientpositive/vector_if_expr.q b/ql/src/test/queries/clientpositive/vector_if_expr.q index c8ab55e..2e704b7 100644 --- a/ql/src/test/queries/clientpositive/vector_if_expr.q +++ b/ql/src/test/queries/clientpositive/vector_if_expr.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_inner_join.q b/ql/src/test/queries/clientpositive/vector_inner_join.q index 025b1a4..aa242a5 100644 --- a/ql/src/test/queries/clientpositive/vector_inner_join.q +++ b/ql/src/test/queries/clientpositive/vector_inner_join.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; diff --git a/ql/src/test/queries/clientpositive/vector_interval_1.q b/ql/src/test/queries/clientpositive/vector_interval_1.q index 1f3e620..ffa9e16 100644 --- a/ql/src/test/queries/clientpositive/vector_interval_1.q +++ b/ql/src/test/queries/clientpositive/vector_interval_1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.vectorized.execution.enabled=true; set 
hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_interval_2.q b/ql/src/test/queries/clientpositive/vector_interval_2.q index 467e5f2..d413f5a 100644 --- a/ql/src/test/queries/clientpositive/vector_interval_2.q +++ b/ql/src/test/queries/clientpositive/vector_interval_2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_interval_mapjoin.q b/ql/src/test/queries/clientpositive/vector_interval_mapjoin.q index 9a58658..671aa2c 100644 --- a/ql/src/test/queries/clientpositive/vector_interval_mapjoin.q +++ b/ql/src/test/queries/clientpositive/vector_interval_mapjoin.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; diff --git a/ql/src/test/queries/clientpositive/vector_join30.q b/ql/src/test/queries/clientpositive/vector_join30.q index 2275804..1467cd3 100644 --- a/ql/src/test/queries/clientpositive/vector_join30.q +++ b/ql/src/test/queries/clientpositive/vector_join30.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.vectorized.execution.mapjoin.native.enabled=true; set hive.fetch.task.conversion=none; diff --git a/ql/src/test/queries/clientpositive/vector_left_outer_join.q b/ql/src/test/queries/clientpositive/vector_left_outer_join.q index 7c46c53..2fc5240 100644 --- a/ql/src/test/queries/clientpositive/vector_left_outer_join.q +++ b/ql/src/test/queries/clientpositive/vector_left_outer_join.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.vectorized.execution.enabled=true; set hive.auto.convert.join=true; set hive.mapjoin.hybridgrace.hashtable=false; diff --git a/ql/src/test/queries/clientpositive/vector_left_outer_join2.q b/ql/src/test/queries/clientpositive/vector_left_outer_join2.q index 62ad9ee..3ba67a2 100644 --- a/ql/src/test/queries/clientpositive/vector_left_outer_join2.q +++ b/ql/src/test/queries/clientpositive/vector_left_outer_join2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.fetch.task.conversion=none; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; diff --git a/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q index 522ab12..680d2c9 100644 --- a/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q +++ b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.fetch.task.conversion=none; set hive.auto.convert.join=true; set hive.auto.convert.join.noconditionaltask=true; diff --git a/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q b/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q index 99aff3b..a83d95f 100644 --- a/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q +++ b/ql/src/test/queries/clientpositive/vector_mapjoin_reduce.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; diff --git a/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q b/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q index 23c6cf9..bb87344 100644 --- a/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q +++ b/ql/src/test/queries/clientpositive/vector_mr_diff_schema_alias.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; create table 
date_dim diff --git a/ql/src/test/queries/clientpositive/vector_multi_insert.q b/ql/src/test/queries/clientpositive/vector_multi_insert.q index 9620f92..8c92cda 100644 --- a/ql/src/test/queries/clientpositive/vector_multi_insert.q +++ b/ql/src/test/queries/clientpositive/vector_multi_insert.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_non_string_partition.q b/ql/src/test/queries/clientpositive/vector_non_string_partition.q index fc1dc6d..7759796 100644 --- a/ql/src/test/queries/clientpositive/vector_non_string_partition.q +++ b/ql/src/test/queries/clientpositive/vector_non_string_partition.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; CREATE TABLE non_string_part(cint INT, cstring1 STRING, cdouble DOUBLE, ctimestamp1 TIMESTAMP) PARTITIONED BY (ctinyint tinyint) STORED AS ORC; SET hive.exec.dynamic.partition.mode=nonstrict; diff --git a/ql/src/test/queries/clientpositive/vector_null_projection.q b/ql/src/test/queries/clientpositive/vector_null_projection.q index bab496e..66c0838 100644 --- a/ql/src/test/queries/clientpositive/vector_null_projection.q +++ b/ql/src/test/queries/clientpositive/vector_null_projection.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; diff --git a/ql/src/test/queries/clientpositive/vector_nullsafe_join.q b/ql/src/test/queries/clientpositive/vector_nullsafe_join.q index 316b2a6..b316a54 100644 --- a/ql/src/test/queries/clientpositive/vector_nullsafe_join.q +++ b/ql/src/test/queries/clientpositive/vector_nullsafe_join.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; diff --git a/ql/src/test/queries/clientpositive/vector_orderby_5.q b/ql/src/test/queries/clientpositive/vector_orderby_5.q index 571ecc9..2014ef7 100644 --- a/ql/src/test/queries/clientpositive/vector_orderby_5.q +++ b/ql/src/test/queries/clientpositive/vector_orderby_5.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; create table vectortab2k( diff --git a/ql/src/test/queries/clientpositive/vector_outer_join0.q b/ql/src/test/queries/clientpositive/vector_outer_join0.q index 95bdc41..f8caa83 100644 --- a/ql/src/test/queries/clientpositive/vector_outer_join0.q +++ b/ql/src/test/queries/clientpositive/vector_outer_join0.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; diff --git a/ql/src/test/queries/clientpositive/vector_outer_join1.q b/ql/src/test/queries/clientpositive/vector_outer_join1.q index b695b08..c255945 100644 --- a/ql/src/test/queries/clientpositive/vector_outer_join1.q +++ b/ql/src/test/queries/clientpositive/vector_outer_join1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; diff --git a/ql/src/test/queries/clientpositive/vector_outer_join2.q b/ql/src/test/queries/clientpositive/vector_outer_join2.q index d7abb21..2e07877 100644 --- a/ql/src/test/queries/clientpositive/vector_outer_join2.q +++ b/ql/src/test/queries/clientpositive/vector_outer_join2.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; SET hive.vectorized.execution.mapjoin.native.enabled=true; diff --git 
a/ql/src/test/queries/clientpositive/vector_outer_join3.q b/ql/src/test/queries/clientpositive/vector_outer_join3.q index f256e39..5435b9a 100644 --- a/ql/src/test/queries/clientpositive/vector_outer_join3.q +++ b/ql/src/test/queries/clientpositive/vector_outer_join3.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; SET hive.vectorized.execution.mapjoin.native.enabled=true; diff --git a/ql/src/test/queries/clientpositive/vector_outer_join4.q b/ql/src/test/queries/clientpositive/vector_outer_join4.q index fb9e6e4..1a41e9a 100644 --- a/ql/src/test/queries/clientpositive/vector_outer_join4.q +++ b/ql/src/test/queries/clientpositive/vector_outer_join4.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; SET hive.vectorized.execution.mapjoin.native.enabled=true; diff --git a/ql/src/test/queries/clientpositive/vector_outer_join5.q b/ql/src/test/queries/clientpositive/vector_outer_join5.q index b7ee4a4..4b6b6d6 100644 --- a/ql/src/test/queries/clientpositive/vector_outer_join5.q +++ b/ql/src/test/queries/clientpositive/vector_outer_join5.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.vectorized.execution.mapjoin.native.enabled=true; set hive.auto.convert.join=true; diff --git a/ql/src/test/queries/clientpositive/vector_partition_diff_num_cols.q b/ql/src/test/queries/clientpositive/vector_partition_diff_num_cols.q index 30ea590..f2335e4 100644 --- a/ql/src/test/queries/clientpositive/vector_partition_diff_num_cols.q +++ b/ql/src/test/queries/clientpositive/vector_partition_diff_num_cols.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q b/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q index 1aeec8c..c6480d2 100644 --- a/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q +++ b/ql/src/test/queries/clientpositive/vector_partitioned_date_time.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q b/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q index a282518..4a50150 100644 --- a/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q +++ b/ql/src/test/queries/clientpositive/vector_reduce_groupby_decimal.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; CREATE TABLE decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc WHERE cint is not null and cdouble is not null; diff --git a/ql/src/test/queries/clientpositive/vector_string_concat.q b/ql/src/test/queries/clientpositive/vector_string_concat.q index 36be0ec..06bd68e 100644 --- a/ql/src/test/queries/clientpositive/vector_string_concat.q +++ b/ql/src/test/queries/clientpositive/vector_string_concat.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; DROP TABLE over1k; diff --git a/ql/src/test/queries/clientpositive/vector_varchar_4.q b/ql/src/test/queries/clientpositive/vector_varchar_4.q index c1e9c67..32a74a4 100644 --- a/ql/src/test/queries/clientpositive/vector_varchar_4.q +++ b/ql/src/test/queries/clientpositive/vector_varchar_4.q @@ -1,3 +1,4 @@ +set 
hive.explain.user=false; SET hive.vectorized.execution.enabled=true; drop table if exists vectortab2k; diff --git a/ql/src/test/queries/clientpositive/vector_varchar_mapjoin1.q b/ql/src/test/queries/clientpositive/vector_varchar_mapjoin1.q index 2aade4e..c5c9048 100644 --- a/ql/src/test/queries/clientpositive/vector_varchar_mapjoin1.q +++ b/ql/src/test/queries/clientpositive/vector_varchar_mapjoin1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; SET hive.auto.convert.join=true; diff --git a/ql/src/test/queries/clientpositive/vector_varchar_simple.q b/ql/src/test/queries/clientpositive/vector_varchar_simple.q index 1cd30ee..acd6598 100644 --- a/ql/src/test/queries/clientpositive/vector_varchar_simple.q +++ b/ql/src/test/queries/clientpositive/vector_varchar_simple.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; drop table varchar_2; diff --git a/ql/src/test/queries/clientpositive/vectorization_0.q b/ql/src/test/queries/clientpositive/vectorization_0.q index b3cd794..08aeff9 100644 --- a/ql/src/test/queries/clientpositive/vectorization_0.q +++ b/ql/src/test/queries/clientpositive/vectorization_0.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; -- Use ORDER BY clauses to generate 2 stages. diff --git a/ql/src/test/queries/clientpositive/vectorization_13.q b/ql/src/test/queries/clientpositive/vectorization_13.q index 2103015..51dc058 100644 --- a/ql/src/test/queries/clientpositive/vectorization_13.q +++ b/ql/src/test/queries/clientpositive/vectorization_13.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vectorization_14.q b/ql/src/test/queries/clientpositive/vectorization_14.q index e766d14..3354605 100644 --- a/ql/src/test/queries/clientpositive/vectorization_14.q +++ b/ql/src/test/queries/clientpositive/vectorization_14.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; EXPLAIN diff --git a/ql/src/test/queries/clientpositive/vectorization_15.q b/ql/src/test/queries/clientpositive/vectorization_15.q index 5ad2de7..4502ec0 100644 --- a/ql/src/test/queries/clientpositive/vectorization_15.q +++ b/ql/src/test/queries/clientpositive/vectorization_15.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; EXPLAIN diff --git a/ql/src/test/queries/clientpositive/vectorization_16.q b/ql/src/test/queries/clientpositive/vectorization_16.q index 422ac5a..84f1faa 100644 --- a/ql/src/test/queries/clientpositive/vectorization_16.q +++ b/ql/src/test/queries/clientpositive/vectorization_16.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; -- SORT_QUERY_RESULTS diff --git a/ql/src/test/queries/clientpositive/vectorization_7.q b/ql/src/test/queries/clientpositive/vectorization_7.q index 7d5071f..7ef3e2e 100644 --- a/ql/src/test/queries/clientpositive/vectorization_7.q +++ b/ql/src/test/queries/clientpositive/vectorization_7.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vectorization_8.q b/ql/src/test/queries/clientpositive/vectorization_8.q index 9cfe86f..f50b874 100644 --- a/ql/src/test/queries/clientpositive/vectorization_8.q +++ 
b/ql/src/test/queries/clientpositive/vectorization_8.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vectorization_9.q b/ql/src/test/queries/clientpositive/vectorization_9.q index 2d927e5..5a05e63 100644 --- a/ql/src/test/queries/clientpositive/vectorization_9.q +++ b/ql/src/test/queries/clientpositive/vectorization_9.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; EXPLAIN diff --git a/ql/src/test/queries/clientpositive/vectorization_decimal_date.q b/ql/src/test/queries/clientpositive/vectorization_decimal_date.q index 2b82a5a..854ee20 100644 --- a/ql/src/test/queries/clientpositive/vectorization_decimal_date.q +++ b/ql/src/test/queries/clientpositive/vectorization_decimal_date.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; CREATE TABLE date_decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (CAST (cint AS TIMESTAMP) AS DATE) AS cdate, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal FROM alltypesorc; SET hive.vectorized.execution.enabled=true; EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10; diff --git a/ql/src/test/queries/clientpositive/vectorization_div0.q b/ql/src/test/queries/clientpositive/vectorization_div0.q index 26bd0f2..78f91f4 100644 --- a/ql/src/test/queries/clientpositive/vectorization_div0.q +++ b/ql/src/test/queries/clientpositive/vectorization_div0.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled = true; -- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants diff --git a/ql/src/test/queries/clientpositive/vectorization_limit.q b/ql/src/test/queries/clientpositive/vectorization_limit.q index 094a8d2..8799087 100644 --- a/ql/src/test/queries/clientpositive/vectorization_limit.q +++ b/ql/src/test/queries/clientpositive/vectorization_limit.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; explain SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7; SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7; diff --git a/ql/src/test/queries/clientpositive/vectorization_part_project.q b/ql/src/test/queries/clientpositive/vectorization_part_project.q index e925ea8..a6abb86 100644 --- a/ql/src/test/queries/clientpositive/vectorization_part_project.q +++ b/ql/src/test/queries/clientpositive/vectorization_part_project.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; CREATE TABLE alltypesorc_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS ORC; insert overwrite table alltypesorc_part partition (ds='2011') select * from alltypesorc order by ctinyint, cint, cbigint limit 100; diff --git a/ql/src/test/queries/clientpositive/vectorization_pushdown.q b/ql/src/test/queries/clientpositive/vectorization_pushdown.q index bafe550..c54c529 100644 --- a/ql/src/test/queries/clientpositive/vectorization_pushdown.q +++ b/ql/src/test/queries/clientpositive/vectorization_pushdown.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.optimize.index.filter=true; explain SELECT AVG(cbigint) FROM 
alltypesorc WHERE cbigint < cdouble; diff --git a/ql/src/test/queries/clientpositive/vectorization_short_regress.q b/ql/src/test/queries/clientpositive/vectorization_short_regress.q index 404b799..ba6e89d 100644 --- a/ql/src/test/queries/clientpositive/vectorization_short_regress.q +++ b/ql/src/test/queries/clientpositive/vectorization_short_regress.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=minimal; diff --git a/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q b/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q index ef1b816..022ce2e 100644 --- a/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q +++ b/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; create table vsmb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS diff --git a/ql/src/test/queries/clientpositive/vectorized_case.q b/ql/src/test/queries/clientpositive/vectorized_case.q index e448d51..8799fbb 100644 --- a/ql/src/test/queries/clientpositive/vectorized_case.q +++ b/ql/src/test/queries/clientpositive/vectorized_case.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.vectorized.execution.enabled = true ; explain diff --git a/ql/src/test/queries/clientpositive/vectorized_casts.q b/ql/src/test/queries/clientpositive/vectorized_casts.q index 9147a6a..e0083eb 100644 --- a/ql/src/test/queries/clientpositive/vectorized_casts.q +++ b/ql/src/test/queries/clientpositive/vectorized_casts.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled = true; -- Test type casting in vectorized mode to verify end-to-end functionality. diff --git a/ql/src/test/queries/clientpositive/vectorized_context.q b/ql/src/test/queries/clientpositive/vectorized_context.q index 5706b3a..aeb1f2e 100644 --- a/ql/src/test/queries/clientpositive/vectorized_context.q +++ b/ql/src/test/queries/clientpositive/vectorized_context.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; create table store(s_store_sk int, s_city string) stored as orc; insert overwrite table store diff --git a/ql/src/test/queries/clientpositive/vectorized_date_funcs.q b/ql/src/test/queries/clientpositive/vectorized_date_funcs.q index 1fb0dac..f3c07a2 100644 --- a/ql/src/test/queries/clientpositive/vectorized_date_funcs.q +++ b/ql/src/test/queries/clientpositive/vectorized_date_funcs.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled = true; -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. 
diff --git a/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q b/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q index 2553e9f..bad6baa 100644 --- a/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q +++ b/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.map.groupby.sorted=true; diff --git a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q index 71971fe..69cdca9 100644 --- a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q +++ b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.optimize.ppd=true; set hive.ppd.remove.duplicatefilters=true; set hive.tez.dynamic.partition.pruning=true; diff --git a/ql/src/test/queries/clientpositive/vectorized_mapjoin.q b/ql/src/test/queries/clientpositive/vectorized_mapjoin.q index 10277e5..11004a8 100644 --- a/ql/src/test/queries/clientpositive/vectorized_mapjoin.q +++ b/ql/src/test/queries/clientpositive/vectorized_mapjoin.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; SET hive.auto.convert.join.noconditionaltask=true; diff --git a/ql/src/test/queries/clientpositive/vectorized_math_funcs.q b/ql/src/test/queries/clientpositive/vectorized_math_funcs.q index d6b0824..d79fcce 100644 --- a/ql/src/test/queries/clientpositive/vectorized_math_funcs.q +++ b/ql/src/test/queries/clientpositive/vectorized_math_funcs.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled = true; -- Test math functions in vectorized mode to verify they run correctly end-to-end. 
diff --git a/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q b/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q index ebf5902..849af18 100644 --- a/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q +++ b/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=true; SET hive.auto.convert.join.noconditionaltask=true; diff --git a/ql/src/test/queries/clientpositive/vectorized_parquet.q b/ql/src/test/queries/clientpositive/vectorized_parquet.q index 4b14628..a49ca63 100644 --- a/ql/src/test/queries/clientpositive/vectorized_parquet.q +++ b/ql/src/test/queries/clientpositive/vectorized_parquet.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; set hive.exec.submitviachild=true; set hive.exec.submit.local.task.via.child=true; diff --git a/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q b/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q index c9851d2..92aae2c 100644 --- a/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q +++ b/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled=true; SET hive.auto.convert.join=false; diff --git a/ql/src/test/queries/clientpositive/vectorized_string_funcs.q b/ql/src/test/queries/clientpositive/vectorized_string_funcs.q index 96fe53d..d04a3c3 100644 --- a/ql/src/test/queries/clientpositive/vectorized_string_funcs.q +++ b/ql/src/test/queries/clientpositive/vectorized_string_funcs.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; SET hive.vectorized.execution.enabled = true; -- Test string functions in vectorized mode to verify end-to-end functionality. diff --git a/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q b/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q index c27697a..119c1c7 100644 --- a/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q +++ b/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q @@ -1,3 +1,4 @@ +set hive.explain.user=false; -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. -- Turning on vectorization has been temporarily moved after filling the test table -- due to bug HIVE-8197. 
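All of the query-file hunks above apply the same one-line change, prepending set hive.explain.user=false; to each test, presumably so that these tests keep producing the pre-existing EXPLAIN output format rather than the new user-level explain. As a rough illustration only (this snippet is not part of the patch; the query is a generic example against the standard src test table), a clientpositive qtest header after the change would look like:

set hive.explain.user=false;
-- SORT_QUERY_RESULTS

-- Plan stays in the classic EXPLAIN format because user-level explain is disabled above.
EXPLAIN
SELECT key, value FROM src LIMIT 10;

SELECT key, value FROM src LIMIT 10;
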
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out index d39da20..4956032 100644 --- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out +++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out @@ -9,6 +9,7 @@ Stage-0 name:default.src_orc_merge_test_part output format:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat partition columns:["ds string","ts string"] + PREHOOK: query: create table src_orc_merge_test_part(key int, value string) partitioned by (ds string, ts string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -68,6 +69,7 @@ Stage-3 TableScan [TS_0] alias:src Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: insert overwrite table src_orc_merge_test_part partition(ds='2012-01-03', ts='2012-01-03+14:46:31') select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -124,6 +126,7 @@ Stage-3 TableScan [TS_0] alias:src Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' PREHOOK: type: QUERY POSTHOOK: query: explain select count(1) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' @@ -160,6 +163,7 @@ Stage-0 TableScan [TS_0] alias:src_orc_merge_test_part Statistics:Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' PREHOOK: type: QUERY POSTHOOK: query: explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' @@ -197,6 +201,7 @@ Stage-0 TableScan [TS_0] alias:src_orc_merge_test_part Statistics:Num rows: 500 Data size: 47000 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: alter table src_orc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31') concatenate PREHOOK: type: ALTER_PARTITION_MERGE PREHOOK: Input: default@src_orc_merge_test_part @@ -241,6 +246,7 @@ Stage-0 TableScan [TS_0] alias:src_orc_merge_test_part Statistics:Num rows: 1 Data size: 2527 Basic stats: PARTIAL Column stats: NONE + PREHOOK: query: explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' PREHOOK: type: QUERY POSTHOOK: query: explain select sum(hash(key)), sum(hash(value)) from src_orc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31' @@ -278,6 +284,7 @@ Stage-0 TableScan [TS_0] alias:src_orc_merge_test_part Statistics:Num rows: 24 Data size: 2527 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: drop table src_orc_merge_test_part PREHOOK: type: DROPTABLE PREHOOK: Input: default@src_orc_merge_test_part @@ -372,6 +379,7 @@ Stage-0 TableScan [TS_0] alias:src Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key PREHOOK: type: QUERY POSTHOOK: query: explain select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key @@ -416,6 +424,7 @@ Stage-0 TableScan [TS_0] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select x, y, count(*) from (select key, 
(c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x PREHOOK: type: QUERY POSTHOOK: query: explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x @@ -481,6 +490,7 @@ Stage-0 TableScan [TS_0] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c PREHOOK: type: QUERY POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c @@ -667,6 +677,7 @@ Stage-0 TableScan [TS_2] alias:cbo_t2 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc PREHOOK: type: QUERY POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) cbo_t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p left outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or 
c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int % c asc, cbo_t3.c_int desc @@ -837,6 +848,7 @@ Stage-0 TableScan [TS_10] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c PREHOOK: type: QUERY POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c @@ -994,6 +1006,7 @@ Stage-0 TableScan [TS_10] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int PREHOOK: type: QUERY POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or 
c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int @@ -1174,6 +1187,7 @@ Stage-0 TableScan [TS_0] alias:cbo_t2 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c PREHOOK: type: QUERY POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c @@ -1327,6 +1341,7 @@ Stage-0 TableScan [TS_12] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc PREHOOK: type: QUERY POSTHOOK: query: explain select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc @@ -1366,6 +1381,7 @@ Stage-0 TableScan [TS_0] alias:src Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select unionsrc.key FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 UNION ALL select 'min' as key, min(c_int) as value from cbo_t3 s2 @@ -1480,6 +1496,7 @@ Stage-0 TableScan [TS_18] alias:s1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select unionsrc.key, count(1) FROM (select 'max' as key, max(c_int) as value from cbo_t3 s1 UNION ALL select 'min' as key, min(c_int) as value from cbo_t3 s2 @@ -1627,6 +1644,7 @@ Stage-0 TableScan [TS_18] alias:s1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1 PREHOOK: type: QUERY POSTHOOK: query: explain select cbo_t1.key from cbo_t1 join cbo_t3 where cbo_t1.key=cbo_t3.key and cbo_t1.key >= 1 @@ -1680,6 +1698,7 @@ Stage-0 TableScan [TS_3] alias:cbo_t3 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on cbo_t1.key=cbo_t2.key PREHOOK: type: QUERY POSTHOOK: query: explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 left outer join cbo_t2 on 
cbo_t1.key=cbo_t2.key @@ -1732,6 +1751,7 @@ Stage-0 TableScan [TS_2] alias:cbo_t2 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key PREHOOK: type: QUERY POSTHOOK: query: explain select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 full outer join cbo_t2 on cbo_t1.key=cbo_t2.key @@ -1784,6 +1804,7 @@ Stage-0 TableScan [TS_2] alias:cbo_t2 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key PREHOOK: type: QUERY POSTHOOK: query: explain select b, cbo_t1.c, cbo_t2.p, q, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1) cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key @@ -1858,6 +1879,7 @@ Stage-0 TableScan [TS_4] alias:cbo_t2 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a PREHOOK: type: QUERY POSTHOOK: query: explain select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3)cbo_t3 on cbo_t1.key=a @@ -1931,6 +1953,7 @@ Stage-0 TableScan [TS_4] alias:cbo_t2 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) PREHOOK: type: QUERY POSTHOOK: query: explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 full outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) @@ -2024,6 +2047,7 @@ Stage-0 TableScan [TS_3] alias:cbo_t2 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from 
cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) PREHOOK: type: QUERY POSTHOOK: query: explain select * from (select q, b, cbo_t2.p, cbo_t1.c, cbo_t3.c_int from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 right outer join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) @@ -2098,6 +2122,7 @@ Stage-0 TableScan [TS_6] alias:cbo_t3 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 PREHOOK: type: QUERY POSTHOOK: query: explain select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 @@ -2155,6 +2180,7 @@ Stage-0 TableScan [TS_0] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1 PREHOOK: type: QUERY POSTHOOK: query: explain select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x order by x,y limit 1 @@ -2233,6 +2259,7 @@ Stage-0 TableScan [TS_0] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select key from(select key from (select key from cbo_t1 limit 5)cbo_t2 limit 5)cbo_t3 limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain select key from(select key from (select key from cbo_t1 limit 5)cbo_t2 limit 5)cbo_t3 limit 5 @@ -2289,6 +2316,7 @@ Stage-0 TableScan [TS_0] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1 order by c_int limit 5)cbo_t2 order by c_int limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain select key, c_int from(select key, c_int from (select key, c_int from cbo_t1 order by c_int limit 5)cbo_t1 order by c_int limit 5)cbo_t2 order by c_int limit 5 @@ -2351,6 +2379,7 @@ Stage-0 TableScan [TS_0] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5 PREHOOK: type: 
QUERY POSTHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a limit 5) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc limit 5) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c limit 5 @@ -2552,6 +2581,7 @@ Stage-0 TableScan [TS_2] alias:cbo_t2 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) PREHOOK: type: QUERY POSTHOOK: query: explain select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) @@ -2613,6 +2643,7 @@ Stage-0 TableScan [TS_3] alias:cbo_t2 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select * from (select c, b, a from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) PREHOOK: type: QUERY POSTHOOK: query: explain select * from (select c, b, a from (select key as a, c_int as b, cbo_t1.c_float as c from cbo_t1 where (cbo_t1.c_int + 1 == 2) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)) cbo_t1 left semi join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2 where (cbo_t2.c_int + 1 == 2) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) @@ -2693,6 +2724,7 @@ Stage-0 TableScan [TS_10] alias:cbo_t3 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a PREHOOK: type: QUERY POSTHOOK: query: explain select a, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, 
cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) cbo_t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0) group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) cbo_t2 on cbo_t1.a=p left semi join cbo_t3 on cbo_t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a @@ -2865,6 +2897,7 @@ Stage-0 TableScan [TS_17] alias:cbo_t2 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select cbo_t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 PREHOOK: type: QUERY POSTHOOK: query: explain select cbo_t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from cbo_t1 @@ -2878,6 +2911,7 @@ Stage-0 outputColumnNames:["_col0","_col1","_col2"] TableScan [TS_0] alias:cbo_t1 + PREHOOK: query: explain select null from cbo_t1 PREHOOK: type: QUERY POSTHOOK: query: explain select null from cbo_t1 @@ -2891,6 +2925,7 @@ Stage-0 outputColumnNames:["_col0"] TableScan [TS_0] alias:cbo_t1 + PREHOOK: query: explain select key from cbo_t1 where c_int = -6 or c_int = +6 PREHOOK: type: QUERY POSTHOOK: query: explain select key from cbo_t1 where c_int = -6 or c_int = +6 @@ -2906,6 +2941,7 @@ Stage-0 predicate:((c_int = -6) or (c_int = 6)) (type: boolean) TableScan [TS_0] alias:cbo_t1 + PREHOOK: query: explain select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' PREHOOK: type: QUERY POSTHOOK: query: explain select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' @@ -2963,6 +2999,7 @@ Stage-0 TableScan [TS_3] alias:cbo_t2 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select * from src_cbo b where not exists @@ -3050,6 +3087,7 @@ Stage-0 TableScan [TS_3] alias:b Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select * from src_cbo b group by key, value @@ -3135,6 +3173,7 @@ Stage-0 TableScan [TS_0] alias:b Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: create view cv1 as select * from src_cbo b @@ -3214,6 +3253,7 @@ Stage-0 TableScan [TS_3] alias:b Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select * from (select * from src_cbo b @@ -3285,6 +3325,7 @@ Stage-0 TableScan [TS_3] alias:b Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select * from src_cbo where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') @@ -3347,6 +3388,7 @@ Stage-0 TableScan [TS_3] alias:src_cbo Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and @@ -3457,6 +3499,7 @@ Stage-0 TableScan [TS_6] alias:lineitem Statistics:Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select key, value, count(*) from src_cbo b where b.key in (select key from 
src_cbo where src_cbo.key > '8') @@ -3600,6 +3643,7 @@ Stage-0 TableScan [TS_20] alias:b Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, avg(p_size) from part group by p_mfgr, p_name @@ -3698,6 +3742,7 @@ Stage-0 TableScan [TS_8] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select * from src_cbo where src_cbo.key not in @@ -3817,6 +3862,7 @@ Stage-0 TableScan [TS_3] alias:src_cbo Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, b.p_name, p_size from part b where b.p_name not in @@ -3928,6 +3974,7 @@ Stage-0 TableScan [TS_3] alias:b Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_name, p_size from part where part.p_size not in @@ -4070,6 +4117,7 @@ Stage-0 TableScan [TS_11] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr @@ -4254,6 +4302,7 @@ Stage-0 TableScan [TS_8] alias:b Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1 PREHOOK: type: QUERY POSTHOOK: query: explain select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1 @@ -4291,6 +4340,7 @@ Stage-0 TableScan [TS_0] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1) cbo_t1 PREHOOK: type: QUERY POSTHOOK: query: explain select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from cbo_t1) cbo_t1 @@ -4328,6 +4378,7 @@ Stage-0 TableScan [TS_0] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range 
between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from cbo_t1) cbo_t1 PREHOOK: type: QUERY POSTHOOK: query: explain select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from cbo_t1) cbo_t1 @@ -4365,6 +4416,7 @@ Stage-0 TableScan [TS_0] alias:cbo_t1 Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select *, rank() over(partition by key order by value) as rr from src1 PREHOOK: type: QUERY POSTHOOK: query: explain select *, rank() over(partition by key order by value) as rr from src1 @@ -4401,6 +4453,7 @@ Stage-0 TableScan [TS_0] alias:src1 Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (select x.key AS key, count(1) AS cnt @@ -4500,6 +4553,7 @@ Stage-0 TableScan [TS_2] alias:x Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (select x.key AS key, count(1) AS cnt @@ -4599,6 +4653,7 @@ Stage-0 TableScan [TS_2] alias:x Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (select x.key AS key, count(1) AS cnt @@ -4692,6 +4747,7 @@ Stage-0 TableScan [TS_0] alias:y Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (select x.key AS key, count(1) AS cnt @@ -4792,6 +4848,7 @@ Stage-0 TableScan [TS_2] alias:y Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain create table abcd (a int, b int, c int, d int) PREHOOK: type: CREATETABLE POSTHOOK: query: explain create table abcd (a int, b int, c int, d int) @@ -4802,6 +4859,7 @@ Stage-0 input format:org.apache.hadoop.mapred.TextInputFormat name:default.abcd output format:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + PREHOOK: query: create table abcd (a int, b int, c int, d int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -4859,6 +4917,7 @@ Stage-0 TableScan [TS_0] alias:abcd Statistics:Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a PREHOOK: type: QUERY POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d) from abcd group by a @@ -4895,6 +4954,7 @@ Stage-0 TableScan [TS_0] alias:abcd 
Statistics:Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain create table src_rc_merge_test(key int, value string) stored as rcfile PREHOOK: type: CREATETABLE POSTHOOK: query: explain create table src_rc_merge_test(key int, value string) stored as rcfile @@ -4905,6 +4965,7 @@ Stage-0 input format:org.apache.hadoop.hive.ql.io.RCFileInputFormat name:default.src_rc_merge_test output format:org.apache.hadoop.hive.ql.io.RCFileOutputFormat + PREHOOK: query: create table src_rc_merge_test(key int, value string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -4931,6 +4992,7 @@ Stage-0 input format:org.apache.hadoop.hive.ql.io.RCFileInputFormat name:default.tgt_rc_merge_test output format:org.apache.hadoop.hive.ql.io.RCFileOutputFormat + PREHOOK: query: create table tgt_rc_merge_test(key int, value string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -4997,6 +5059,7 @@ Stage-0 TableScan [TS_0] alias:tgt_rc_merge_test Statistics:Num rows: 5 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test PREHOOK: type: QUERY POSTHOOK: query: explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test @@ -5030,6 +5093,7 @@ Stage-0 TableScan [TS_0] alias:tgt_rc_merge_test Statistics:Num rows: 5 Data size: 32 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: alter table tgt_rc_merge_test concatenate PREHOOK: type: ALTER_TABLE_MERGE PREHOOK: Input: default@tgt_rc_merge_test @@ -5086,6 +5150,7 @@ Stage-0 TableScan [TS_0] alias:tgt_rc_merge_test Statistics:Num rows: 1 Data size: 171 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test PREHOOK: type: QUERY POSTHOOK: query: explain select sum(hash(key)), sum(hash(value)) from tgt_rc_merge_test @@ -5119,6 +5184,7 @@ Stage-0 TableScan [TS_0] alias:tgt_rc_merge_test Statistics:Num rows: 1 Data size: 171 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: drop table src_rc_merge_test PREHOOK: type: DROPTABLE PREHOOK: Input: default@src_rc_merge_test @@ -5178,6 +5244,7 @@ Stage-0 TableScan [TS_2] alias:src Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain create table nzhang_Tmp(a int, b string) PREHOOK: type: CREATETABLE POSTHOOK: query: explain create table nzhang_Tmp(a int, b string) @@ -5188,6 +5255,7 @@ Stage-0 input format:org.apache.hadoop.mapred.TextInputFormat name:default.nzhang_Tmp output format:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat + PREHOOK: query: create table nzhang_Tmp(a int, b string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -5253,6 +5321,7 @@ Stage-3 Stage-0 Move Operator Please refer to the previous Stage-1 + PREHOOK: query: create table nzhang_CTAS1 as select key k, value from src sort by k, value limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src @@ -5320,6 +5389,7 @@ Stage-3 Stage-0 Move Operator Please refer to the previous Stage-1 + PREHOOK: query: create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb from src sort by half_key, conb limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src @@ -5334,6 +5404,7 @@ PREHOOK: query: explain create table if not exists nzhang_ctas3 as select key, v 
PREHOOK: type: CREATETABLE POSTHOOK: query: explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2 POSTHOOK: type: CREATETABLE + PREHOOK: query: create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2 PREHOOK: type: CREATETABLE POSTHOOK: query: create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2 @@ -5350,6 +5421,7 @@ Stage-0 input format:org.apache.hadoop.hive.ql.io.orc.OrcInputFormat name:default.acid_dtt output format:org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + PREHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -5430,6 +5502,7 @@ Stage-0 TableScan [TS_3] alias:src Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: CREATE TABLE myinput1(key int, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -5492,6 +5565,7 @@ Stage-0 TableScan [TS_1] alias:b Statistics:Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key PREHOOK: type: QUERY POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key @@ -5557,6 +5631,7 @@ Stage-0 TableScan [TS_2] alias:c Statistics:Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key PREHOOK: type: QUERY POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key @@ -5613,6 +5688,7 @@ Stage-0 TableScan [TS_2] alias:c Statistics:Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value PREHOOK: type: QUERY POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value @@ -5675,6 +5751,7 @@ Stage-0 TableScan [TS_2] alias:c Statistics:Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value PREHOOK: type: QUERY POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value @@ -5728,6 +5805,7 @@ Stage-0 TableScan [TS_2] alias:c Statistics:Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value PREHOOK: type: QUERY POSTHOOK: query: explain select * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value @@ -5774,6 +5852,7 @@ Stage-0 TableScan [TS_1] alias:b Statistics:Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key<=>b.value PREHOOK: type: QUERY POSTHOOK: query: explain select * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key<=>b.value @@ -5820,6 +5899,7 @@ Stage-0 TableScan [TS_1] 
alias:b Statistics:Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value PREHOOK: type: QUERY POSTHOOK: query: explain select * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value @@ -5866,6 +5946,7 @@ Stage-0 TableScan [TS_1] alias:b Statistics:Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value PREHOOK: type: QUERY POSTHOOK: query: explain select /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value @@ -5912,6 +5993,7 @@ Stage-0 TableScan [TS_1] alias:b Statistics:Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: CREATE TABLE smb_input(key int, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -6016,6 +6098,7 @@ Stage-0 TableScan [TS_1] alias:b Statistics:Num rows: 26 Data size: 190 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value PREHOOK: type: QUERY POSTHOOK: query: explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value @@ -6060,6 +6143,7 @@ Stage-0 TableScan [TS_1] alias:b Statistics:Num rows: 26 Data size: 190 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key PREHOOK: type: QUERY POSTHOOK: query: explain select /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key @@ -6106,6 +6190,7 @@ Stage-0 TableScan [TS_1] alias:b Statistics:Num rows: 26 Data size: 190 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key PREHOOK: type: QUERY POSTHOOK: query: explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key @@ -6152,6 +6237,7 @@ Stage-0 TableScan [TS_1] alias:b Statistics:Num rows: 26 Data size: 190 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key PREHOOK: type: QUERY POSTHOOK: query: explain select /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key @@ -6198,6 +6284,7 @@ Stage-0 TableScan [TS_1] alias:b Statistics:Num rows: 26 Data size: 190 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: drop table sales PREHOOK: type: DROPTABLE POSTHOOK: query: drop table sales @@ -6310,6 +6397,7 @@ Stage-0 TableScan [TS_2] alias:things Statistics:Num rows: 2 Data size: 12 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: drop table sales PREHOOK: type: DROPTABLE PREHOOK: Input: default@sales @@ -6404,6 +6492,7 @@ Stage-0 TableScan [TS_3] alias:srcpart Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450' PREHOOK: type: QUERY POSTHOOK: query: explain select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450' @@ -6482,6 +6571,7 @@ Stage-0 TableScan [TS_3] alias:srcpart Statistics:Num rows: 2000 Data size: 21248 
Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450' PREHOOK: type: QUERY POSTHOOK: query: explain select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450' @@ -6560,6 +6650,7 @@ Stage-0 TableScan [TS_3] alias:srcpart Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, @@ -6627,6 +6718,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz @@ -6709,6 +6801,7 @@ Stage-0 TableScan [TS_0] alias:p1 Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, @@ -6776,6 +6869,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, @@ -6843,6 +6937,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, @@ -6934,6 +7029,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select abc.* from noop(on part @@ -6998,6 +7094,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r @@ -7060,6 +7157,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, @@ -7128,6 +7226,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, @@ -7193,6 +7292,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, @@ -7277,6 +7377,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, sub1.cd, sub1.s1 @@ -7350,6 +7451,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select abc.p_mfgr, abc.p_name, rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, @@ -7441,6 +7543,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: 
COMPLETE + PREHOOK: query: explain create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, sum(p_retailprice) as s @@ -7462,6 +7565,7 @@ Stage-0 sum(p_retailprice) as s from part group by p_mfgr, p_brand + PREHOOK: query: CREATE TABLE part_4( p_mfgr STRING, p_name STRING, @@ -7635,6 +7739,7 @@ Stage-4 Move Operator table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","name:":"default.part_4","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"} Please refer to the previous Stage-3 + PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, @@ -7733,6 +7838,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, @@ -7842,6 +7948,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, @@ -7933,6 +8040,7 @@ Stage-0 TableScan [TS_0] alias:part Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select distinct src.* from src PREHOOK: type: QUERY POSTHOOK: query: explain select distinct src.* from src @@ -7978,6 +8086,7 @@ Stage-0 TableScan [TS_0] alias:src Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select explode(array('a', 'b')) PREHOOK: type: QUERY POSTHOOK: query: explain select explode(array('a', 'b')) @@ -7993,6 +8102,7 @@ Stage-0 outputColumnNames:["_col0"] TableScan [TS_0] alias:_dummy_table + PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -8130,6 +8240,7 @@ Stage-3 TableScan [TS_2] alias:src1 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: FROM src src1 JOIN src src2 ON (src1.key = src2.key) INSERT OVERWRITE TABLE dest_j1 select src1.key, src2.value PREHOOK: type: QUERY @@ -8221,6 +8332,7 @@ Stage-0 TableScan [TS_0] alias:a Statistics:Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select /*+ STREAMTABLE(a,c) */ * FROM T1 a JOIN T2 b ON a.key = b.key @@ -8300,6 +8412,7 @@ Stage-0 TableScan [TS_0] alias:a Statistics:Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) PREHOOK: type: QUERY POSTHOOK: query: explain FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) @@ -8363,6 +8476,7 @@ Stage-0 TableScan [TS_1] alias:c Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: FROM T1 a JOIN src c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -8439,6 +8553,7 @@ Stage-0 TableScan [TS_2] alias:src Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + PREHOOK: query: explain select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.val PREHOOK: type: 
QUERY POSTHOOK: query: explain select /*+ mapjoin(k)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.val @@ -8501,6 +8616,7 @@ Stage-0 TableScan [TS_0] alias:k Statistics:Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key PREHOOK: type: QUERY POSTHOOK: query: explain select sum(hash(k.key)), sum(hash(v.val)) from T1 k join T1 v on k.key=v.key @@ -8564,6 +8680,7 @@ Stage-0 TableScan [TS_0] alias:k Statistics:Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select count(1) from T1 a join T1 b on a.key = b.key PREHOOK: type: QUERY POSTHOOK: query: explain select count(1) from T1 a join T1 b on a.key = b.key @@ -8624,6 +8741,7 @@ Stage-0 TableScan [TS_0] alias:a Statistics:Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain FROM T1 a LEFT OUTER JOIN T2 c ON c.key+1=a.key select sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) PREHOOK: type: QUERY POSTHOOK: query: explain FROM T1 a LEFT OUTER JOIN T2 c ON c.key+1=a.key select sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) @@ -8681,6 +8799,7 @@ Stage-0 |<-TableScan [TS_0] alias:a Statistics:Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain FROM T1 a RIGHT OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) PREHOOK: type: QUERY POSTHOOK: query: explain FROM T1 a RIGHT OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) @@ -8738,6 +8857,7 @@ Stage-0 |<-TableScan [TS_1] alias:c Statistics:Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain FROM T1 a FULL OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) PREHOOK: type: QUERY POSTHOOK: query: explain FROM T1 a FULL OUTER JOIN T2 c ON c.key+1=a.key select /*+ STREAMTABLE(a) */ sum(hash(a.key)), sum(hash(a.val)), sum(hash(c.key)) @@ -8802,6 +8922,7 @@ Stage-0 TableScan [TS_1] alias:c Statistics:Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k left outer join T1 v on k.key+1=v.key PREHOOK: type: QUERY POSTHOOK: query: explain select /*+ mapjoin(v)*/ sum(hash(k.key)), sum(hash(v.val)) from T1 k left outer join T1 v on k.key+1=v.key @@ -8859,3 +8980,4 @@ Stage-0 |<-TableScan [TS_0] alias:k Statistics:Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE + diff --git a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out index 222e89e..da3b380 100644 --- a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out +++ b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out @@ -260,6 +260,7 @@ Stage-0 TableScan [TS_5] alias:x Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: EXPLAIN select ss.k1,sr.k2,cs.k3,count(ss.v1),count(sr.v2),count(cs.v3) @@ -573,6 +574,7 @@ Stage-0 TableScan [TS_14] alias:ss Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE + PREHOOK: query: explain SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) @@ -846,6 +848,7 @@ Stage-0 TableScan [TS_31] alias:y Statistics:Num rows: 500 Data size: 
5312 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain
 SELECT x.key, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
@@ -1369,6 +1372,7 @@ Stage-0
          TableScan [TS_2]
             alias:x
             Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: EXPLAIN
 SELECT x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
@@ -1447,6 +1451,7 @@ Stage-0
          TableScan [TS_2]
             alias:z
             Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: EXPLAIN
 select ss.k1,sr.k2,cs.k3,count(ss.v1),count(sr.v2),count(cs.v3)
@@ -1710,6 +1715,7 @@ Stage-0
          TableScan [TS_11]
             alias:srcpart
             Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain
 SELECT x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
@@ -1957,6 +1963,7 @@ Stage-0
          TableScan [TS_27]
             alias:y
             Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain
 SELECT x.key, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
@@ -2438,6 +2445,7 @@ Stage-0
          TableScan [TS_0]
             alias:y
             Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -2605,6 +2613,7 @@ Stage-0
          TableScan [TS_0]
             alias:s1
             Statistics:Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain
 select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key join tab s2 on s1.value=s2.value
 PREHOOK: type: QUERY
@@ -2667,6 +2676,7 @@ Stage-0
          TableScan [TS_2]
             alias:s2
             Statistics:Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain
 select s1.key as key, s1.value as value from tab s1 join tab2 s3 on s1.key=s3.key
 PREHOOK: type: QUERY
@@ -2702,6 +2712,7 @@ Stage-0
          TableScan [TS_0]
             alias:s1
             Statistics:Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain
 select s1.key as key, s1.value as value from tab s1 join tab2 s3 on s1.key=s3.key join tab2 s2 on s1.value=s2.value
 PREHOOK: type: QUERY
@@ -2764,6 +2775,7 @@ Stage-0
          TableScan [TS_2]
             alias:s2
             Statistics:Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain
 select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
 UNION ALL
@@ -2862,6 +2874,7 @@ Stage-0
          TableScan [TS_8]
             alias:s2
             Statistics:Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain
 select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key join tab s2 on s1.value=s2.value
 UNION ALL
@@ -2984,6 +2997,7 @@ Stage-0
          TableScan [TS_14]
             alias:s2
             Statistics:Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain
 SELECT x.key, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
@@ -3377,6 +3391,7 @@ Stage-0
          TableScan [TS_4]
             alias:y
             Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain
 SELECT x.key, y.value
 FROM src1 x JOIN src y ON (x.key = y.key)
@@ -3880,6 +3895,7 @@ Stage-0
          TableScan [TS_80]
             alias:y
             Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: CREATE TABLE a(key STRING, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -4401,6 +4417,7 @@ Stage-11
    Stage-5
       Stats-Aggr Operator
         Please refer to the previous Stage-0
+
 PREHOOK: query: explain
 FROM (
@@ -4920,6 +4937,7 @@ Stage-5
    Move Operator
      table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","name:":"default.a","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
      Please refer to the previous Stage-4
+
 PREHOOK: query: CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -5058,6 +5076,7 @@ Stage-4
    Move Operator
      table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","name:":"default.dest1","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
      Please refer to the previous Stage-3
+
 PREHOOK: query: EXPLAIN FROM UNIQUEJOIN PRESERVE src a (a.key), PRESERVE src1 b (b.key), PRESERVE srcpart c (c.key) SELECT a.key, b.key, c.key
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN FROM UNIQUEJOIN PRESERVE src a (a.key), PRESERVE src1 b (b.key), PRESERVE srcpart c (c.key) SELECT a.key, b.key, c.key
@@ -5111,6 +5130,7 @@ Stage-0
          TableScan [TS_2]
             alias:c
             Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: EXPLAIN
 SELECT TRANSFORM(a.key, a.value) USING 'cat' AS (tkey, tvalue)
@@ -5171,6 +5191,7 @@ Stage-0
          TableScan [TS_1]
             alias:b
             Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
 PREHOOK: query: explain
 FROM (
 select key, value from (
@@ -5347,6 +5368,7 @@ Stage-4
    Move Operator
      table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","name:":"default.dest1","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
      Please refer to the previous Stage-3
+
 PREHOOK: query: explain
 FROM (
 select 'tst1' as key, cast(count(1) as string) as value, 'tst1' as value2 from src s1
@@ -5494,3 +5516,4 @@ Stage-4
    Move Operator
      table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","name:":"default.dest1","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
      Please refer to the previous Stage-3
+
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
new file mode 100644
index 0000000..9358158
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
@@ -0,0 +1,522 @@
+PREHOOK: query: explain select key, value
+FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, value
+FROM srcpart LATERAL VIEW explode(array(1,2,3)) myTable AS myCol
+POSTHOOK: type: QUERY
+Plan not optimized by CBO.
+
+Stage-0
+   Fetch Operator
+      limit:-1
+      Select Operator [SEL_6]
+         outputColumnNames:["_col0","_col1"]
+         Lateral View Join Operator [LVJ_5]
+            outputColumnNames:["_col0","_col1","_col7"]
+            Select Operator [SEL_2]
+               outputColumnNames:["key","value"]
+               Lateral View Forward [LVF_1]
+                  TableScan [TS_0]
+                     alias:srcpart
+            Select Operator [SEL_6]
+               outputColumnNames:["_col0","_col1"]
+               Lateral View Join Operator [LVJ_5]
+                  outputColumnNames:["_col0","_col1","_col7"]
+                  UDTF Operator [UDTF_4]
+                     function name:explode
+                     Select Operator [SEL_3]
+                        outputColumnNames:["_col0"]
+                        Please refer to the previous Lateral View Forward [LVF_1]
+
+PREHOOK: query: explain show tables
+PREHOOK: type: SHOWTABLES
+POSTHOOK: query: explain show tables
+POSTHOOK: type: SHOWTABLES
+Stage-1
+   Fetch Operator
+      limit:-1
+      Stage-0
+         Show Table Operator:
+            database name:default
+
+#### A masked pattern was here ####
+PREHOOK: type: CREATEDATABASE
+#### A masked pattern was here ####
+POSTHOOK: type: CREATEDATABASE
+Stage-0
+
+#### A masked pattern was here ####
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:newDB
+#### A masked pattern was here ####
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:newDB
+#### A masked pattern was here ####
+PREHOOK: query: explain describe database extended newDB
+PREHOOK: type: DESCDATABASE
+POSTHOOK: query: explain describe database extended newDB
+POSTHOOK: type: DESCDATABASE
+Stage-1
+   Fetch Operator
+      limit:-1
+      Stage-0
+
+PREHOOK: query: describe database extended newDB
+PREHOOK: type: DESCDATABASE
+PREHOOK: Input: database:newdb
+POSTHOOK: query: describe database extended newDB
+POSTHOOK: type: DESCDATABASE
+POSTHOOK: Input: database:newdb
+newdb	location/in/test	hive_test_user	USER
+PREHOOK: query: explain use newDB
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: explain use newDB
+POSTHOOK: type: SWITCHDATABASE
+Stage-0
+
+PREHOOK: query: use newDB
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:newdb
+POSTHOOK: query: use newDB
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:newdb
+PREHOOK: query: create table tab (name string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:newdb
+PREHOOK: Output: newDB@tab
+POSTHOOK: query: create table tab (name string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:newdb
+POSTHOOK: Output: newDB@tab
+PREHOOK: query: explain alter table tab rename to newName
+PREHOOK: type: ALTERTABLE_RENAME
+POSTHOOK: query: explain alter table tab rename to newName
+POSTHOOK: type: ALTERTABLE_RENAME
+Stage-0
+   Alter Table Operator:
+      new name:newDB.newName
+      old name:newDB.tab
+      type:rename
+
+PREHOOK: query: explain drop table tab
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: explain drop table tab
+POSTHOOK: type: DROPTABLE
+Stage-0
+   Drop Table Operator:
+      table:tab
+
+PREHOOK: query: drop table tab
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: newdb@tab
+PREHOOK: Output: newdb@tab
+POSTHOOK: query: drop table tab
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: newdb@tab
+POSTHOOK: Output: newdb@tab
+PREHOOK: query: explain use default
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: explain use default
+POSTHOOK: type: SWITCHDATABASE
+Stage-0
+
+PREHOOK: query: use default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: use default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: drop database newDB
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:newdb
+PREHOOK: Output: database:newdb
+POSTHOOK: query: drop database newDB
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:newdb
+POSTHOOK: Output: database:newdb
+PREHOOK: query: explain analyze table src compute statistics
+PREHOOK: type: QUERY
+POSTHOOK: query: explain analyze table src compute statistics
+POSTHOOK: type: QUERY
+Stage-2
+   Stats-Aggr Operator
+      Stage-0
+         Map 1
+            TableScan [TS_0]
+               alias:src
+               Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+
+PREHOOK: query: explain analyze table src compute statistics for columns
+PREHOOK: type: QUERY
+POSTHOOK: query: explain analyze table src compute statistics for columns
+POSTHOOK: type: QUERY
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+
+Stage-2
+   Column Stats Work{}
+      Stage-0
+         Reducer 2
+            File Output Operator [FS_6]
+               compressed:false
+               Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+               table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+               Group By Operator [GBY_4]
+               |  aggregations:["compute_stats(VALUE._col0)","compute_stats(VALUE._col1)"]
+               |  outputColumnNames:["_col0","_col1"]
+               |  Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+               |<-Map 1 [SIMPLE_EDGE]
+                  Reduce Output Operator [RS_3]
+                     sort order:
+                     Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                     value expressions:_col0 (type: struct), _col1 (type: struct)
+                     Group By Operator [GBY_2]
+                        aggregations:["compute_stats(key, 16)","compute_stats(value, 16)"]
+                        outputColumnNames:["_col0","_col1"]
+                        Statistics:Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                        Select Operator [SEL_1]
+                           outputColumnNames:["key","value"]
+                           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                           TableScan [TS_0]
+                              alias:src
+                              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
+PREHOOK: query: explain
+CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x))
+PREHOOK: type: CREATEMACRO
+POSTHOOK: query: explain
+CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x))
+POSTHOOK: type: CREATEMACRO
+Stage-0
+
+PREHOOK: query: CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x))
+PREHOOK: type: CREATEMACRO
+PREHOOK: Output: database:default
+POSTHOOK: query: CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x))
+POSTHOOK: type: CREATEMACRO
+POSTHOOK: Output: database:default
+PREHOOK: query: EXPLAIN SELECT SIGMOID(2) FROM src LIMIT 1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT SIGMOID(2) FROM src LIMIT 1
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Stage-0
+   Fetch Operator
+      limit:1
+      Limit [LIM_2]
+         Number of rows:1
+         Select Operator [SEL_1]
+            outputColumnNames:["_col0"]
+            TableScan [TS_0]
+               alias:src
+
+PREHOOK: query: explain DROP TEMPORARY MACRO SIGMOID
+PREHOOK: type: DROPMACRO
+POSTHOOK: query: explain DROP TEMPORARY MACRO SIGMOID
+POSTHOOK: type: DROPMACRO
+Stage-0
+
+PREHOOK: query: DROP TEMPORARY MACRO SIGMOID
+PREHOOK: type: DROPMACRO
+PREHOOK: Output: database:default
+POSTHOOK: query: DROP TEMPORARY MACRO SIGMOID
+POSTHOOK: type: DROPMACRO
+POSTHOOK: Output: database:default
+PREHOOK: query: explain create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: explain create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+Plan optimized by CBO.
+
+Stage-3
+   Stats-Aggr Operator
+      Stage-4
+         Create Table Operator:
+            columns:["key string","value string"]
+            input format:org.apache.hadoop.mapred.TextInputFormat
+            name:default.src_autho_test
+            output format:org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+         Stage-2
+            Dependency Collection{}
+               Stage-1
+                  Map 1
+                     File Output Operator [FS_2]
+                        compressed:false
+                        Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                        table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","name:":"default.src_autho_test","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+                        Select Operator [SEL_1]
+                           outputColumnNames:["_col0","_col1"]
+                           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                           TableScan [TS_0]
+                              alias:src
+                              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+               Stage-0
+                  Move Operator
+                     Please refer to the previous Stage-1
+
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+PREHOOK: query: explain grant select on table src_autho_test to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: query: explain grant select on table src_autho_test to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+Stage-0
+
+PREHOOK: query: grant select on table src_autho_test to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: grant select on table src_autho_test to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@src_autho_test
+PREHOOK: query: explain show grant user hive_test_user on table src_autho_test
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: explain show grant user hive_test_user on table src_autho_test
+POSTHOOK: type: SHOW_GRANT
+Stage-1
+   Fetch Operator
+      limit:-1
+      Stage-0
+
+PREHOOK: query: explain show grant user hive_test_user on table src_autho_test(key)
+PREHOOK: type: SHOW_GRANT
+POSTHOOK: query: explain show grant user hive_test_user on table src_autho_test(key)
+POSTHOOK: type: SHOW_GRANT
+Stage-1
+   Fetch Operator
+      limit:-1
+      Stage-0
+
+PREHOOK: query: select key from src_autho_test order by key limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_autho_test
+#### A masked pattern was here ####
+POSTHOOK: query: select key from src_autho_test order by key limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_autho_test
+#### A masked pattern was here ####
+0
+0
+0
+10
+100
+100
+103
+103
+104
+104
+105
+11
+111
+113
+113
+114
+116
+118
+118
+119
+PREHOOK: query: explain revoke select on table src_autho_test from user hive_test_user
+PREHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: query: explain revoke select on table src_autho_test from user hive_test_user
+POSTHOOK: type: REVOKE_PRIVILEGE
+Stage-0
+
+PREHOOK: query: explain grant select(key) on table src_autho_test to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: query: explain grant select(key) on table src_autho_test to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+Stage-0
+
+PREHOOK: query: explain revoke select(key) on table src_autho_test from user hive_test_user
+PREHOOK: type: REVOKE_PRIVILEGE
+POSTHOOK: query: explain revoke select(key) on table src_autho_test from user hive_test_user
+POSTHOOK: type: REVOKE_PRIVILEGE
+Stage-0
+
+PREHOOK: query: explain
+create role sRc_roLE
+PREHOOK: type: CREATEROLE
+POSTHOOK: query: explain
+create role sRc_roLE
+POSTHOOK: type: CREATEROLE
+Stage-0
+
+PREHOOK: query: create role sRc_roLE
+PREHOOK: type: CREATEROLE
+POSTHOOK: query: create role sRc_roLE
+POSTHOOK: type: CREATEROLE
+PREHOOK: query: explain
+grant role sRc_roLE to user hive_test_user
+PREHOOK: type: GRANT_ROLE
+POSTHOOK: query: explain
+grant role sRc_roLE to user hive_test_user
+POSTHOOK: type: GRANT_ROLE
+Stage-0
+
+PREHOOK: query: grant role sRc_roLE to user hive_test_user
+PREHOOK: type: GRANT_ROLE
+POSTHOOK: query: grant role sRc_roLE to user hive_test_user
+POSTHOOK: type: GRANT_ROLE
+PREHOOK: query: explain show role grant user hive_test_user
+PREHOOK: type: SHOW_ROLE_GRANT
+POSTHOOK: query: explain show role grant user hive_test_user
+POSTHOOK: type: SHOW_ROLE_GRANT
+Stage-1
+   Fetch Operator
+      limit:-1
+      Stage-0
+
+PREHOOK: query: explain drop role sRc_roLE
+PREHOOK: type: DROPROLE
+POSTHOOK: query: explain drop role sRc_roLE
+POSTHOOK: type: DROPROLE
+Stage-0
+
+PREHOOK: query: drop role sRc_roLE
+PREHOOK: type: DROPROLE
+POSTHOOK: query: drop role sRc_roLE
+POSTHOOK: type: DROPROLE
+PREHOOK: query: drop table src_autho_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: drop table src_autho_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Output: default@src_autho_test
+PREHOOK: query: explain drop view v
+PREHOOK: type: DROPVIEW
+POSTHOOK: query: explain drop view v
+POSTHOOK: type: DROPVIEW
+Stage-0
+   Drop Table Operator:
+      table:v
+
+PREHOOK: query: explain create view v as with cte as (select * from src order by key limit 5)
+select * from cte
+PREHOOK: type: CREATEVIEW
+POSTHOOK: query: explain create view v as with cte as (select * from src order by key limit 5)
+select * from cte
+POSTHOOK: type: CREATEVIEW
+Plan not optimized by CBO.
+
+Stage-0
+   Create View Operator:
+      name:default.v
+      original text:with cte as (select * from src order by key limit 5)
+select * from cte
+
+PREHOOK: query: explain with cte as (select * from src order by key limit 5)
+select * from cte
+PREHOOK: type: QUERY
+POSTHOOK: query: explain with cte as (select * from src order by key limit 5)
+select * from cte
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE)
+
+Stage-0
+   Fetch Operator
+      limit:5
+      Stage-1
+         Reducer 2
+            File Output Operator [FS_5]
+               compressed:false
+               Statistics:Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+               table:{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}
+               Limit [LIM_4]
+                  Number of rows:5
+                  Statistics:Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator [SEL_3]
+                  |  outputColumnNames:["_col0","_col1"]
+                  |  Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  |<-Map 1 [SIMPLE_EDGE]
+                     Reduce Output Operator [RS_2]
+                        key expressions:_col0 (type: string)
+                        sort order:+
+                        Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                        value expressions:_col1 (type: string)
+                        Select Operator [SEL_1]
+                           outputColumnNames:["_col0","_col1"]
+                           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                           TableScan [TS_0]
+                              alias:src
+                              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+
+PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' into table orc_merge5
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_merge5
+PREHOOK: query: explain insert overwrite table orc_merge5 select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+PREHOOK: type: QUERY
+POSTHOOK: query: explain insert overwrite table orc_merge5 select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Stage-3
+   Stats-Aggr Operator
+      Stage-0
+         Move Operator
+            table:{"serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.orc_merge5","input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"}
+         Stage-2
+            Dependency Collection{}
+               Stage-5(CONDITIONAL)
+                  Move Operator
+                     Stage-8(CONDITIONAL CHILD TASKS: Stage-5, Stage-4, Stage-6)
+                        Conditional Operator
+                           Stage-1
+                              Map 1
+                                 File Output Operator [FS_3]
+                                    compressed:false
+                                    Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+                                    table:{"serde:":"org.apache.hadoop.hive.ql.io.orc.OrcSerde","name:":"default.orc_merge5","input format:":"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat","output format:":"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"}
+                                    Select Operator [SEL_2]
+                                       outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
+                                       Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+                                       Filter Operator [FIL_4]
+                                          predicate:(userid <= 13) (type: boolean)
+                                          Statistics:Num rows: 306 Data size: 82044 Basic stats: COMPLETE Column stats: NONE
+                                          TableScan [TS_0]
+                                             alias:orc_merge5
+                                             Statistics:Num rows: 919 Data size: 246402 Basic stats: COMPLETE Column stats: NONE
+               Stage-4(CONDITIONAL)
+                  File Merge
+                     ORC File Merge Operator [OFM_7]
+                        Please refer to the previous Stage-8(CONDITIONAL CHILD TASKS: Stage-5, Stage-4, Stage-6)
+               Stage-7
+                  Move Operator
+               Stage-6(CONDITIONAL)
+                  File Merge
+                     ORC File Merge Operator [OFM_7]
+                        Please refer to the previous Stage-8(CONDITIONAL CHILD TASKS: Stage-5, Stage-4, Stage-6)
+
+PREHOOK: query: drop table orc_merge5
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_merge5
+PREHOOK: Output: default@orc_merge5
+POSTHOOK: query: drop table orc_merge5
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_merge5
+POSTHOOK: Output: default@orc_merge5