diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index ba4d0de..be67b60 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -522,8 +522,6 @@
HIVEDEBUGLOCALTASK("hive.debug.localtask",false),
- HIVEJOBPROGRESS("hive.task.progress", false),
-
HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat"),
HIVEENFORCEBUCKETING("hive.enforce.bucketing", false),
@@ -781,6 +779,9 @@
"org.apache.hadoop.hive.ql.exec.PTFPersistence$PartitionedByteBasedList"),
HIVE_PTF_PARTITION_PERSISTENT_SIZE("hive.ptf.partition.persistence.memsize",
(int) Math.pow(2, (5 + 10 + 10)) ), // 32MB
+ HIVECOUNTERGROUP("hive.counters.group.name", "HIVE"),
+ HIVECOUNTERCREATEDFILES("hive.counters.created.files", "CREATED_FILES"),
+ HIVECOUNTERFATAL("hive.counters.fatal", "FATAL_ERROR"),
;
public final String varname;
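
The three new ConfVars above are the heart of the patch: instead of reserving blocks of ProgressCounter enum slots per operator, task-side code now reports under a configurable counter group using plain string names. Below is a minimal sketch, not part of the patch, of how a caller uses them; the class name CounterReportingSketch is hypothetical, while HiveConf.getVar and the String-based Reporter.incrCounter overload are the actual APIs the later hunks rely on.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.mapred.Reporter;

public class CounterReportingSketch {
  // Bump the CREATED_FILES counter under the configured group ("HIVE" by default).
  public static void reportCreatedFile(Configuration conf, Reporter reporter) {
    if (reporter != null) {
      reporter.incrCounter(
          HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP),
          HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERCREATEDFILES),
          1L);
    }
  }
}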
diff --git a/conf/hive-default.xml.template b/conf/hive-default.xml.template
index 5de5965..675ea7c 100644
--- a/conf/hive-default.xml.template
+++ b/conf/hive-default.xml.template
@@ -716,12 +716,6 @@
-<property>
-  <name>hive.task.progress</name>
-  <value>false</value>
-  <description>Whether Hive should periodically update task progress counters during execution. Enabling this allows task progress to be monitored more closely in the job tracker, but may impose a performance penalty. This flag is automatically set to true for jobs with hive.exec.dynamic.partition set to true.</description>
-</property>
-
<name>hive.hwi.war.file</name>
<value>lib/hive-hwi-@VERSION@.war</value>
<description>This sets the path to the HWI war file, relative to ${HIVE_HOME}.</description>
diff --git a/data/conf/hive-site.xml b/data/conf/hive-site.xml
index 4e6ff16..15353a7 100644
--- a/data/conf/hive-site.xml
+++ b/data/conf/hive-site.xml
@@ -144,12 +144,6 @@
-<property>
-  <name>hive.task.progress</name>
-  <value>false</value>
-  <description>Track progress of a task</description>
-</property>
-
<name>hive.support.concurrency</name>
<value>true</value>
<description>Whether hive supports concurrency or not. A zookeeper instance must be up and running for the default hive lock manager to support read-write locks.</description>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 8330f65..b77f50c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -368,6 +368,9 @@
+ "running your custom script."),
SCRIPT_CLOSING_ERROR(20003, "An error occurred when trying to close the Operator " +
"running your custom script."),
+ DYNAMIC_PARTITIONS_TOO_MANY_PER_NODE_ERROR(20004, "Fatal error occurred when node " +
+ "tried to create too many dynamic partitions. The maximum number of dynamic partitions " +
+ "is controlled by hive.exec.max.dynamic.partitions and hive.exec.max.dynamic.partitions.pernode. "),
STATSPUBLISHER_NOT_OBTAINED(30000, "StatsPublisher cannot be obtained. " +
"There was a error to retrieve the StatsPublisher, and retrying " +
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
index 1b3a226..a6e5054 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
@@ -333,7 +333,6 @@ private void updateCountersInQueryPlan() {
// if the task has started, all operators within the task have
// started
op.setStarted(started.contains(task.getTaskId()));
- op.setOperatorCounters(counters.get(op.getOperatorId()));
// if the task is done, all operators are done as well
op.setDone(done.contains(task.getTaskId()));
}
@@ -382,8 +381,6 @@ private void extractCounters() throws IOException {
}
if (task instanceof ExecDriver) {
ExecDriver mrTask = (ExecDriver) task;
- extractOperatorCounters(mrTask.getWork().getAliasToWork().values(),
- task.getId() + "_MAP");
if (mrTask.mapStarted()) {
started.add(task.getId() + "_MAP");
}
@@ -394,7 +391,6 @@ private void extractCounters() throws IOException {
Collection<Operator<? extends OperatorDesc>> reducerTopOps =
new ArrayList<Operator<? extends OperatorDesc>>();
reducerTopOps.add(mrTask.getWork().getReducer());
- extractOperatorCounters(reducerTopOps, task.getId() + "_REDUCE");
if (mrTask.reduceStarted()) {
started.add(task.getId() + "_REDUCE");
}
@@ -413,34 +409,6 @@ private void extractCounters() throws IOException {
}
}
- private void extractOperatorCounters(
- Collection<Operator<? extends OperatorDesc>> topOps, String taskId) {
- Queue<Operator<? extends OperatorDesc>> opsToVisit =
- new LinkedList<Operator<? extends OperatorDesc>>();
- Set<Operator<? extends OperatorDesc>> opsVisited =
- new HashSet<Operator<? extends OperatorDesc>>();
- opsToVisit.addAll(topOps);
- while (opsToVisit.size() != 0) {
- Operator<? extends OperatorDesc> op = opsToVisit.remove();
- opsVisited.add(op);
- Map<String, Long> ctrs = op.getCounters();
- if (ctrs != null) {
- counters.put(op.getOperatorId(), op.getCounters());
- }
- if (op.getDone()) {
- done.add(op.getOperatorId());
- }
- if (op.getChildOperators() != null) {
- for (Operator<? extends OperatorDesc> childOp : op.getChildOperators()) {
- if (!opsVisited.contains(childOp)) {
- opsToVisit.add(childOp);
- }
- }
- }
- }
-
- }
-
public org.apache.hadoop.hive.ql.plan.api.Query getQueryPlan()
throws IOException {
if (query.getStageGraph() == null) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
index 00e4158..cfd7e67 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java
@@ -58,12 +58,6 @@
transient int numMapRowsRead;
- private static final transient String[] FATAL_ERR_MSG = {
- null, // counter value 0 means no error
- "Mapside join exceeds available memory. "
- + "Please try removing the mapjoin hint."
- };
-
transient boolean firstRow;
@@ -123,13 +117,6 @@ protected void initializeOp(Configuration hconf) throws HiveException {
initializeChildren(hconf);
}
-
- @Override
- protected void fatalErrorMessage(StringBuilder errMsg, long counterCode) {
- errMsg.append("Operator " + getOperatorId() + " (id=" + id + "): "
- + FATAL_ERR_MSG[(int) counterCode]);
- }
-
@Override
public OperatorType getType() {
return OperatorType.MAPJOIN;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
index ca99f9b..31d6b7b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
@@ -132,8 +132,6 @@ public CommonJoinOperator(CommonJoinOperator<T> clone) {
this.nextSz = clone.nextSz;
this.childOperators = clone.childOperators;
this.parentOperators = clone.parentOperators;
- this.counterNames = clone.counterNames;
- this.counterNameToEnum = clone.counterNameToEnum;
this.done = clone.done;
this.operatorId = clone.operatorId;
this.storage = clone.storage;
@@ -141,12 +139,9 @@ public CommonJoinOperator(CommonJoinOperator<T> clone) {
this.conf = clone.getConf();
this.setSchema(clone.getSchema());
this.alias = clone.alias;
- this.beginTime = clone.beginTime;
- this.inputRows = clone.inputRows;
this.childOperatorsArray = clone.childOperatorsArray;
this.childOperatorsTag = clone.childOperatorsTag;
this.colExprMap = clone.colExprMap;
- this.counters = clone.counters;
this.dummyObj = clone.dummyObj;
this.dummyObjVectors = clone.dummyObjVectors;
this.forwardCache = clone.forwardCache;
@@ -155,7 +150,6 @@ public CommonJoinOperator(CommonJoinOperator<T> clone) {
this.hconf = clone.hconf;
this.id = clone.id;
this.inputObjInspectors = clone.inputObjInspectors;
- this.inputRows = clone.inputRows;
this.noOuterJoin = clone.noOuterJoin;
this.numAliases = clone.numAliases;
this.operatorId = clone.operatorId;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index b28d16e..427c04a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -494,7 +494,7 @@ public boolean pushRow() throws IOException, HiveException {
}
protected void pushRow(InspectableObject row) throws HiveException {
- operator.process(row.o, 0);
+ operator.processOp(row.o, 0);
}
private transient final InspectableObject inspectable = new InspectableObject();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index ab197f0..62a21c0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -34,8 +34,10 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.SkewedValueList;
import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.metadata.HiveFatalException;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
import org.apache.hadoop.hive.ql.io.HiveKey;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
@@ -91,11 +93,6 @@
private transient boolean statsCollectRawDataSize;
- private static final transient String[] FATAL_ERR_MSG = {
- null, // counter value 0 means no error
- "Number of dynamic partitions exceeded hive.exec.max.dynamic.partitions.pernode."
- };
-
/**
* RecordWriter.
*
@@ -518,7 +515,9 @@ private void createBucketFiles(FSPaths fsp) throws HiveException {
reporter);
// increment the CREATED_FILES counter
if (reporter != null) {
- reporter.incrCounter(ProgressCounter.CREATED_FILES, 1);
+ reporter.incrCounter(HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP),
+ HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERCREATEDFILES),
+ 1);
}
filesIdx++;
}
@@ -573,16 +572,6 @@ public void processOp(Object row, int tag) throws HiveException {
}
}
- // Since File Sink is a terminal operator, forward is not called - so,
- // maintain the number of output rows explicitly
- if (counterNameToEnum != null) {
- ++outputRows;
- if (outputRows % 1000 == 0) {
- incrCounter(numOutputRowsCntr, outputRows);
- outputRows = 0;
- }
- }
-
try {
updateProgress();
@@ -754,12 +743,10 @@ private FSPaths getDynOutPaths(List<String> row, String lbDirName) throws HiveEx
if (fsp2 == null) {
// check # of dp
if (valToPaths.size() > maxPartitions) {
- // throw fatal error
- if (counterNameToEnum != null) {
- incrCounter(fatalErrorCntr, 1);
- }
- fatalError = true;
- LOG.error("Fatal error was thrown due to exceeding number of dynamic partitions");
+ // we cannot proceed and need to tell the hive client that retries won't succeed either
+ throw new HiveFatalException(
+ ErrorMsg.DYNAMIC_PARTITIONS_TOO_MANY_PER_NODE_ERROR.getErrorCodedMsg()
+ + "Maximum was set to: " + maxPartitions);
}
fsp2 = createNewPaths(dpDir);
}
@@ -792,54 +779,6 @@ private String getDynPartDirectory(List<String> row, List<String> dpColNames, in
return FileUtils.makePartName(dpColNames, row);
}
- @Override
- protected void fatalErrorMessage(StringBuilder errMsg, long counterCode) {
- errMsg.append("Operator ").append(getOperatorId()).append(" (id=").append(id).append("): ");
- errMsg.append(counterCode > FATAL_ERR_MSG.length - 1 ?
- "fatal error." :
- FATAL_ERR_MSG[(int) counterCode]);
- // number of partitions exceeds limit, list all the partition names
- if (counterCode > 0) {
- errMsg.append(lsDir());
- }
- }
-
- // sample the partitions that are generated so that users have a sense of what's causing the error
- private String lsDir() {
- String specPath = conf.getDirName();
- // need to get a JobConf here because it's not passed through at client side
- JobConf jobConf = new JobConf();
- Path tmpPath = Utilities.toTempPath(specPath);
- StringBuilder sb = new StringBuilder("\n");
- try {
- DynamicPartitionCtx dpCtx = conf.getDynPartCtx();
- int numDP = dpCtx.getNumDPCols();
- FileSystem fs = tmpPath.getFileSystem(jobConf);
- int level = numDP;
- if (conf.isLinkedFileSink()) {
- level++;
- }
- FileStatus[] status = Utilities.getFileStatusRecurse(tmpPath, level, fs);
- sb.append("Sample of ")
- .append(Math.min(status.length, 100))
- .append(" partitions created under ")
- .append(tmpPath.toString())
- .append(":\n");
- for (int i = 0; i < status.length; ++i) {
- sb.append("\t.../");
- sb.append(getPartitionSpec(status[i].getPath(), numDP))
- .append("\n");
- }
- sb.append("...\n");
- } catch (Exception e) {
- // cannot get the subdirectories, just return the root directory
- sb.append(tmpPath).append("...\n").append(e.getMessage());
- e.printStackTrace();
- } finally {
- return sb.toString();
- }
- }
-
private String getPartitionSpec(Path path, int level) {
Stack<String> st = new Stack<String>();
Path p = path;
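
The FileSinkOperator hunks above replace the old counter-and-flag handshake for dynamic partition overflow with a hard failure: getDynOutPaths throws before creating the partition that would exceed the per-node limit. A compact standalone model of that check follows; DynPartLimitSketch and trackPartition are hypothetical names, valToPaths and maxPartitions mirror the fields in the patch context, and IllegalStateException stands in for HiveFatalException to keep the sketch self-contained.

import java.util.HashMap;
import java.util.Map;

class DynPartLimitSketch {
  // one entry per distinct dynamic partition directory created on this node
  private final Map<String, Object> valToPaths = new HashMap<String, Object>();
  private final int maxPartitions; // hive.exec.max.dynamic.partitions.pernode

  DynPartLimitSketch(int maxPartitions) {
    this.maxPartitions = maxPartitions;
  }

  // Called once per row with the row's dynamic partition directory.
  void trackPartition(String dpDir) {
    if (!valToPaths.containsKey(dpDir)) {
      if (valToPaths.size() > maxPartitions) {
        // a retry would hit the same limit, so fail with a non-retryable error
        throw new IllegalStateException(
            "too many dynamic partitions; maximum was set to: " + maxPartitions);
      }
      valToPaths.put(dpDir, new Object()); // stands in for createNewPaths(dpDir)
    }
  }
}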
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
index 532a49b..004db03 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
@@ -1055,12 +1055,6 @@ protected void forward(Object[] keys,
public void closeOp(boolean abort) throws HiveException {
if (!abort) {
try {
- // put the hash related stats in statsMap if applicable, so that they
- // are sent to jt as counters
- if (hashAggr && counterNameToEnum != null) {
- incrCounter(counterNameHashOut, numRowsHashTbl);
- }
-
// If there is no grouping key and no row came to this operator
if (firstRow && (keyFields.length == 0)) {
firstRow = false;
@@ -1120,13 +1114,6 @@ public void closeOp(boolean abort) throws HiveException {
}
}
- @Override
- protected List<String> getAdditionalCounters() {
- List<String> ctrList = new ArrayList<String>();
- ctrList.add(getWrappedCounterName(counterNameHashOut));
- return ctrList;
- }
-
// Group by contains the columns needed - no need to aggregate from children
public List<String> genColLists(
HashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtx) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
index fb6ecb4..ad8f09a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java
@@ -168,10 +168,6 @@ public static MapJoinMetaData getMetadata() {
return metadata;
}
- private static final transient String[] FATAL_ERR_MSG = {
- null, // counter value 0 means no error
- "Mapside join exceeds available memory. "
- + "Please try removing the mapjoin hint."};
private final int metadataKeyTag = -1;
transient int[] metadataValueTag;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
index 4da1be8..9324c71 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
@@ -59,11 +59,6 @@ public static MapJoinMetaData getMetadata() {
return metadata;
}
- private static final transient String[] FATAL_ERR_MSG = {
- null, // counter value 0 means no error
- "Mapside join exceeds available memory. "
- + "Please try removing the mapjoin hint."};
-
protected transient MapJoinRowContainer<ArrayList<Object>>[] rowContainerMap;
transient int metadataKeyTag;
transient int[] metadataValueTag;
@@ -109,12 +104,6 @@ protected void initializeOp(Configuration hconf) throws HiveException {
hashTblInitedOnce = false;
}
- @Override
- protected void fatalErrorMessage(StringBuilder errMsg, long counterCode) {
- errMsg.append("Operator " + getOperatorId() + " (id=" + id + "): "
- + FATAL_ERR_MSG[(int) counterCode]);
- }
-
public void generateMapMetaData() throws HiveException, SerDeException {
// generate the meta data for key
// index for key is -1
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index 95f1e2c..1149493 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -43,7 +43,6 @@
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
@@ -61,22 +60,7 @@
protected List<Operator<? extends OperatorDesc>> childOperators;
protected List<Operator<? extends OperatorDesc>> parentOperators;
protected String operatorId;
- /**
- * List of counter names associated with the operator. It contains the
- * following default counters NUM_INPUT_ROWS NUM_OUTPUT_ROWS TIME_TAKEN
- * Individual operators can add to this list via addToCounterNames methods.
- */
- protected ArrayList<String> counterNames;
-
- /**
- * Each operator has its own map of its counter names to disjoint
- * ProgressCounter - it is populated at compile time and is read in at
- * run-time while extracting the operator specific counts.
- */
- protected HashMap<String, ProgressCounter> counterNameToEnum;
-
private transient ExecMapperContext execContext;
-
private static int seqId;
// It can be optimized later so that an operator operation (init/close) is performed
@@ -100,9 +84,6 @@
protected transient State state = State.UNINIT;
- static transient boolean fatalError = false; // fatalError is shared across
- // all operators
-
static {
seqId = 0;
}
@@ -111,6 +92,7 @@
public Operator() {
id = String.valueOf(seqId++);
+ initOperatorId();
}
public static void resetId() {
@@ -187,10 +169,30 @@ public T getConf() {
}
public boolean getDone() {
- return done || fatalError;
+ return done;
+ }
+
+ private final void setChildIsDone(Operator<? extends OperatorDesc> op) {
+ this.removeChildFromProcessing(op);
+ if (this.childOperatorsArray.length == 0) {
+ setDone(true);
+ }
}
- public void setDone(boolean done) {
+ @SuppressWarnings("unchecked")
+ protected final void setDone(boolean done) {
+ if (done && !this.done) {
+ // no more forwarding
+ childOperatorsArray = new Operator[0];
+ childOperatorsTag = new int[0];
+
+ // tell parents we're done
+ if (getParentOperators() != null) {
+ for (Operator<? extends OperatorDesc> op: getParentOperators()) {
+ op.setChildIsDone(this);
+ }
+ }
+ }
this.done = done;
}
@@ -208,6 +210,7 @@ public RowSchema getSchema() {
// non-bean ..
protected transient HashMap<Enum<?>, LongWritable> statsMap = new HashMap<Enum<?>, LongWritable>();
+ @SuppressWarnings("rawtypes")
protected transient OutputCollector out;
protected transient Log LOG = LogFactory.getLog(this.getClass().getName());
protected transient boolean isLogInfoEnabled = LOG.isInfoEnabled();
@@ -254,6 +257,7 @@ public void setReporter(Reporter rep) {
}
}
+ @SuppressWarnings("rawtypes")
public void setOutputCollector(OutputCollector out) {
this.out = out;
@@ -322,6 +326,7 @@ protected boolean areAllParentsInitialized() {
* ignored.
* @throws HiveException
*/
+ @SuppressWarnings("unchecked")
public void initialize(Configuration hconf, ObjectInspector[] inputOIs)
throws HiveException {
if (state == State.INIT) {
@@ -362,6 +367,9 @@ public void initialize(Configuration hconf, ObjectInspector[] inputOIs)
"Hive internal error: cannot find parent in the child operator!");
}
}
+ } else {
+ childOperatorsArray = new Operator[0];
+ childOperatorsTag = new int[0];
}
if (inputObjInspectors.length == 0) {
@@ -472,38 +480,6 @@ public void setInputObjInspectors(ObjectInspector[] inputObjInspectors) {
*/
public abstract void processOp(Object row, int tag) throws HiveException;
- /**
- * Process the row.
- *
- * @param row
- * The object representing the row.
- * @param tag
- * The tag of the row usually means which parent this row comes from.
- * Rows with the same tag should have exactly the same rowInspector
- * all the time.
- */
- public void process(Object row, int tag) throws HiveException {
- if (fatalError) {
- return;
- }
-
- if (counterNameToEnum != null) {
- inputRows++;
- if ((inputRows % 1000) == 0) {
- incrCounter(numInputRowsCntr, inputRows);
- incrCounter(timeTakenCntr, totalTime);
- inputRows = 0;
- totalTime = 0;
- }
-
- beginTime = System.currentTimeMillis();
- processOp(row, tag);
- totalTime += (System.currentTimeMillis() - beginTime);
- } else {
- processOp(row, tag);
- }
- }
-
// If an operator wants to do some work at the beginning of a group
public void startGroup() throws HiveException {
LOG.debug("Starting group");
@@ -512,10 +488,6 @@ public void startGroup() throws HiveException {
return;
}
- if (fatalError) {
- return;
- }
-
LOG.debug("Starting group for children:");
for (Operator<? extends OperatorDesc> op : childOperators) {
op.startGroup();
@@ -532,10 +504,6 @@ public void endGroup() throws HiveException {
return;
}
- if (fatalError) {
- return;
- }
-
LOG.debug("Ending group for children:");
for (Operator<? extends OperatorDesc> op : childOperators) {
op.endGroup();
@@ -577,14 +545,6 @@ public void close(boolean abort) throws HiveException {
state = State.CLOSE;
LOG.info(id + " finished. closing... ");
- if (counterNameToEnum != null) {
- incrCounter(numInputRowsCntr, inputRows);
- incrCounter(numOutputRowsCntr, outputRows);
- incrCounter(timeTakenCntr, totalTime);
- }
-
- LOG.info(id + " forwarded " + cntr + " rows");
-
// call the operator specific close routine
closeOp(abort);
@@ -652,10 +612,6 @@ public void jobClose(Configuration conf, boolean success, JobCloseFeedBack feedB
protected transient Operator<? extends OperatorDesc>[] childOperatorsArray = null;
protected transient int[] childOperatorsTag;
- // counters for debugging
- private transient long cntr = 0;
- private transient long nextCntr = 1;
-
/**
* Replace one child with another at the same position. The parent of the
* child is not changed
@@ -672,6 +628,22 @@ public void replaceChild(Operator<? extends OperatorDesc> child,
childOperators.set(childIndex, newChild);
}
+ private void removeChildFromProcessing(Operator<? extends OperatorDesc> op) {
+ @SuppressWarnings("unchecked")
+ Operator<? extends OperatorDesc>[] processingArray = new Operator[childOperatorsArray.length-1];
+ int[] processingTags = new int[childOperatorsArray.length-1];
+ int p = 0, q = 0;
+ for (; p < childOperatorsArray.length; ++p) {
+ if (childOperatorsArray[p] != op) {
+ processingArray[q] = childOperatorsArray[p];
+ processingTags[q] = childOperatorsTag[p];
+ ++q;
+ }
+ }
+ childOperatorsArray = processingArray;
+ childOperatorsTag = processingTags;
+ }
+
public void removeChild(Operator<? extends OperatorDesc> child) {
int childIndex = childOperators.indexOf(child);
assert childIndex != -1;
@@ -780,63 +752,17 @@ public void replaceParent(Operator<? extends OperatorDesc> parent,
parentOperators.set(parentIndex, newParent);
}
- private long getNextCntr(long cntr) {
- // A very simple counter to keep track of number of rows processed by an
- // operator. It dumps
- // every 1 million times, and quickly before that
- if (cntr >= 1000000) {
- return cntr + 1000000;
- }
-
- return 10 * cntr;
- }
-
protected void forward(Object row, ObjectInspector rowInspector)
throws HiveException {
- if (counterNameToEnum != null) {
- if ((++outputRows % 1000) == 0) {
- incrCounter(numOutputRowsCntr, outputRows);
- outputRows = 0;
- }
- }
-
- if (isLogInfoEnabled) {
- cntr++;
- if (cntr == nextCntr) {
- LOG.info(id + " forwarding " + cntr + " rows");
- nextCntr = getNextCntr(cntr);
- }
- }
-
// For debugging purposes:
// System.out.println("" + this.getClass() + ": " +
// SerDeUtils.getJSONString(row, rowInspector));
// System.out.println("" + this.getClass() + ">> " +
// ObjectInspectorUtils.getObjectInspectorName(rowInspector));
- if (childOperatorsArray == null && childOperators != null) {
- throw new HiveException(
- "Internal Hive error during operator initialization.");
- }
-
- if ((childOperatorsArray == null) || (getDone())) {
- return;
- }
-
- int childrenDone = 0;
for (int i = 0; i < childOperatorsArray.length; i++) {
- Operator<? extends OperatorDesc> o = childOperatorsArray[i];
- if (o.getDone()) {
- childrenDone++;
- } else {
- o.process(row, childOperatorsTag[i]);
- }
- }
-
- // if all children are done, this operator is also done
- if (childrenDone == childOperatorsArray.length) {
- setDone(true);
+ childOperatorsArray[i].processOp(row, childOperatorsTag[i]);
}
}
@@ -1000,174 +926,12 @@ protected static StructObjectInspector initEvaluatorsAndReturnStruct(
outputColName, Arrays.asList(fieldObjectInspectors));
}
- /**
- * All counter stuff below this
- */
-
- /**
- * TODO This is a hack for hadoop 0.17 which only supports enum counters.
- */
- public static enum ProgressCounter {
- CREATED_FILES,
- C1, C2, C3, C4, C5, C6, C7, C8, C9, C10,
- C11, C12, C13, C14, C15, C16, C17, C18, C19, C20,
- C21, C22, C23, C24, C25, C26, C27, C28, C29, C30,
- C31, C32, C33, C34, C35, C36, C37, C38, C39, C40,
- C41, C42, C43, C44, C45, C46, C47, C48, C49, C50,
- C51, C52, C53, C54, C55, C56, C57, C58, C59, C60,
- C61, C62, C63, C64, C65, C66, C67, C68, C69, C70,
- C71, C72, C73, C74, C75, C76, C77, C78, C79, C80,
- C81, C82, C83, C84, C85, C86, C87, C88, C89, C90,
- C91, C92, C93, C94, C95, C96, C97, C98, C99, C100,
- C101, C102, C103, C104, C105, C106, C107, C108, C109, C110,
- C111, C112, C113, C114, C115, C116, C117, C118, C119, C120,
- C121, C122, C123, C124, C125, C126, C127, C128, C129, C130,
- C131, C132, C133, C134, C135, C136, C137, C138, C139, C140,
- C141, C142, C143, C144, C145, C146, C147, C148, C149, C150,
- C151, C152, C153, C154, C155, C156, C157, C158, C159, C160,
- C161, C162, C163, C164, C165, C166, C167, C168, C169, C170,
- C171, C172, C173, C174, C175, C176, C177, C178, C179, C180,
- C181, C182, C183, C184, C185, C186, C187, C188, C189, C190,
- C191, C192, C193, C194, C195, C196, C197, C198, C199, C200,
- C201, C202, C203, C204, C205, C206, C207, C208, C209, C210,
- C211, C212, C213, C214, C215, C216, C217, C218, C219, C220,
- C221, C222, C223, C224, C225, C226, C227, C228, C229, C230,
- C231, C232, C233, C234, C235, C236, C237, C238, C239, C240,
- C241, C242, C243, C244, C245, C246, C247, C248, C249, C250,
- C251, C252, C253, C254, C255, C256, C257, C258, C259, C260,
- C261, C262, C263, C264, C265, C266, C267, C268, C269, C270,
- C271, C272, C273, C274, C275, C276, C277, C278, C279, C280,
- C281, C282, C283, C284, C285, C286, C287, C288, C289, C290,
- C291, C292, C293, C294, C295, C296, C297, C298, C299, C300,
- C301, C302, C303, C304, C305, C306, C307, C308, C309, C310,
- C311, C312, C313, C314, C315, C316, C317, C318, C319, C320,
- C321, C322, C323, C324, C325, C326, C327, C328, C329, C330,
- C331, C332, C333, C334, C335, C336, C337, C338, C339, C340,
- C341, C342, C343, C344, C345, C346, C347, C348, C349, C350,
- C351, C352, C353, C354, C355, C356, C357, C358, C359, C360,
- C361, C362, C363, C364, C365, C366, C367, C368, C369, C370,
- C371, C372, C373, C374, C375, C376, C377, C378, C379, C380,
- C381, C382, C383, C384, C385, C386, C387, C388, C389, C390,
- C391, C392, C393, C394, C395, C396, C397, C398, C399, C400,
- C401, C402, C403, C404, C405, C406, C407, C408, C409, C410,
- C411, C412, C413, C414, C415, C416, C417, C418, C419, C420,
- C421, C422, C423, C424, C425, C426, C427, C428, C429, C430,
- C431, C432, C433, C434, C435, C436, C437, C438, C439, C440,
- C441, C442, C443, C444, C445, C446, C447, C448, C449, C450,
- C451, C452, C453, C454, C455, C456, C457, C458, C459, C460,
- C461, C462, C463, C464, C465, C466, C467, C468, C469, C470,
- C471, C472, C473, C474, C475, C476, C477, C478, C479, C480,
- C481, C482, C483, C484, C485, C486, C487, C488, C489, C490,
- C491, C492, C493, C494, C495, C496, C497, C498, C499, C500,
- C501, C502, C503, C504, C505, C506, C507, C508, C509, C510,
- C511, C512, C513, C514, C515, C516, C517, C518, C519, C520,
- C521, C522, C523, C524, C525, C526, C527, C528, C529, C530,
- C531, C532, C533, C534, C535, C536, C537, C538, C539, C540,
- C541, C542, C543, C544, C545, C546, C547, C548, C549, C550,
- C551, C552, C553, C554, C555, C556, C557, C558, C559, C560,
- C561, C562, C563, C564, C565, C566, C567, C568, C569, C570,
- C571, C572, C573, C574, C575, C576, C577, C578, C579, C580,
- C581, C582, C583, C584, C585, C586, C587, C588, C589, C590,
- C591, C592, C593, C594, C595, C596, C597, C598, C599, C600,
- C601, C602, C603, C604, C605, C606, C607, C608, C609, C610,
- C611, C612, C613, C614, C615, C616, C617, C618, C619, C620,
- C621, C622, C623, C624, C625, C626, C627, C628, C629, C630,
- C631, C632, C633, C634, C635, C636, C637, C638, C639, C640,
- C641, C642, C643, C644, C645, C646, C647, C648, C649, C650,
- C651, C652, C653, C654, C655, C656, C657, C658, C659, C660,
- C661, C662, C663, C664, C665, C666, C667, C668, C669, C670,
- C671, C672, C673, C674, C675, C676, C677, C678, C679, C680,
- C681, C682, C683, C684, C685, C686, C687, C688, C689, C690,
- C691, C692, C693, C694, C695, C696, C697, C698, C699, C700,
- C701, C702, C703, C704, C705, C706, C707, C708, C709, C710,
- C711, C712, C713, C714, C715, C716, C717, C718, C719, C720,
- C721, C722, C723, C724, C725, C726, C727, C728, C729, C730,
- C731, C732, C733, C734, C735, C736, C737, C738, C739, C740,
- C741, C742, C743, C744, C745, C746, C747, C748, C749, C750,
- C751, C752, C753, C754, C755, C756, C757, C758, C759, C760,
- C761, C762, C763, C764, C765, C766, C767, C768, C769, C770,
- C771, C772, C773, C774, C775, C776, C777, C778, C779, C780,
- C781, C782, C783, C784, C785, C786, C787, C788, C789, C790,
- C791, C792, C793, C794, C795, C796, C797, C798, C799, C800,
- C801, C802, C803, C804, C805, C806, C807, C808, C809, C810,
- C811, C812, C813, C814, C815, C816, C817, C818, C819, C820,
- C821, C822, C823, C824, C825, C826, C827, C828, C829, C830,
- C831, C832, C833, C834, C835, C836, C837, C838, C839, C840,
- C841, C842, C843, C844, C845, C846, C847, C848, C849, C850,
- C851, C852, C853, C854, C855, C856, C857, C858, C859, C860,
- C861, C862, C863, C864, C865, C866, C867, C868, C869, C870,
- C871, C872, C873, C874, C875, C876, C877, C878, C879, C880,
- C881, C882, C883, C884, C885, C886, C887, C888, C889, C890,
- C891, C892, C893, C894, C895, C896, C897, C898, C899, C900,
- C901, C902, C903, C904, C905, C906, C907, C908, C909, C910,
- C911, C912, C913, C914, C915, C916, C917, C918, C919, C920,
- C921, C922, C923, C924, C925, C926, C927, C928, C929, C930,
- C931, C932, C933, C934, C935, C936, C937, C938, C939, C940,
- C941, C942, C943, C944, C945, C946, C947, C948, C949, C950,
- C951, C952, C953, C954, C955, C956, C957, C958, C959, C960,
- C961, C962, C963, C964, C965, C966, C967, C968, C969, C970,
- C971, C972, C973, C974, C975, C976, C977, C978, C979, C980,
- C981, C982, C983, C984, C985, C986, C987, C988, C989, C990,
- C991, C992, C993, C994, C995, C996, C997, C998, C999, C1000
- };
-
- private static int totalNumCntrs = 1000;
-
- /**
- * populated at runtime from hadoop counters at run time in the client.
- */
- protected transient HashMap<String, Long> counters;
-
- /**
- * keeps track of unique ProgressCounter enums used this value is used at
- * compile time while assigning ProgressCounter enums to counter names.
- */
- private static int lastEnumUsed;
-
- protected transient long inputRows = 0;
- protected transient long outputRows = 0;
- protected transient long beginTime = 0;
- protected transient long totalTime = 0;
-
protected transient Object groupKeyObject;
- /**
- * this is called in operators in map or reduce tasks.
- *
- * @param name
- * @param amount
- */
- protected void incrCounter(String name, long amount) {
- String counterName = getWrappedCounterName(name);
- ProgressCounter pc = counterNameToEnum.get(counterName);
-
- // Currently, we maintain fixed number of counters per plan - in case of a
- // bigger tree, we may run out of them
- if (pc == null) {
- LOG
- .warn("Using too many counters. Increase the total number of counters for "
- + counterName);
- } else if (reporter != null) {
- reporter.incrCounter(pc, amount);
- }
- }
-
- public ArrayList<String> getCounterNames() {
- return counterNames;
- }
-
- public void setCounterNames(ArrayList<String> counterNames) {
- this.counterNames = counterNames;
- }
-
public String getOperatorId() {
return operatorId;
}
- public final String getWrappedCounterName(String ctrName) {
- return String.format(counterNameFormat, getOperatorId(), ctrName);
- }
-
public void initOperatorId() {
setOperatorId(getName() + "_" + this.id);
}
@@ -1176,145 +940,6 @@ public void setOperatorId(String operatorId) {
this.operatorId = operatorId;
}
- public HashMap<String, Long> getCounters() {
- return counters;
- }
-
- /**
- * called in ExecDriver.progress periodically.
- *
- * @param ctrs
- * counters from the running job
- */
- @SuppressWarnings("unchecked")
- public void updateCounters(Counters ctrs) {
- if (counters == null) {
- counters = new HashMap<String, Long>();
- }
-
- // For some old unit tests, the counters will not be populated. Eventually,
- // the old tests should be removed
- if (counterNameToEnum == null) {
- return;
- }
-
- for (Map.Entry<String, ProgressCounter> counter : counterNameToEnum
- .entrySet()) {
- counters.put(counter.getKey(), ctrs.getCounter(counter.getValue()));
- }
- // update counters of child operators
- // this won't be an infinite loop since the operator graph is acyclic
- // but, some operators may be updated more than once and that's ok
- if (getChildren() != null) {
- for (Node op : getChildren()) {
- ((Operator<? extends OperatorDesc>) op).updateCounters(ctrs);
- }
- }
- }
-
- /**
- * Recursively check this operator and its descendants to see if the fatal
- * error counter is set to non-zero.
- *
- * @param ctrs
- */
- public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
- if (counterNameToEnum == null) {
- return false;
- }
-
- String counterName = getWrappedCounterName(fatalErrorCntr);
- ProgressCounter pc = counterNameToEnum.get(counterName);
-
- // Currently, we maintain fixed number of counters per plan - in case of a
- // bigger tree, we may run out of them
- if (pc == null) {
- LOG
- .warn("Using too many counters. Increase the total number of counters for "
- + counterName);
- } else {
- long value = ctrs.getCounter(pc);
- fatalErrorMessage(errMsg, value);
- if (value != 0) {
- return true;
- }
- }
-
- if (getChildren() != null) {
- for (Node op : getChildren()) {
- if (((Operator<? extends OperatorDesc>) op).checkFatalErrors(ctrs,
- errMsg)) {
- return true;
- }
- }
- }
- return false;
- }
-
- /**
- * Get the fatal error message based on counter's code.
- *
- * @param errMsg
- * error message should be appended to this output parameter.
- * @param counterValue
- * input counter code.
- */
- protected void fatalErrorMessage(StringBuilder errMsg, long counterValue) {
- }
-
- // A given query can have multiple map-reduce jobs
- public static void resetLastEnumUsed() {
- lastEnumUsed = 0;
- }
-
- /**
- * Called only in SemanticAnalyzer after all operators have added their own
- * set of counter names.
- */
- public void assignCounterNameToEnum() {
- if (counterNameToEnum != null) {
- return;
- }
- counterNameToEnum = new HashMap<String, ProgressCounter>();
- for (String counterName : getCounterNames()) {
- ++lastEnumUsed;
-
- // TODO Hack for hadoop-0.17
- // Currently, only maximum number of 'totalNumCntrs' can be used. If you
- // want
- // to add more counters, increase the number of counters in
- // ProgressCounter
- if (lastEnumUsed > totalNumCntrs) {
- LOG
- .warn("Using too many counters. Increase the total number of counters");
- return;
- }
- String enumName = "C" + lastEnumUsed;
- ProgressCounter ctr = ProgressCounter.valueOf(enumName);
- counterNameToEnum.put(counterName, ctr);
- }
- }
-
- protected static String numInputRowsCntr = "NUM_INPUT_ROWS";
- protected static String numOutputRowsCntr = "NUM_OUTPUT_ROWS";
- protected static String timeTakenCntr = "TIME_TAKEN";
- protected static String fatalErrorCntr = "FATAL_ERROR";
- private static String counterNameFormat = "CNTR_NAME_%s_%s";
-
- public void initializeCounters() {
- initOperatorId();
- counterNames = new ArrayList<String>();
- counterNames.add(getWrappedCounterName(numInputRowsCntr));
- counterNames.add(getWrappedCounterName(numOutputRowsCntr));
- counterNames.add(getWrappedCounterName(timeTakenCntr));
- counterNames.add(getWrappedCounterName(fatalErrorCntr));
- /* getAdditionalCounter should return Wrapped counters */
- List<String> newCntrs = getAdditionalCounters();
- if (newCntrs != null) {
- counterNames.addAll(newCntrs);
- }
- }
-
/*
* By default, the list is empty - if an operator wants to add more counters,
* it should override this method and provide the new list. Counter names returned
@@ -1325,15 +950,6 @@ public void initializeCounters() {
return null;
}
- public HashMap<String, ProgressCounter> getCounterNameToEnum() {
- return counterNameToEnum;
- }
-
- public void setCounterNameToEnum(
- HashMap<String, ProgressCounter> counterNameToEnum) {
- this.counterNameToEnum = counterNameToEnum;
- }
-
/**
* Return the type of the specific operator among the
* types in OperatorType.
@@ -1407,6 +1023,7 @@ public boolean supportSkewJoinOptimization() {
}
}
+ @SuppressWarnings("unchecked")
T descClone = (T)conf.clone();
Operator<? extends OperatorDesc> ret =
(Operator<? extends OperatorDesc>) OperatorFactory.getAndMakeChild(
@@ -1481,6 +1098,7 @@ public boolean opAllowedBeforeSortMergeJoin() {
return true;
}
+ @Override
public String toString() {
return getName() + "[" + getIdentifier() + "]";
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
index bbbcfbf..01f03de 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
@@ -106,7 +106,6 @@ public OpTuple(Class<T> descClass, Class<? extends Operator<T>> opClass) {
if (o.descClass == opClass) {
try {
Operator<T> op = (Operator<T>) o.opClass.newInstance();
- op.initializeCounters();
return op;
} catch (Exception e) {
e.printStackTrace();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
index 6a538e8..d5f489f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
@@ -296,15 +296,6 @@ public void processOp(Object row, int tag) throws HiveException {
keyWritable.setHashCode(keyHashCode);
if (out != null) {
out.collect(keyWritable, value);
- // Since this is a terminal operator, update counters explicitly -
- // forward is not called
- if (counterNameToEnum != null) {
- ++outputRows;
- if (outputRows % 1000 == 0) {
- incrCounter(numOutputRowsCntr, outputRows);
- outputRows = 0;
- }
- }
}
}
} catch (SerDeException e) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
index 1a784b2..c71aec2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
@@ -554,7 +554,7 @@ private void fetchOneRow(byte tag) {
fetchDone[tag] = true;
return;
}
- forwardOp.process(row.o, tag);
+ forwardOp.processOp(row.o, tag);
// check if any operator had a fatal error or early exit during
// execution
if (forwardOp.getDone()) {
@@ -799,7 +799,7 @@ private boolean next(Integer current) throws IOException, HiveException {
// Pass the row through the operator tree. It is guaranteed that not more than 1 row can
// be produced from an input row.
- forwardOp.process(nextRow.o, 0);
+ forwardOp.processOp(nextRow.o, 0);
nextRow = sinkOp.getResult();
// It is possible that the row got absorbed in the operator tree.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index b789d78..1537c31 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -453,7 +453,6 @@ public static void serializeTasks(Task<? extends Serializable> t, OutputStream o
// workaround for java 1.5
e.setPersistenceDelegate(ExpressionTypes.class, new EnumDelegate());
e.setPersistenceDelegate(GroupByDesc.Mode.class, new EnumDelegate());
- e.setPersistenceDelegate(Operator.ProgressCounter.class, new EnumDelegate());
e.writeObject(t);
} finally {
@@ -492,7 +491,6 @@ public void exceptionThrown(Exception e) {
// workaround for java 1.5
e.setPersistenceDelegate(ExpressionTypes.class, new EnumDelegate());
e.setPersistenceDelegate(GroupByDesc.Mode.class, new EnumDelegate());
- e.setPersistenceDelegate(Operator.ProgressCounter.class, new EnumDelegate());
e.setPersistenceDelegate(org.datanucleus.sco.backed.Map.class, new MapDelegate());
e.setPersistenceDelegate(org.datanucleus.sco.backed.List.class, new ListDelegate());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index 9fbabae..a0d391e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -195,18 +195,15 @@ public ExecDriver(MapredWork plan, JobConf job, boolean isSilent) throws HiveExc
*
* @return true if fatal errors happened during job execution, false otherwise.
*/
+ @SuppressWarnings("deprecation")
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
- for (Operator<? extends OperatorDesc> op : work.getAliasToWork().values()) {
- if (op.checkFatalErrors(ctrs, errMsg)) {
- return true;
- }
- }
- if (work.getReducer() != null) {
- if (work.getReducer().checkFatalErrors(ctrs, errMsg)) {
- return true;
- }
+ Counters.Counter cntr = ctrs.findCounter(
+ HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERGROUP),
+ HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERFATAL));
+ if (cntr == null || cntr.getValue() == 0) {
+ return false;
}
- return false;
+ return true;
}
protected void createTmpDirs() throws IOException {
@@ -1001,16 +998,6 @@ public String getName() {
}
@Override
- public void updateCounters(Counters ctrs, RunningJob rj) throws IOException {
- for (Operator<? extends OperatorDesc> op : work.getAliasToWork().values()) {
- op.updateCounters(ctrs);
- }
- if (work.getReducer() != null) {
- work.getReducer().updateCounters(ctrs);
- }
- }
-
- @Override
public void logPlanProgress(SessionState ss) throws IOException {
ss.getHiveHistory().logPlanProgress(queryPlan);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java
index 444e29b..e4f20cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java
@@ -28,6 +28,8 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.metadata.HiveFatalException;
import org.apache.hadoop.hive.ql.exec.FetchOperator;
import org.apache.hadoop.hive.ql.exec.MapOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
@@ -170,10 +172,16 @@ public void map(Object key, Object value, OutputCollector output,
if (e instanceof OutOfMemoryError) {
// Don't create a new object if we are already out of memory
throw (OutOfMemoryError) e;
- } else {
- l4j.fatal(StringUtils.stringifyException(e));
- throw new RuntimeException(e);
+ } else if (e instanceof HiveFatalException) {
+ if (rp != null) {
+ rp.incrCounter(HiveConf.getVar(jc, HiveConf.ConfVars.HIVECOUNTERGROUP),
+ HiveConf.getVar(jc, HiveConf.ConfVars.HIVECOUNTERFATAL),
+ 1);
+ }
}
+
+ l4j.fatal(StringUtils.stringifyException(e));
+ throw new RuntimeException(e);
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
index 82fd3d5..473642f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java
@@ -25,9 +25,12 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
+import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.metadata.HiveFatalException;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.exec.MapredContext;
import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -259,7 +262,7 @@ public void reduce(Object key, Iterator values, OutputCollector output,
}
}
try {
- reducer.process(row, tag.get());
+ reducer.processOp(row, tag.get());
} catch (Exception e) {
String rowString = null;
try {
@@ -278,10 +281,16 @@ public void reduce(Object key, Iterator values, OutputCollector output,
if (e instanceof OutOfMemoryError) {
// Don't create a new object if we are already out of memory
throw (OutOfMemoryError) e;
- } else {
- l4j.fatal(StringUtils.stringifyException(e));
- throw new RuntimeException(e);
+ } else if (e instanceof HiveFatalException) {
+ if (rp != null) {
+ rp.incrCounter(HiveConf.getVar(jc, HiveConf.ConfVars.HIVECOUNTERGROUP),
+ HiveConf.getVar(jc, HiveConf.ConfVars.HIVECOUNTERFATAL),
+ 1);
+ }
}
+
+ l4j.fatal(StringUtils.stringifyException(e));
+ throw new RuntimeException(e);
}
}
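
ExecMapper and ExecReducer now share the catch discipline shown above: OutOfMemoryError is rethrown untouched, a HiveFatalException first bumps the configurable FATAL counter so the client can tell an unrecoverable failure from a transient one, and every other path still ends in a RuntimeException that kills the task attempt. Below is a self-contained sketch of that shared pattern; FatalCounterSketch and handle are hypothetical names, while jc and rp correspond to the JobConf and Reporter fields visible in the hunks.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.HiveFatalException;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.StringUtils;

class FatalCounterSketch {
  private static final Log l4j = LogFactory.getLog(FatalCounterSketch.class);

  static void handle(Throwable e, JobConf jc, Reporter rp) {
    if (e instanceof OutOfMemoryError) {
      throw (OutOfMemoryError) e; // don't allocate while already out of memory
    } else if (e instanceof HiveFatalException) {
      // mark the job fatally failed so the client stops waiting on retries
      if (rp != null) {
        rp.incrCounter(HiveConf.getVar(jc, HiveConf.ConfVars.HIVECOUNTERGROUP),
            HiveConf.getVar(jc, HiveConf.ConfVars.HIVECOUNTERFATAL), 1L);
      }
    }
    l4j.fatal(StringUtils.stringifyException(e));
    throw new RuntimeException(e);
  }
}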
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
index cd872b2..7c575b2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
@@ -34,11 +34,11 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.MapRedStats;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskHandle;
import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.Operator.ProgressCounter;
import org.apache.hadoop.hive.ql.history.HiveHistory.Keys;
import org.apache.hadoop.hive.ql.plan.ReducerTimeStatsPerJob;
import org.apache.hadoop.hive.ql.session.SessionState;
@@ -84,9 +84,6 @@ private void updateCounters(Counters ctrs, RunningJob rj) throws IOException {
// we may still be able to retrieve the job status - so ignore
return;
}
- if(callBackObj != null) {
- callBackObj.updateCounters(ctrs, rj);
- }
}
/**
@@ -197,6 +194,7 @@ public static void killRunningJobs() {
}
}
+ @SuppressWarnings("deprecation")
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
if (ctrs == null) {
// hadoop might return null if it cannot locate the job.
@@ -204,7 +202,9 @@ public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
return false;
}
// check for number of created files
- long numFiles = ctrs.getCounter(ProgressCounter.CREATED_FILES);
+ Counters.Counter cntr = ctrs.findCounter(HiveConf.getVar(job, ConfVars.HIVECOUNTERGROUP),
+ HiveConf.getVar(job, ConfVars.HIVECOUNTERCREATEDFILES));
+ long numFiles = cntr != null ? cntr.getValue() : 0;
long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAXCREATEDFILES);
if (numFiles > upperLimit) {
errMsg.append("total number of created files now is " + numFiles + ", which exceeds ").append(upperLimit);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHook.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHook.java
index 02ff977..d529e97 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHook.java
@@ -22,13 +22,10 @@
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.mapred.Counters;
-import org.apache.hadoop.mapred.RunningJob;
@SuppressWarnings("deprecation")
public interface HadoopJobExecHook {
-
- public void updateCounters(Counters ctrs, RunningJob rj) throws IOException;
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg);
public void logPlanProgress(SessionState ss) throws IOException;
-
+
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
index f72ecfb..7e81c55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
@@ -352,7 +352,7 @@ private void startForward(boolean inputFileChangeSenstive, String bigTableBucket
break;
}
fetchOpRows++;
- forwardOp.process(row.o, 0);
+ forwardOp.processOp(row.o, 0);
// check if any operator had a fatal error or early exit during
// execution
if (forwardOp.getDone()) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
index ad14966..7f91f72 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
@@ -372,10 +372,5 @@ public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
@Override
public void logPlanProgress(SessionState ss) throws IOException {
// no op
- }
-
- @Override
- public void updateCounters(Counters ctrs, RunningJob rj) throws IOException {
- // no op
- }
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
index f66b82e..0e85709 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
@@ -374,9 +374,4 @@ public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
public void logPlanProgress(SessionState ss) throws IOException {
// no op
}
-
- @Override
- public void updateCounters(Counters ctrs, RunningJob rj) throws IOException {
- // no op
- }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java
index 6beb54d..dcd5b72 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java
@@ -249,9 +249,4 @@ public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
public void logPlanProgress(SessionState ss) throws IOException {
// no op
}
-
- @Override
- public void updateCounters(Counters ctrs, RunningJob rj) throws IOException {
- // no op
- }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java
new file mode 100644
index 0000000..9de5b2b
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java
@@ -0,0 +1,20 @@
+package org.apache.hadoop.hive.ql.metadata;
+
+public class HiveFatalException extends HiveException {
+
+ public HiveFatalException() {
+ }
+
+ public HiveFatalException(String message) {
+ super(message);
+ }
+
+ public HiveFatalException(Throwable cause) {
+ super(cause);
+ }
+
+ public HiveFatalException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java
index fb2b537..490a7c9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java
@@ -291,13 +291,6 @@ public void compile(final ParseContext pCtx, final List<Task<? extends Serializable>> rootTasks,
- if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEJOBPROGRESS)) {
- for (Task<? extends Serializable> rootTask : rootTasks) {
- generateCountersTask(rootTask);
- }
- }
-
decideExecMode(rootTasks, ctx, globalLimitCtx);
if (qb.isCTAS()) {
@@ -388,55 +381,6 @@ private void setInputFormat(Task<? extends Serializable> task) {
}
}
- // loop over all the tasks recursively
- private void generateCountersTask(Task<? extends Serializable> task) {
- if (task instanceof ExecDriver) {
- HashMap<String, Operator<? extends OperatorDesc>> opMap = ((MapredWork) task
- .getWork()).getAliasToWork();
- if (!opMap.isEmpty()) {
- for (Operator<? extends OperatorDesc> op : opMap.values()) {
- generateCountersOperator(op);
- }
- }
-
- Operator<? extends OperatorDesc> reducer = ((MapredWork) task.getWork())
- .getReducer();
- if (reducer != null) {
- LOG.info("Generating counters for operator " + reducer);
- generateCountersOperator(reducer);
- }
- } else if (task instanceof ConditionalTask) {
- List<Task<? extends Serializable>> listTasks = ((ConditionalTask) task)
- .getListTasks();
- for (Task<? extends Serializable> tsk : listTasks) {
- generateCountersTask(tsk);
- }
- }
-
- // Start the counters from scratch - a hack for hadoop 17.
- Operator.resetLastEnumUsed();
-
- if (task.getChildTasks() == null) {
- return;
- }
-
- for (Task<? extends Serializable> childTask : task.getChildTasks()) {
- generateCountersTask(childTask);
- }
- }
-
- private void generateCountersOperator(Operator<? extends OperatorDesc> op) {
- op.assignCounterNameToEnum();
-
- if (op.getChildOperators() == null) {
- return;
- }
-
- for (Operator<? extends OperatorDesc> child : op.getChildOperators()) {
- generateCountersOperator(child);
- }
- }
-
public ParseContext getParseContext(ParseContext pCtx, List<Task<? extends Serializable>> rootTasks) {
return new ParseContext(conf, pCtx.getQB(), pCtx.getParseTree(),
pCtx.getOpToPartPruner(), pCtx.getOpToPartList(), pCtx.getTopOps(),
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index e10838a..e446b5f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -4938,11 +4938,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input)
qbm.setDPCtx(dest, dpCtx);
}
- if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP
- // turn on hive.task.progress to update # of partitions created to the JT
- HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVEJOBPROGRESS, true);
-
- } else { // QBMetaData.DEST_PARTITION capture the all-SP case
+ if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP
throw new SemanticException(generateErrorMessage(
qb.getParseInfo().getDestForClause(dest),
ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg()));
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
index 79bed09..75801dc 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
@@ -116,7 +116,7 @@ public void testBaseFilterOperator() throws Throwable {
new ObjectInspector[] {r[0].oi});
for (InspectableObject oner : r) {
- op.process(oner.o, 0);
+ op.processOp(oner.o, 0);
}
Map<Enum<?>, Long> results = op.getStats();
@@ -277,7 +277,7 @@ public void testScriptOperator() throws Throwable {
// evaluate on row
for (int i = 0; i < 5; i++) {
- op.process(r[i].o, 0);
+ op.processOp(r[i].o, 0);
}
op.close(false);
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/hooks/OptrStatGroupByHook.java b/ql/src/test/org/apache/hadoop/hive/ql/hooks/OptrStatGroupByHook.java
deleted file mode 100644
index 828de5e..0000000
--- a/ql/src/test/org/apache/hadoop/hive/ql/hooks/OptrStatGroupByHook.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-package org.apache.hadoop.hive.ql.hooks;
-
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Queue;
-import java.util.Set;
-import java.io.Serializable;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.TaskRunner;
-import org.apache.hadoop.hive.ql.plan.api.OperatorType;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
-
-public class OptrStatGroupByHook implements ExecuteWithHookContext {
-
-  public void run(HookContext hookContext) {
-    HiveConf conf = hookContext.getConf();
-
-    List<TaskRunner> completedTasks = hookContext.getCompleteTaskList();
-
-    boolean enableProgress = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEJOBPROGRESS);
-
-    /** For each task, visit the operator tree and, if an operator is a GROUPBY,
-     * print its HASH_OUT operator-level stat value.
-     **/
-    if (completedTasks != null) {
-      for (TaskRunner taskRunner : completedTasks) {
-        Task<? extends Serializable> task = taskRunner.getTask();
-        if (task.isMapRedTask() && !task.isMapRedLocalTask()) {
-          Set<Operator<? extends OperatorDesc>> optrSet = getOptrsForTask(task);
-          for (Operator<? extends OperatorDesc> optr : optrSet) {
-            if (optr.getType() == OperatorType.GROUPBY) {
-              printCounterValue(optr.getCounters());
-            }
-          }
-        }
-      }
-    }
-  }
-
-  private void printCounterValue(HashMap<String, Long> ctrs) {
-    for (String ctrName : ctrs.keySet()) {
-      if (ctrName.contains("HASH_OUT")) {
-        SessionState.getConsole().printError(ctrName + "=" + ctrs.get(ctrName));
-      }
-    }
-  }
-
-  private Set<Operator<? extends OperatorDesc>> getOptrsForTask(
-      Task<? extends Serializable> task) {
-
-    Collection<Operator<? extends OperatorDesc>> topOptrs = task.getTopOperators();
-    Set<Operator<? extends OperatorDesc>> allOptrs =
-        new HashSet<Operator<? extends OperatorDesc>>();
-    Queue<Operator<? extends OperatorDesc>> opsToVisit =
-        new LinkedList<Operator<? extends OperatorDesc>>();
-    if (topOptrs != null) {
-      opsToVisit.addAll(topOptrs);
-      addChildOptrs(opsToVisit, allOptrs);
-    }
-
-    return allOptrs;
-  }
-
-  private void addChildOptrs(
-      Queue<Operator<? extends OperatorDesc>> opsToVisit,
-      Set<Operator<? extends OperatorDesc>> opsVisited) {
-
-    if (opsToVisit == null || opsVisited == null) {
-      return;
-    }
-
-    while (opsToVisit.peek() != null) {
-      Operator<? extends OperatorDesc> op = opsToVisit.remove();
-      opsVisited.add(op);
-      if (op.getChildOperators() != null) {
-        for (Operator<? extends OperatorDesc> childOp : op.getChildOperators()) {
-          if (!opsVisited.contains(childOp)) {
-            opsToVisit.add(childOp);
-          }
-        }
-      }
-    }
-  }
-}
diff --git a/ql/src/test/queries/clientpositive/insert_into3.q b/ql/src/test/queries/clientpositive/insert_into3.q
index e58b212..4ff0edc 100644
--- a/ql/src/test/queries/clientpositive/insert_into3.q
+++ b/ql/src/test/queries/clientpositive/insert_into3.q
@@ -4,10 +4,10 @@ DROP TABLE insert_into3b;
CREATE TABLE insert_into3a (key int, value string);
CREATE TABLE insert_into3b (key int, value string);
-EXPLAIN FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
- INSERT INTO TABLE insert_into3b SELECT * LIMIT 100;
-FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
- INSERT INTO TABLE insert_into3b SELECT * LIMIT 100;
+EXPLAIN FROM src INSERT INTO TABLE insert_into3a SELECT * ORDER BY key, value LIMIT 50
+ INSERT INTO TABLE insert_into3b SELECT * ORDER BY key, value LIMIT 100;
+FROM src INSERT INTO TABLE insert_into3a SELECT * ORDER BY key, value LIMIT 50
+ INSERT INTO TABLE insert_into3b SELECT * ORDER BY key, value LIMIT 100;
SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3a
) t;
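
The ORDER BY additions exist to make the LIMIT deterministic: without a total order, LIMIT 50 may keep any 50 rows of src, so the SUM(HASH(c)) golden values further down would change from run to run; sorting on key, value pins down exactly which rows survive. A toy illustration of the effect on a checksum, in plain Java with made-up data:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class LimitDeterminism {

  // Stand-in for SUM(HASH(c)) over the rows a LIMIT happened to keep.
  static long checksum(List<String> rows) {
    long sum = 0;
    for (String r : rows) {
      sum += r.hashCode();
    }
    return sum;
  }

  public static void main(String[] args) {
    List<String> rows = new ArrayList<>(List.of("b\t2", "a\t1", "c\t3"));

    // Unordered "LIMIT 2": whichever two rows arrive first win, so the
    // checksum depends on arrival order.
    System.out.println("unordered: " + checksum(rows.subList(0, 2)));

    // "ORDER BY ... LIMIT 2": sorting first fixes which rows survive,
    // so the checksum is reproducible.
    Collections.sort(rows);
    System.out.println("ordered:   " + checksum(rows.subList(0, 2)));
  }
}
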
diff --git a/ql/src/test/queries/clientpositive/optrstat_groupby.q b/ql/src/test/queries/clientpositive/optrstat_groupby.q
deleted file mode 100644
index 5993041..0000000
--- a/ql/src/test/queries/clientpositive/optrstat_groupby.q
+++ /dev/null
@@ -1,6 +0,0 @@
-SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.OptrStatGroupByHook;
-SET hive.exec.mode.local.auto=false;
-SET hive.task.progress=true;
--- This test executes the OptrStatGroupBy hook, which prints the operator-level
--- stats of the GROUPBY operator present in the plan of the query below
-SELECT count(1) FROM src;
diff --git a/ql/src/test/results/clientpositive/insert_into3.q.out b/ql/src/test/results/clientpositive/insert_into3.q.out
index 639e2a7..b86fc90 100644
--- a/ql/src/test/results/clientpositive/insert_into3.q.out
+++ b/ql/src/test/results/clientpositive/insert_into3.q.out
@@ -16,22 +16,24 @@ PREHOOK: type: CREATETABLE
POSTHOOK: query: CREATE TABLE insert_into3b (key int, value string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: default@insert_into3b
-PREHOOK: query: EXPLAIN FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
- INSERT INTO TABLE insert_into3b SELECT * LIMIT 100
+PREHOOK: query: EXPLAIN FROM src INSERT INTO TABLE insert_into3a SELECT * ORDER BY key, value LIMIT 50
+ INSERT INTO TABLE insert_into3b SELECT * ORDER BY key, value LIMIT 100
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
- INSERT INTO TABLE insert_into3b SELECT * LIMIT 100
+POSTHOOK: query: EXPLAIN FROM src INSERT INTO TABLE insert_into3a SELECT * ORDER BY key, value LIMIT 50
+ INSERT INTO TABLE insert_into3b SELECT * ORDER BY key, value LIMIT 100
POSTHOOK: type: QUERY
ABSTRACT SYNTAX TREE:
- (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into3a))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 50)) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into3b))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_LIMIT 100)))
+ (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into3a))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 50)) (TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME insert_into3b))) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF)) (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL key)) (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))) (TOK_LIMIT 100)))
STAGE DEPENDENCIES:
Stage-2 is a root stage
- Stage-0 depends on stages: Stage-2
- Stage-3 depends on stages: Stage-0
- Stage-4 depends on stages: Stage-2
- Stage-1 depends on stages: Stage-4
- Stage-5 depends on stages: Stage-1
+ Stage-3 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-3
+ Stage-4 depends on stages: Stage-0
+ Stage-5 depends on stages: Stage-2
+ Stage-6 depends on stages: Stage-5
+ Stage-1 depends on stages: Stage-6
+ Stage-7 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-2
@@ -47,15 +49,19 @@ STAGE PLANS:
expr: value
type: string
outputColumnNames: _col0, _col1
- Limit
- Reduce Output Operator
- sort order:
- tag: -1
- value expressions:
- expr: _col0
- type: string
- expr: _col1
- type: string
+ Reduce Output Operator
+ key expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ sort order: ++
+ tag: -1
+ value expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
Select Operator
expressions:
expr: key
@@ -63,13 +69,39 @@ STAGE PLANS:
expr: value
type: string
outputColumnNames: _col0, _col1
- Limit
- File Output Operator
- compressed: false
- GlobalTableId: 0
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ Reduce Operator Tree:
+ Extract
+ Limit
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+ Stage: Stage-3
+ Map Reduce
+ Alias -> Map Operator Tree:
+#### A masked pattern was here ####
+ Reduce Output Operator
+ key expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ sort order: ++
+ tag: -1
+ value expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
Reduce Operator Tree:
Extract
Limit
@@ -99,15 +131,47 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.insert_into3a
- Stage: Stage-3
+ Stage: Stage-4
Stats-Aggr Operator
- Stage: Stage-4
+ Stage: Stage-5
Map Reduce
Alias -> Map Operator Tree:
#### A masked pattern was here ####
Reduce Output Operator
- sort order:
+ key expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ sort order: ++
+ tag: -1
+ value expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ Reduce Operator Tree:
+ Extract
+ Limit
+ File Output Operator
+ compressed: false
+ GlobalTableId: 0
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+
+ Stage: Stage-6
+ Map Reduce
+ Alias -> Map Operator Tree:
+#### A masked pattern was here ####
+ Reduce Output Operator
+ key expressions:
+ expr: _col0
+ type: string
+ expr: _col1
+ type: string
+ sort order: ++
tag: -1
value expressions:
expr: _col0
@@ -143,18 +207,18 @@ STAGE PLANS:
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.insert_into3b
- Stage: Stage-5
+ Stage: Stage-7
Stats-Aggr Operator
-PREHOOK: query: FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
- INSERT INTO TABLE insert_into3b SELECT * LIMIT 100
+PREHOOK: query: FROM src INSERT INTO TABLE insert_into3a SELECT * ORDER BY key, value LIMIT 50
+ INSERT INTO TABLE insert_into3b SELECT * ORDER BY key, value LIMIT 100
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@insert_into3a
PREHOOK: Output: default@insert_into3b
-POSTHOOK: query: FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
- INSERT INTO TABLE insert_into3b SELECT * LIMIT 100
+POSTHOOK: query: FROM src INSERT INTO TABLE insert_into3a SELECT * ORDER BY key, value LIMIT 50
+ INSERT INTO TABLE insert_into3b SELECT * ORDER BY key, value LIMIT 100
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@insert_into3a
@@ -179,7 +243,7 @@ POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key,
POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-7813690682
+-1254133670
PREHOOK: query: SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3b
) t
@@ -196,7 +260,7 @@ POSTHOOK: Lineage: insert_into3a.key EXPRESSION [(src)src.FieldSchema(name:key,
POSTHOOK: Lineage: insert_into3a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-10226524244
+-1142373758
PREHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE insert_into3a SELECT * LIMIT 10
INSERT INTO TABLE insert_into3b SELECT * LIMIT 10
PREHOOK: type: QUERY
@@ -393,7 +457,7 @@ POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key,
POSTHOOK: Lineage: insert_into3b.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: insert_into3b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-9399898328
+-1968999674
PREHOOK: query: DROP TABLE insert_into3a
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@insert_into3a
diff --git a/ql/src/test/results/clientpositive/optrstat_groupby.q.out b/ql/src/test/results/clientpositive/optrstat_groupby.q.out
deleted file mode 100644
index b80a03f..0000000
--- a/ql/src/test/results/clientpositive/optrstat_groupby.q.out
+++ /dev/null
@@ -1,8 +0,0 @@
-PREHOOK: query: -- This test executes the OptrStatGroupBy hook, which prints the operator-level
--- stats of the GROUPBY operator present in the plan of the query below
-SELECT count(1) FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-CNTR_NAME_GBY_2_COUNT_HASH_OUT=1
-500
diff --git a/ql/src/test/results/compiler/plan/case_sensitivity.q.xml b/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
index 1f1934f..8126ced 100644
--- a/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
+++ b/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
[hunks elided: deletions of per-operator counter metadata at FS_6, TS_5, FS_3, SEL_2, FIL_4; the XML element lines did not survive extraction. Residue from two further golden-plan diffs whose headers were also lost follows (FS_3, SEL_2, FIL_4 and RS_3, GBY_2, SEL_1, FS_6, SEL_5, GBY_4).]
diff --git a/ql/src/test/results/compiler/plan/groupby2.q.xml b/ql/src/test/results/compiler/plan/groupby2.q.xml
index 095d99e..648e7a3 100755
--- a/ql/src/test/results/compiler/plan/groupby2.q.xml
+++ b/ql/src/test/results/compiler/plan/groupby2.q.xml
[hunks elided: deletions of per-operator counter metadata at RS_3, GBY_2, SEL_1, FS_6, SEL_5, GBY_4; the XML element lines did not survive extraction.]
diff --git a/ql/src/test/results/compiler/plan/groupby3.q.xml b/ql/src/test/results/compiler/plan/groupby3.q.xml
index 3127fa6..4f71542 100644
--- a/ql/src/test/results/compiler/plan/groupby3.q.xml
+++ b/ql/src/test/results/compiler/plan/groupby3.q.xml
[hunks elided: counter-metadata deletions at RS_3, GBY_2, SEL_1, FS_6, SEL_5, GBY_4; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/groupby4.q.xml b/ql/src/test/results/compiler/plan/groupby4.q.xml
index f268309..e51e024 100644
--- a/ql/src/test/results/compiler/plan/groupby4.q.xml
+++ b/ql/src/test/results/compiler/plan/groupby4.q.xml
[hunks elided: counter-metadata deletions at RS_3, GBY_2, SEL_1, FS_6, SEL_5, GBY_4; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/groupby5.q.xml b/ql/src/test/results/compiler/plan/groupby5.q.xml
index 66b00d4..bed1ca1 100644
--- a/ql/src/test/results/compiler/plan/groupby5.q.xml
+++ b/ql/src/test/results/compiler/plan/groupby5.q.xml
[hunks elided: counter-metadata deletions at RS_3, GBY_2, SEL_1, FS_6, SEL_5, GBY_4; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/groupby6.q.xml b/ql/src/test/results/compiler/plan/groupby6.q.xml
index 89b5026..8bd6751 100644
--- a/ql/src/test/results/compiler/plan/groupby6.q.xml
+++ b/ql/src/test/results/compiler/plan/groupby6.q.xml
[hunks elided: counter-metadata deletions at RS_3, GBY_2, SEL_1, FS_6, SEL_5, GBY_4; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/input1.q.xml b/ql/src/test/results/compiler/plan/input1.q.xml
index e2ac867..f9aca0b 100755
--- a/ql/src/test/results/compiler/plan/input1.q.xml
+++ b/ql/src/test/results/compiler/plan/input1.q.xml
[hunks elided: counter-metadata deletions at FS_6, TS_5, FS_3, SEL_2, FIL_4, plus residue from several follow-on golden-plan diffs (multi-insert sinks FS_11/TS_10 through FS_15/TS_14, select/filter chains, and script-operator plans ending RS_3, SCR_2, SEL_1, FS_7, SCR_6, SEL_5, EX_4) whose file headers were lost; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/input3.q.xml b/ql/src/test/results/compiler/plan/input3.q.xml
index bcbb126..600035f 100755
--- a/ql/src/test/results/compiler/plan/input3.q.xml
+++ b/ql/src/test/results/compiler/plan/input3.q.xml
[hunks elided: counter-metadata deletions at FS_14/TS_13 through FS_20/TS_19 and the following select/filter chains, plus residue from a script-operator plan (RS_3, FIL_8, SCR_2, SEL_1, FS_7, SEL_6, EX_4) whose file header was lost; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/input5.q.xml b/ql/src/test/results/compiler/plan/input5.q.xml
index 3f0df71..fa76634 100644
--- a/ql/src/test/results/compiler/plan/input5.q.xml
+++ b/ql/src/test/results/compiler/plan/input5.q.xml
[hunks elided: counter-metadata deletions at RS_3, SCR_2, SEL_1, FS_6, SEL_5, EX_4; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/input6.q.xml b/ql/src/test/results/compiler/plan/input6.q.xml
index dd9cb76..8130ab2 100644
--- a/ql/src/test/results/compiler/plan/input6.q.xml
+++ b/ql/src/test/results/compiler/plan/input6.q.xml
[hunks elided: counter-metadata deletions at FS_6, TS_5, FS_3, SEL_2, FIL_4, plus residue from several follow-on golden-plan diffs (repeating FS/TS/SEL/FIL groups, ending in a join plan: src1, src2, FS_6, SEL_5, JOIN_4) whose file headers were lost; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/join2.q.xml b/ql/src/test/results/compiler/plan/join2.q.xml
index 74ce1d7..6b79d03 100644
--- a/ql/src/test/results/compiler/plan/join2.q.xml
+++ b/ql/src/test/results/compiler/plan/join2.q.xml
[hunks elided: counter-metadata deletions at RS_6, TS_12, aliases src1/src2/src3, FS_10, SEL_9, JOIN_8, FS_11, JOIN_5; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/join3.q.xml b/ql/src/test/results/compiler/plan/join3.q.xml
index 2d5de44..65be32f 100644
--- a/ql/src/test/results/compiler/plan/join3.q.xml
+++ b/ql/src/test/results/compiler/plan/join3.q.xml
[hunks elided: counter-metadata deletions at aliases src1/src2/src3, FS_8, SEL_7, JOIN_6; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/join4.q.xml b/ql/src/test/results/compiler/plan/join4.q.xml
index 0ec18b8..ad6f640 100644
--- a/ql/src/test/results/compiler/plan/join4.q.xml
+++ b/ql/src/test/results/compiler/plan/join4.q.xml
[hunks elided: counter-metadata deletions at aliases a/b, SEL_5, FIL_12, SEL_2, FIL_13, FS_11, SEL_9, JOIN_8; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/join5.q.xml b/ql/src/test/results/compiler/plan/join5.q.xml
index 16d91be..2b57f55 100644
--- a/ql/src/test/results/compiler/plan/join5.q.xml
+++ b/ql/src/test/results/compiler/plan/join5.q.xml
[hunks elided: counter-metadata deletions at aliases a/b, SEL_5, FIL_12, SEL_2, FIL_13, FS_11, SEL_9, JOIN_8; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/join6.q.xml b/ql/src/test/results/compiler/plan/join6.q.xml
index efdde99..4b9d4fd 100644
--- a/ql/src/test/results/compiler/plan/join6.q.xml
+++ b/ql/src/test/results/compiler/plan/join6.q.xml
[hunks elided: counter-metadata deletions at aliases a/b, SEL_5, FIL_12, SEL_2, FIL_13, FS_11, SEL_9, JOIN_8; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/join7.q.xml b/ql/src/test/results/compiler/plan/join7.q.xml
index c0a1431..e4b9a69 100644
--- a/ql/src/test/results/compiler/plan/join7.q.xml
+++ b/ql/src/test/results/compiler/plan/join7.q.xml
[hunks elided: counter-metadata deletions at aliases a/b/c, SEL_8, FIL_16, SEL_2, FIL_17, SEL_5, FIL_18, FS_15, SEL_13, JOIN_12; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/join8.q.xml b/ql/src/test/results/compiler/plan/join8.q.xml
index c727989..72b3e48 100644
--- a/ql/src/test/results/compiler/plan/join8.q.xml
+++ b/ql/src/test/results/compiler/plan/join8.q.xml
[hunks elided: counter-metadata deletions at aliases a/b, SEL_5, FIL_14, SEL_2, FIL_15, FS_12, SEL_9, FIL_13, JOIN_8; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/sample1.q.xml b/ql/src/test/results/compiler/plan/sample1.q.xml
index 03b7114..0e5729f 100644
--- a/ql/src/test/results/compiler/plan/sample1.q.xml
+++ b/ql/src/test/results/compiler/plan/sample1.q.xml
[hunks elided: counter-metadata deletions at FS_4, SEL_3, FIL_1, plus residue from the remaining sample plans (repeating FS_5/TS_4/FS_3/SEL_2/FIL_1 and FS_7/TS_6 groups) and two further plans (FS_3, SEL_2, FIL_4 and FS_2, SEL_1) whose file headers were lost; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/udf6.q.xml b/ql/src/test/results/compiler/plan/udf6.q.xml
index 65f96e1..6735f6a 100644
--- a/ql/src/test/results/compiler/plan/udf6.q.xml
+++ b/ql/src/test/results/compiler/plan/udf6.q.xml
[hunks elided: counter-metadata deletions at FS_2, SEL_1; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/udf_case.q.xml b/ql/src/test/results/compiler/plan/udf_case.q.xml
index 308732b..ebc098a 100644
--- a/ql/src/test/results/compiler/plan/udf_case.q.xml
+++ b/ql/src/test/results/compiler/plan/udf_case.q.xml
[hunks elided: counter-metadata deletions at FS_3, LIM_2, SEL_1; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/udf_when.q.xml b/ql/src/test/results/compiler/plan/udf_when.q.xml
index 82a0dcf..71e4880 100644
--- a/ql/src/test/results/compiler/plan/udf_when.q.xml
+++ b/ql/src/test/results/compiler/plan/udf_when.q.xml
[hunks elided: counter-metadata deletions at FS_3, LIM_2, SEL_1; XML lost in extraction.]
diff --git a/ql/src/test/results/compiler/plan/union.q.xml b/ql/src/test/results/compiler/plan/union.q.xml
index 1201336..16289da 100644
--- a/ql/src/test/results/compiler/plan/union.q.xml
+++ b/ql/src/test/results/compiler/plan/union.q.xml
[hunks elided: counter-metadata deletions at FS_12, TS_11, FS_8, SEL_7, UNION_6, SEL_5, FIL_10, SEL_2, FIL_9; XML lost in extraction.]