diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 1672453..0a76de5 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -512,8 +512,6 @@
HIVEDEBUGLOCALTASK("hive.debug.localtask",false),
- HIVEJOBPROGRESS("hive.task.progress", false),
-
HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat"),
HIVEENFORCEBUCKETING("hive.enforce.bucketing", false),
@@ -763,6 +761,9 @@
"org.apache.hadoop.hive.ql.exec.PTFPersistence$PartitionedByteBasedList"),
HIVE_PTF_PARTITION_PERSISTENT_SIZE("hive.ptf.partition.persistence.memsize",
(int) Math.pow(2, (5 + 10 + 10)) ), // 32MB
+ HIVECOUNTERGROUP("hive.counters.group.name", "HIVE"),
+ HIVECOUNTERCREATEDFILES("hive.counters.created.files", "CREATED_FILES"),
+ HIVECOUNTERFATAL("hive.counters.fatal", "FATAL_ERROR"),
;
public final String varname;
diff --git conf/hive-default.xml.template conf/hive-default.xml.template
index 3a7d1dc..7849e51 100644
--- conf/hive-default.xml.template
+++ conf/hive-default.xml.template
@@ -710,12 +710,6 @@
-<property>
-  <name>hive.task.progress</name>
-  <value>false</value>
-  <description>Whether Hive should periodically update task progress counters during execution. Enabling this allows task progress to be monitored more closely in the job tracker, but may impose a performance penalty. This flag is automatically set to true for jobs with hive.exec.dynamic.partition set to true.</description>
-</property>
-
hive.hwi.war.file
lib/hive-hwi-@VERSION@.war
This sets the path to the HWI war file, relative to ${HIVE_HOME}.
diff --git data/conf/hive-site.xml data/conf/hive-site.xml
index 544ba35..cd98f2e 100644
--- data/conf/hive-site.xml
+++ data/conf/hive-site.xml
@@ -144,12 +144,6 @@
-<property>
-  <name>hive.task.progress</name>
-  <value>false</value>
-  <description>Track progress of a task</description>
-</property>
-
hive.support.concurrency
true
Whether hive supports concurrency or not. A zookeeper instance must be up and running for the default hive lock manager to support read-write locks.
diff --git ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
index 23cdfc2..b0e586e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
+++ ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
@@ -333,7 +333,6 @@ private void updateCountersInQueryPlan() {
// if the task has started, all operators within the task have
// started
op.setStarted(started.contains(task.getTaskId()));
- op.setOperatorCounters(counters.get(op.getOperatorId()));
// if the task is done, all operators are done as well
op.setDone(done.contains(task.getTaskId()));
}
@@ -382,8 +381,6 @@ private void extractCounters() throws IOException {
}
if (task instanceof ExecDriver) {
ExecDriver mrTask = (ExecDriver) task;
- extractOperatorCounters(mrTask.getWork().getAliasToWork().values(),
- task.getId() + "_MAP");
if (mrTask.mapStarted()) {
started.add(task.getId() + "_MAP");
}
@@ -394,7 +391,6 @@ private void extractCounters() throws IOException {
Collection<Operator<? extends OperatorDesc>> reducerTopOps =
new ArrayList<Operator<? extends OperatorDesc>>();
reducerTopOps.add(mrTask.getWork().getReducer());
- extractOperatorCounters(reducerTopOps, task.getId() + "_REDUCE");
if (mrTask.reduceStarted()) {
started.add(task.getId() + "_REDUCE");
}
@@ -413,34 +409,6 @@ private void extractCounters() throws IOException {
}
}
- private void extractOperatorCounters(
-     Collection<Operator<? extends OperatorDesc>> topOps, String taskId) {
-   Queue<Operator<? extends OperatorDesc>> opsToVisit =
-       new LinkedList<Operator<? extends OperatorDesc>>();
-   Set<Operator<? extends OperatorDesc>> opsVisited =
-       new HashSet<Operator<? extends OperatorDesc>>();
- opsToVisit.addAll(topOps);
- while (opsToVisit.size() != 0) {
- Operator<? extends OperatorDesc> op = opsToVisit.remove();
- opsVisited.add(op);
- Map<String, Long> ctrs = op.getCounters();
- if (ctrs != null) {
- counters.put(op.getOperatorId(), op.getCounters());
- }
- if (op.getDone()) {
- done.add(op.getOperatorId());
- }
- if (op.getChildOperators() != null) {
- for (Operator<? extends OperatorDesc> childOp : op.getChildOperators()) {
- if (!opsVisited.contains(childOp)) {
- opsToVisit.add(childOp);
- }
- }
- }
- }
-
- }
-
public org.apache.hadoop.hive.ql.plan.api.Query getQueryPlan()
throws IOException {
if (query.getStageGraph() == null) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
index 1fff1e9..6e5492d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
@@ -132,8 +132,6 @@ public CommonJoinOperator(CommonJoinOperator clone) {
this.nextSz = clone.nextSz;
this.childOperators = clone.childOperators;
this.parentOperators = clone.parentOperators;
- this.counterNames = clone.counterNames;
- this.counterNameToEnum = clone.counterNameToEnum;
this.done = clone.done;
this.operatorId = clone.operatorId;
this.storage = clone.storage;
@@ -141,12 +139,9 @@ public CommonJoinOperator(CommonJoinOperator clone) {
this.conf = clone.getConf();
this.setSchema(clone.getSchema());
this.alias = clone.alias;
- this.beginTime = clone.beginTime;
- this.inputRows = clone.inputRows;
this.childOperatorsArray = clone.childOperatorsArray;
this.childOperatorsTag = clone.childOperatorsTag;
this.colExprMap = clone.colExprMap;
- this.counters = clone.counters;
this.dummyObj = clone.dummyObj;
this.dummyObjVectors = clone.dummyObjVectors;
this.forwardCache = clone.forwardCache;
@@ -155,7 +150,6 @@ public CommonJoinOperator(CommonJoinOperator clone) {
this.hconf = clone.hconf;
this.id = clone.id;
this.inputObjInspectors = clone.inputObjInspectors;
- this.inputRows = clone.inputRows;
this.noOuterJoin = clone.noOuterJoin;
this.numAliases = clone.numAliases;
this.operatorId = clone.operatorId;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java
index 4da68a0..b1238b9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java
@@ -179,18 +179,15 @@ public ExecDriver(MapredWork plan, JobConf job, boolean isSilent) throws HiveExc
*
* @return true if fatal errors happened during job execution, false otherwise.
*/
+ @SuppressWarnings("deprecation")
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
- for (Operator<? extends OperatorDesc> op : work.getAliasToWork().values()) {
- if (op.checkFatalErrors(ctrs, errMsg)) {
- return true;
- }
- }
- if (work.getReducer() != null) {
- if (work.getReducer().checkFatalErrors(ctrs, errMsg)) {
- return true;
- }
+ Counters.Counter cntr = ctrs.findCounter(
+ HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERGROUP),
+ HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERFATAL));
+ if (cntr == null || cntr.getValue() == 0) {
+ return false;
}
- return false;
+ return true;
}
protected void createTmpDirs() throws IOException {
@@ -966,16 +963,6 @@ protected void localizeMRTmpFilesImpl(Context ctx) {
}
@Override
- public void updateCounters(Counters ctrs, RunningJob rj) throws IOException {
- for (Operator<? extends OperatorDesc> op : work.getAliasToWork().values()) {
- op.updateCounters(ctrs);
- }
- if (work.getReducer() != null) {
- work.getReducer().updateCounters(ctrs);
- }
- }
-
- @Override
public void logPlanProgress(SessionState ss) throws IOException {
ss.getHiveHistory().logPlanProgress(queryPlan);
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java
index 08ffcb1..6de62bc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java
@@ -28,6 +28,8 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.metadata.HiveFatalException;
import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.hive.ql.plan.OperatorDesc;
@@ -157,10 +159,16 @@ public void map(Object key, Object value, OutputCollector output,
if (e instanceof OutOfMemoryError) {
// Don't create a new object if we are already out of memory
throw (OutOfMemoryError) e;
- } else {
- l4j.fatal(StringUtils.stringifyException(e));
- throw new RuntimeException(e);
+ } else if (e instanceof HiveFatalException) {
+ if (rp != null) {
+ rp.incrCounter(HiveConf.getVar(jc, HiveConf.ConfVars.HIVECOUNTERGROUP),
+ HiveConf.getVar(jc, HiveConf.ConfVars.HIVECOUNTERFATAL),
+ 1);
+ }
}
+
+ l4j.fatal(StringUtils.stringifyException(e));
+ throw new RuntimeException(e);
}
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ExecReducer.java ql/src/java/org/apache/hadoop/hive/ql/exec/ExecReducer.java
index 89d9643..2ff106e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecReducer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecReducer.java
@@ -25,12 +25,13 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
-import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.ExecMapper.reportStats;
import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveFatalException;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.serde2.Deserializer;
@@ -250,7 +251,7 @@ public void reduce(Object key, Iterator values, OutputCollector output,
}
}
try {
- reducer.process(row, tag.get());
+ reducer.processOp(row, tag.get());
} catch (Exception e) {
String rowString = null;
try {
@@ -269,10 +270,16 @@ public void reduce(Object key, Iterator values, OutputCollector output,
if (e instanceof OutOfMemoryError) {
// Don't create a new object if we are already out of memory
throw (OutOfMemoryError) e;
- } else {
- l4j.fatal(StringUtils.stringifyException(e));
- throw new RuntimeException(e);
+ } else if (e instanceof HiveFatalException) {
+ if (rp != null) {
+ rp.incrCounter(HiveConf.getVar(jc, HiveConf.ConfVars.HIVECOUNTERGROUP),
+ HiveConf.getVar(jc, HiveConf.ConfVars.HIVECOUNTERFATAL),
+ 1);
+ }
}
+
+ l4j.fatal(StringUtils.stringifyException(e));
+ throw new RuntimeException(e);
}
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
index dfd4524..053e497 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
@@ -487,7 +487,7 @@ public boolean doNext(WritableComparable key, Writable value) throws IOException
public boolean pushRow() throws IOException, HiveException {
InspectableObject row = getNextRow();
if (row != null) {
- operator.process(row.o, 0);
+ operator.processOp(row.o, 0);
}
return row != null;
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 0bf6add..7afbe93 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.SkewedValueList;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
@@ -518,7 +519,9 @@ private void createBucketFiles(FSPaths fsp) throws HiveException {
reporter);
// increment the CREATED_FILES counter
if (reporter != null) {
- reporter.incrCounter(ProgressCounter.CREATED_FILES, 1);
+ reporter.incrCounter(HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP),
+ HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERCREATEDFILES),
+ 1);
}
filesIdx++;
}
@@ -573,16 +576,6 @@ public void processOp(Object row, int tag) throws HiveException {
}
}
- // Since File Sink is a terminal operator, forward is not called - so,
- // maintain the number of output rows explicitly
- if (counterNameToEnum != null) {
- ++outputRows;
- if (outputRows % 1000 == 0) {
- incrCounter(numOutputRowsCntr, outputRows);
- outputRows = 0;
- }
- }
-
try {
updateProgress();
@@ -755,11 +748,8 @@ private FSPaths getDynOutPaths(List<String> row, String lbDirName) throws HiveEx
// check # of dp
if (valToPaths.size() > maxPartitions) {
// throw fatal error
- if (counterNameToEnum != null) {
- incrCounter(fatalErrorCntr, 1);
- }
- fatalError = true;
LOG.error("Fatal error was thrown due to exceeding number of dynamic partitions");
+ throw new HiveException("Fatal Error: Too many partitions");
}
fsp2 = createNewPaths(dpDir);
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
index 532a49b..004db03 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java
@@ -1055,12 +1055,6 @@ protected void forward(Object[] keys,
public void closeOp(boolean abort) throws HiveException {
if (!abort) {
try {
- // put the hash related stats in statsMap if applicable, so that they
- // are sent to jt as counters
- if (hashAggr && counterNameToEnum != null) {
- incrCounter(counterNameHashOut, numRowsHashTbl);
- }
-
// If there is no grouping key and no row came to this operator
if (firstRow && (keyFields.length == 0)) {
firstRow = false;
@@ -1120,13 +1114,6 @@ public void closeOp(boolean abort) throws HiveException {
}
}
- @Override
- protected List<String> getAdditionalCounters() {
-   List<String> ctrList = new ArrayList<String>();
- ctrList.add(getWrappedCounterName(counterNameHashOut));
- return ctrList;
- }
-
// Group by contains the columns needed - no need to aggregate from children
public List<String> genColLists(
HashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtx) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java
index effc540..23a077e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java
@@ -34,8 +34,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.MapRedStats;
-import org.apache.hadoop.hive.ql.exec.Operator.ProgressCounter;
import org.apache.hadoop.hive.ql.history.HiveHistory.Keys;
import org.apache.hadoop.hive.ql.plan.ReducerTimeStatsPerJob;
import org.apache.hadoop.hive.ql.session.SessionState;
@@ -81,9 +81,6 @@ private void updateCounters(Counters ctrs, RunningJob rj) throws IOException {
// we may still be able to retrieve the job status - so ignore
return;
}
- if(callBackObj != null) {
- callBackObj.updateCounters(ctrs, rj);
- }
}
/**
@@ -194,6 +191,7 @@ public static void killRunningJobs() {
}
}
+ @SuppressWarnings("deprecation")
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
if (ctrs == null) {
// hadoop might return null if it cannot locate the job.
@@ -201,7 +199,9 @@ public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
return false;
}
// check for number of created files
- long numFiles = ctrs.getCounter(ProgressCounter.CREATED_FILES);
+ Counters.Counter cntr = ctrs.findCounter(HiveConf.getVar(job, ConfVars.HIVECOUNTERGROUP),
+ HiveConf.getVar(job, ConfVars.HIVECOUNTERCREATEDFILES));
+ long numFiles = cntr != null ? cntr.getValue() : 0;
long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAXCREATEDFILES);
if (numFiles > upperLimit) {
errMsg.append("total number of created files now is " + numFiles + ", which exceeds ").append(upperLimit);
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHook.java ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHook.java
index b8bc5e4..cee27ca 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHook.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHook.java
@@ -22,13 +22,10 @@
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.mapred.Counters;
-import org.apache.hadoop.mapred.RunningJob;
@SuppressWarnings("deprecation")
public interface HadoopJobExecHook {
-
- public void updateCounters(Counters ctrs, RunningJob rj) throws IOException;
public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg);
public void logPlanProgress(SessionState ss) throws IOException;
-
+
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java
index dd0efc7..32870d8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapredLocalTask.java
@@ -336,7 +336,7 @@ private void startForward(boolean inputFileChangeSenstive, String bigTableBucket
break;
}
fetchOpRows++;
- forwardOp.process(row.o, 0);
+ forwardOp.processOp(row.o, 0);
// check if any operator had a fatal error or early exit during
// execution
if (forwardOp.getDone()) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index 564e166..74bd174 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -42,7 +42,6 @@
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
@@ -60,22 +59,7 @@
protected List<Operator<? extends OperatorDesc>> childOperators;
protected List<Operator<? extends OperatorDesc>> parentOperators;
protected String operatorId;
- /**
- * List of counter names associated with the operator. It contains the
- * following default counters NUM_INPUT_ROWS NUM_OUTPUT_ROWS TIME_TAKEN
- * Individual operators can add to this list via addToCounterNames methods.
- */
- protected ArrayList<String> counterNames;
-
- /**
- * Each operator has its own map of its counter names to disjoint
- * ProgressCounter - it is populated at compile time and is read in at
- * run-time while extracting the operator specific counts.
- */
- protected HashMap<String, ProgressCounter> counterNameToEnum;
-
private transient ExecMapperContext execContext;
-
private static int seqId;
// It can be optimized later so that an operator operator (init/close) is performed
@@ -99,9 +83,6 @@
protected transient State state = State.UNINIT;
- static transient boolean fatalError = false; // fatalError is shared acorss
- // all operators
-
static {
seqId = 0;
}
@@ -110,6 +91,7 @@
public Operator() {
id = String.valueOf(seqId++);
+ initOperatorId();
}
public static void resetId() {
@@ -186,10 +168,30 @@ public T getConf() {
}
public boolean getDone() {
- return done || fatalError;
+ return done;
}
- public void setDone(boolean done) {
+ private final void setChildIsDone(Operator<? extends OperatorDesc> op) {
+ this.removeChildFromProcessing(op);
+ if (this.childOperatorsArray.length == 0) {
+ setDone(true);
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ protected final void setDone(boolean done) {
+ if (done && !this.done) {
+ // no more forwarding
+ childOperatorsArray = new Operator[0];
+ childOperatorsTag = new int[0];
+
+ // tell parents we're done
+ if (getParentOperators() != null) {
+ for (Operator<? extends OperatorDesc> op: getParentOperators()) {
+ op.setChildIsDone(this);
+ }
+ }
+ }
this.done = done;
}
@@ -207,6 +209,7 @@ public RowSchema getSchema() {
// non-bean ..
protected transient HashMap<Enum<?>, LongWritable> statsMap = new HashMap<Enum<?>, LongWritable>();
+ @SuppressWarnings("rawtypes")
protected transient OutputCollector out;
protected transient Log LOG = LogFactory.getLog(this.getClass().getName());
protected transient boolean isLogInfoEnabled = LOG.isInfoEnabled();
@@ -253,6 +256,7 @@ public void setReporter(Reporter rep) {
}
}
+ @SuppressWarnings("rawtypes")
public void setOutputCollector(OutputCollector out) {
this.out = out;
@@ -321,6 +325,7 @@ protected boolean areAllParentsInitialized() {
* ignored.
* @throws HiveException
*/
+ @SuppressWarnings("unchecked")
public void initialize(Configuration hconf, ObjectInspector[] inputOIs)
throws HiveException {
if (state == State.INIT) {
@@ -361,6 +366,9 @@ public void initialize(Configuration hconf, ObjectInspector[] inputOIs)
"Hive internal error: cannot find parent in the child operator!");
}
}
+ } else {
+ childOperatorsArray = new Operator[0];
+ childOperatorsTag = new int[0];
}
if (inputObjInspectors.length == 0) {
@@ -471,38 +479,6 @@ public void setInputObjInspectors(ObjectInspector[] inputObjInspectors) {
*/
public abstract void processOp(Object row, int tag) throws HiveException;
- /**
- * Process the row.
- *
- * @param row
- * The object representing the row.
- * @param tag
- * The tag of the row usually means which parent this row comes from.
- * Rows with the same tag should have exactly the same rowInspector
- * all the time.
- */
- public void process(Object row, int tag) throws HiveException {
- if (fatalError) {
- return;
- }
-
- if (counterNameToEnum != null) {
- inputRows++;
- if ((inputRows % 1000) == 0) {
- incrCounter(numInputRowsCntr, inputRows);
- incrCounter(timeTakenCntr, totalTime);
- inputRows = 0;
- totalTime = 0;
- }
-
- beginTime = System.currentTimeMillis();
- processOp(row, tag);
- totalTime += (System.currentTimeMillis() - beginTime);
- } else {
- processOp(row, tag);
- }
- }
-
// If a operator wants to do some work at the beginning of a group
public void startGroup() throws HiveException {
LOG.debug("Starting group");
@@ -511,10 +487,6 @@ public void startGroup() throws HiveException {
return;
}
- if (fatalError) {
- return;
- }
-
LOG.debug("Starting group for children:");
for (Operator<? extends OperatorDesc> op : childOperators) {
op.startGroup();
@@ -531,10 +503,6 @@ public void endGroup() throws HiveException {
return;
}
- if (fatalError) {
- return;
- }
-
LOG.debug("Ending group for children:");
for (Operator<? extends OperatorDesc> op : childOperators) {
op.endGroup();
@@ -576,14 +544,6 @@ public void close(boolean abort) throws HiveException {
state = State.CLOSE;
LOG.info(id + " finished. closing... ");
- if (counterNameToEnum != null) {
- incrCounter(numInputRowsCntr, inputRows);
- incrCounter(numOutputRowsCntr, outputRows);
- incrCounter(timeTakenCntr, totalTime);
- }
-
- LOG.info(id + " forwarded " + cntr + " rows");
-
// call the operator specific close routine
closeOp(abort);
@@ -651,10 +611,6 @@ public void jobClose(Configuration conf, boolean success, JobCloseFeedBack feedB
protected transient Operator<? extends OperatorDesc>[] childOperatorsArray = null;
protected transient int[] childOperatorsTag;
- // counters for debugging
- private transient long cntr = 0;
- private transient long nextCntr = 1;
-
/**
* Replace one child with another at the same position. The parent of the
* child is not changed
@@ -671,6 +627,22 @@ public void replaceChild(Operator extends OperatorDesc> child,
childOperators.set(childIndex, newChild);
}
+ private void removeChildFromProcessing(Operator<? extends OperatorDesc> op) {
+ @SuppressWarnings("unchecked")
+ Operator<? extends OperatorDesc>[] processingArray = new Operator[childOperatorsArray.length-1];
+ int[] processingTags = new int[childOperatorsArray.length-1];
+ int p = 0, q = 0;
+ for (; p < childOperatorsArray.length; ++p) {
+ if (childOperatorsArray[p] != op) {
+ processingArray[q] = childOperatorsArray[p];
+ processingTags[q] = childOperatorsTag[p];
+ ++q;
+ }
+ }
+ childOperatorsArray = processingArray;
+ childOperatorsTag = processingTags;
+ }
+
public void removeChild(Operator<? extends OperatorDesc> child) {
int childIndex = childOperators.indexOf(child);
assert childIndex != -1;
@@ -779,63 +751,17 @@ public void replaceParent(Operator extends OperatorDesc> parent,
parentOperators.set(parentIndex, newParent);
}
- private long getNextCntr(long cntr) {
- // A very simple counter to keep track of number of rows processed by an
- // operator. It dumps
- // every 1 million times, and quickly before that
- if (cntr >= 1000000) {
- return cntr + 1000000;
- }
-
- return 10 * cntr;
- }
-
protected void forward(Object row, ObjectInspector rowInspector)
throws HiveException {
- if (counterNameToEnum != null) {
- if ((++outputRows % 1000) == 0) {
- incrCounter(numOutputRowsCntr, outputRows);
- outputRows = 0;
- }
- }
-
- if (isLogInfoEnabled) {
- cntr++;
- if (cntr == nextCntr) {
- LOG.info(id + " forwarding " + cntr + " rows");
- nextCntr = getNextCntr(cntr);
- }
- }
-
// For debugging purposes:
// System.out.println("" + this.getClass() + ": " +
// SerDeUtils.getJSONString(row, rowInspector));
// System.out.println("" + this.getClass() + ">> " +
// ObjectInspectorUtils.getObjectInspectorName(rowInspector));
- if (childOperatorsArray == null && childOperators != null) {
- throw new HiveException(
- "Internal Hive error during operator initialization.");
- }
-
- if ((childOperatorsArray == null) || (getDone())) {
- return;
- }
-
- int childrenDone = 0;
for (int i = 0; i < childOperatorsArray.length; i++) {
- Operator<? extends OperatorDesc> o = childOperatorsArray[i];
- if (o.getDone()) {
- childrenDone++;
- } else {
- o.process(row, childOperatorsTag[i]);
- }
- }
-
- // if all children are done, this operator is also done
- if (childrenDone == childOperatorsArray.length) {
- setDone(true);
+ childOperatorsArray[i].processOp(row, childOperatorsTag[i]);
}
}
@@ -999,174 +925,12 @@ protected static StructObjectInspector initEvaluatorsAndReturnStruct(
outputColName, Arrays.asList(fieldObjectInspectors));
}
- /**
- * All counter stuff below this
- */
-
- /**
- * TODO This is a hack for hadoop 0.17 which only supports enum counters.
- */
- public static enum ProgressCounter {
- CREATED_FILES,
- C1, C2, C3, C4, C5, C6, C7, C8, C9, C10,
- C11, C12, C13, C14, C15, C16, C17, C18, C19, C20,
- C21, C22, C23, C24, C25, C26, C27, C28, C29, C30,
- C31, C32, C33, C34, C35, C36, C37, C38, C39, C40,
- C41, C42, C43, C44, C45, C46, C47, C48, C49, C50,
- C51, C52, C53, C54, C55, C56, C57, C58, C59, C60,
- C61, C62, C63, C64, C65, C66, C67, C68, C69, C70,
- C71, C72, C73, C74, C75, C76, C77, C78, C79, C80,
- C81, C82, C83, C84, C85, C86, C87, C88, C89, C90,
- C91, C92, C93, C94, C95, C96, C97, C98, C99, C100,
- C101, C102, C103, C104, C105, C106, C107, C108, C109, C110,
- C111, C112, C113, C114, C115, C116, C117, C118, C119, C120,
- C121, C122, C123, C124, C125, C126, C127, C128, C129, C130,
- C131, C132, C133, C134, C135, C136, C137, C138, C139, C140,
- C141, C142, C143, C144, C145, C146, C147, C148, C149, C150,
- C151, C152, C153, C154, C155, C156, C157, C158, C159, C160,
- C161, C162, C163, C164, C165, C166, C167, C168, C169, C170,
- C171, C172, C173, C174, C175, C176, C177, C178, C179, C180,
- C181, C182, C183, C184, C185, C186, C187, C188, C189, C190,
- C191, C192, C193, C194, C195, C196, C197, C198, C199, C200,
- C201, C202, C203, C204, C205, C206, C207, C208, C209, C210,
- C211, C212, C213, C214, C215, C216, C217, C218, C219, C220,
- C221, C222, C223, C224, C225, C226, C227, C228, C229, C230,
- C231, C232, C233, C234, C235, C236, C237, C238, C239, C240,
- C241, C242, C243, C244, C245, C246, C247, C248, C249, C250,
- C251, C252, C253, C254, C255, C256, C257, C258, C259, C260,
- C261, C262, C263, C264, C265, C266, C267, C268, C269, C270,
- C271, C272, C273, C274, C275, C276, C277, C278, C279, C280,
- C281, C282, C283, C284, C285, C286, C287, C288, C289, C290,
- C291, C292, C293, C294, C295, C296, C297, C298, C299, C300,
- C301, C302, C303, C304, C305, C306, C307, C308, C309, C310,
- C311, C312, C313, C314, C315, C316, C317, C318, C319, C320,
- C321, C322, C323, C324, C325, C326, C327, C328, C329, C330,
- C331, C332, C333, C334, C335, C336, C337, C338, C339, C340,
- C341, C342, C343, C344, C345, C346, C347, C348, C349, C350,
- C351, C352, C353, C354, C355, C356, C357, C358, C359, C360,
- C361, C362, C363, C364, C365, C366, C367, C368, C369, C370,
- C371, C372, C373, C374, C375, C376, C377, C378, C379, C380,
- C381, C382, C383, C384, C385, C386, C387, C388, C389, C390,
- C391, C392, C393, C394, C395, C396, C397, C398, C399, C400,
- C401, C402, C403, C404, C405, C406, C407, C408, C409, C410,
- C411, C412, C413, C414, C415, C416, C417, C418, C419, C420,
- C421, C422, C423, C424, C425, C426, C427, C428, C429, C430,
- C431, C432, C433, C434, C435, C436, C437, C438, C439, C440,
- C441, C442, C443, C444, C445, C446, C447, C448, C449, C450,
- C451, C452, C453, C454, C455, C456, C457, C458, C459, C460,
- C461, C462, C463, C464, C465, C466, C467, C468, C469, C470,
- C471, C472, C473, C474, C475, C476, C477, C478, C479, C480,
- C481, C482, C483, C484, C485, C486, C487, C488, C489, C490,
- C491, C492, C493, C494, C495, C496, C497, C498, C499, C500,
- C501, C502, C503, C504, C505, C506, C507, C508, C509, C510,
- C511, C512, C513, C514, C515, C516, C517, C518, C519, C520,
- C521, C522, C523, C524, C525, C526, C527, C528, C529, C530,
- C531, C532, C533, C534, C535, C536, C537, C538, C539, C540,
- C541, C542, C543, C544, C545, C546, C547, C548, C549, C550,
- C551, C552, C553, C554, C555, C556, C557, C558, C559, C560,
- C561, C562, C563, C564, C565, C566, C567, C568, C569, C570,
- C571, C572, C573, C574, C575, C576, C577, C578, C579, C580,
- C581, C582, C583, C584, C585, C586, C587, C588, C589, C590,
- C591, C592, C593, C594, C595, C596, C597, C598, C599, C600,
- C601, C602, C603, C604, C605, C606, C607, C608, C609, C610,
- C611, C612, C613, C614, C615, C616, C617, C618, C619, C620,
- C621, C622, C623, C624, C625, C626, C627, C628, C629, C630,
- C631, C632, C633, C634, C635, C636, C637, C638, C639, C640,
- C641, C642, C643, C644, C645, C646, C647, C648, C649, C650,
- C651, C652, C653, C654, C655, C656, C657, C658, C659, C660,
- C661, C662, C663, C664, C665, C666, C667, C668, C669, C670,
- C671, C672, C673, C674, C675, C676, C677, C678, C679, C680,
- C681, C682, C683, C684, C685, C686, C687, C688, C689, C690,
- C691, C692, C693, C694, C695, C696, C697, C698, C699, C700,
- C701, C702, C703, C704, C705, C706, C707, C708, C709, C710,
- C711, C712, C713, C714, C715, C716, C717, C718, C719, C720,
- C721, C722, C723, C724, C725, C726, C727, C728, C729, C730,
- C731, C732, C733, C734, C735, C736, C737, C738, C739, C740,
- C741, C742, C743, C744, C745, C746, C747, C748, C749, C750,
- C751, C752, C753, C754, C755, C756, C757, C758, C759, C760,
- C761, C762, C763, C764, C765, C766, C767, C768, C769, C770,
- C771, C772, C773, C774, C775, C776, C777, C778, C779, C780,
- C781, C782, C783, C784, C785, C786, C787, C788, C789, C790,
- C791, C792, C793, C794, C795, C796, C797, C798, C799, C800,
- C801, C802, C803, C804, C805, C806, C807, C808, C809, C810,
- C811, C812, C813, C814, C815, C816, C817, C818, C819, C820,
- C821, C822, C823, C824, C825, C826, C827, C828, C829, C830,
- C831, C832, C833, C834, C835, C836, C837, C838, C839, C840,
- C841, C842, C843, C844, C845, C846, C847, C848, C849, C850,
- C851, C852, C853, C854, C855, C856, C857, C858, C859, C860,
- C861, C862, C863, C864, C865, C866, C867, C868, C869, C870,
- C871, C872, C873, C874, C875, C876, C877, C878, C879, C880,
- C881, C882, C883, C884, C885, C886, C887, C888, C889, C890,
- C891, C892, C893, C894, C895, C896, C897, C898, C899, C900,
- C901, C902, C903, C904, C905, C906, C907, C908, C909, C910,
- C911, C912, C913, C914, C915, C916, C917, C918, C919, C920,
- C921, C922, C923, C924, C925, C926, C927, C928, C929, C930,
- C931, C932, C933, C934, C935, C936, C937, C938, C939, C940,
- C941, C942, C943, C944, C945, C946, C947, C948, C949, C950,
- C951, C952, C953, C954, C955, C956, C957, C958, C959, C960,
- C961, C962, C963, C964, C965, C966, C967, C968, C969, C970,
- C971, C972, C973, C974, C975, C976, C977, C978, C979, C980,
- C981, C982, C983, C984, C985, C986, C987, C988, C989, C990,
- C991, C992, C993, C994, C995, C996, C997, C998, C999, C1000
- };
-
- private static int totalNumCntrs = 1000;
-
- /**
- * populated at runtime from hadoop counters at run time in the client.
- */
- protected transient HashMap<String, Long> counters;
-
- /**
- * keeps track of unique ProgressCounter enums used this value is used at
- * compile time while assigning ProgressCounter enums to counter names.
- */
- private static int lastEnumUsed;
-
- protected transient long inputRows = 0;
- protected transient long outputRows = 0;
- protected transient long beginTime = 0;
- protected transient long totalTime = 0;
-
protected transient Object groupKeyObject;
- /**
- * this is called in operators in map or reduce tasks.
- *
- * @param name
- * @param amount
- */
- protected void incrCounter(String name, long amount) {
- String counterName = getWrappedCounterName(name);
- ProgressCounter pc = counterNameToEnum.get(counterName);
-
- // Currently, we maintain fixed number of counters per plan - in case of a
- // bigger tree, we may run out of them
- if (pc == null) {
- LOG
- .warn("Using too many counters. Increase the total number of counters for "
- + counterName);
- } else if (reporter != null) {
- reporter.incrCounter(pc, amount);
- }
- }
-
- public ArrayList getCounterNames() {
- return counterNames;
- }
-
- public void setCounterNames(ArrayList counterNames) {
- this.counterNames = counterNames;
- }
-
public String getOperatorId() {
return operatorId;
}
- public final String getWrappedCounterName(String ctrName) {
- return String.format(counterNameFormat, getOperatorId(), ctrName);
- }
-
public void initOperatorId() {
setOperatorId(getName() + "_" + this.id);
}
@@ -1175,81 +939,6 @@ public void setOperatorId(String operatorId) {
this.operatorId = operatorId;
}
- public HashMap getCounters() {
- return counters;
- }
-
- /**
- * called in ExecDriver.progress periodically.
- *
- * @param ctrs
- * counters from the running job
- */
- @SuppressWarnings("unchecked")
- public void updateCounters(Counters ctrs) {
- if (counters == null) {
- counters = new HashMap();
- }
-
- // For some old unit tests, the counters will not be populated. Eventually,
- // the old tests should be removed
- if (counterNameToEnum == null) {
- return;
- }
-
- for (Map.Entry counter : counterNameToEnum
- .entrySet()) {
- counters.put(counter.getKey(), ctrs.getCounter(counter.getValue()));
- }
- // update counters of child operators
- // this wont be an infinite loop since the operator graph is acyclic
- // but, some operators may be updated more than once and that's ok
- if (getChildren() != null) {
- for (Node op : getChildren()) {
- ((Operator extends OperatorDesc>) op).updateCounters(ctrs);
- }
- }
- }
-
- /**
- * Recursively check this operator and its descendants to see if the fatal
- * error counter is set to non-zero.
- *
- * @param ctrs
- */
- public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
- if (counterNameToEnum == null) {
- return false;
- }
-
- String counterName = getWrappedCounterName(fatalErrorCntr);
- ProgressCounter pc = counterNameToEnum.get(counterName);
-
- // Currently, we maintain fixed number of counters per plan - in case of a
- // bigger tree, we may run out of them
- if (pc == null) {
- LOG
- .warn("Using too many counters. Increase the total number of counters for "
- + counterName);
- } else {
- long value = ctrs.getCounter(pc);
- fatalErrorMessage(errMsg, value);
- if (value != 0) {
- return true;
- }
- }
-
- if (getChildren() != null) {
- for (Node op : getChildren()) {
- if (((Operator extends OperatorDesc>) op).checkFatalErrors(ctrs,
- errMsg)) {
- return true;
- }
- }
- }
- return false;
- }
-
/**
* Get the fatal error message based on counter's code.
*
@@ -1261,59 +950,6 @@ public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) {
protected void fatalErrorMessage(StringBuilder errMsg, long counterValue) {
}
- // A given query can have multiple map-reduce jobs
- public static void resetLastEnumUsed() {
- lastEnumUsed = 0;
- }
-
- /**
- * Called only in SemanticAnalyzer after all operators have added their own
- * set of counter names.
- */
- public void assignCounterNameToEnum() {
- if (counterNameToEnum != null) {
- return;
- }
- counterNameToEnum = new HashMap();
- for (String counterName : getCounterNames()) {
- ++lastEnumUsed;
-
- // TODO Hack for hadoop-0.17
- // Currently, only maximum number of 'totalNumCntrs' can be used. If you
- // want
- // to add more counters, increase the number of counters in
- // ProgressCounter
- if (lastEnumUsed > totalNumCntrs) {
- LOG
- .warn("Using too many counters. Increase the total number of counters");
- return;
- }
- String enumName = "C" + lastEnumUsed;
- ProgressCounter ctr = ProgressCounter.valueOf(enumName);
- counterNameToEnum.put(counterName, ctr);
- }
- }
-
- protected static String numInputRowsCntr = "NUM_INPUT_ROWS";
- protected static String numOutputRowsCntr = "NUM_OUTPUT_ROWS";
- protected static String timeTakenCntr = "TIME_TAKEN";
- protected static String fatalErrorCntr = "FATAL_ERROR";
- private static String counterNameFormat = "CNTR_NAME_%s_%s";
-
- public void initializeCounters() {
- initOperatorId();
- counterNames = new ArrayList();
- counterNames.add(getWrappedCounterName(numInputRowsCntr));
- counterNames.add(getWrappedCounterName(numOutputRowsCntr));
- counterNames.add(getWrappedCounterName(timeTakenCntr));
- counterNames.add(getWrappedCounterName(fatalErrorCntr));
- /* getAdditionalCounter should return Wrapped counters */
- List newCntrs = getAdditionalCounters();
- if (newCntrs != null) {
- counterNames.addAll(newCntrs);
- }
- }
-
/*
* By default, the list is empty - if an operator wants to add more counters,
* it should override this method and provide the new list. Counter names returned
@@ -1324,15 +960,6 @@ public void initializeCounters() {
return null;
}
- public HashMap getCounterNameToEnum() {
- return counterNameToEnum;
- }
-
- public void setCounterNameToEnum(
- HashMap counterNameToEnum) {
- this.counterNameToEnum = counterNameToEnum;
- }
-
/**
* Return the type of the specific operator among the
* types in OperatorType.
@@ -1406,6 +1033,7 @@ public boolean supportSkewJoinOptimization() {
}
}
+ @SuppressWarnings("unchecked")
T descClone = (T)conf.clone();
Operator extends OperatorDesc> ret =
(Operator extends OperatorDesc>) OperatorFactory.getAndMakeChild(
@@ -1480,6 +1108,7 @@ public boolean opAllowedBeforeSortMergeJoin() {
return true;
}
+ @Override
public String toString() {
return getName() + "[" + getIdentifier() + "]";
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
index bbbcfbf..b537b9f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java
@@ -24,8 +24,8 @@
import org.apache.hadoop.hive.ql.plan.CollectDesc;
import org.apache.hadoop.hive.ql.plan.DummyStoreDesc;
-import org.apache.hadoop.hive.ql.plan.ExtractDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExtractDesc;
import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
import org.apache.hadoop.hive.ql.plan.FilterDesc;
import org.apache.hadoop.hive.ql.plan.ForwardDesc;
@@ -106,7 +106,6 @@ public OpTuple(Class descClass, Class extends Operator> opClass) {
if (o.descClass == opClass) {
try {
Operator op = (Operator) o.opClass.newInstance();
- op.initializeCounters();
return op;
} catch (Exception e) {
e.printStackTrace();
@@ -257,7 +256,7 @@ public static void makeChild(
public static Operator getAndMakeChild(T conf,
RowSchema rwsch, Map colExprMap, Operator... oplist) {
Operator ret = getAndMakeChild(conf, rwsch, oplist);
- ret.setColumnExprMap(colExprMap);
+ ret.setColumnExprMap(colExprMap);
return (ret);
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
index 6a538e8..d5f489f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
@@ -296,15 +296,6 @@ public void processOp(Object row, int tag) throws HiveException {
keyWritable.setHashCode(keyHashCode);
if (out != null) {
out.collect(keyWritable, value);
- // Since this is a terminal operator, update counters explicitly -
- // forward is not called
- if (counterNameToEnum != null) {
- ++outputRows;
- if (outputRows % 1000 == 0) {
- incrCounter(numOutputRowsCntr, outputRows);
- outputRows = 0;
- }
- }
}
}
} catch (SerDeException e) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
index 7ce221c..cea07f2 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java
@@ -125,6 +125,7 @@ protected void initializeOp(Configuration hconf) throws HiveException {
int bucketSize = HiveConf.getIntVar(hconf,
HiveConf.ConfVars.HIVEMAPJOINBUCKETCACHESIZE);
+
for (byte pos = 0; pos < order.length; pos++) {
RowContainer rc = JoinUtil.getRowContainer(hconf,
rowContainerStandardObjectInspectors[pos],
@@ -544,7 +545,7 @@ private void fetchOneRow(byte tag) {
fetchDone[tag] = true;
return;
}
- forwardOp.process(row.o, tag);
+ forwardOp.processOp(row.o, tag);
// check if any operator had a fatal error or early exit during
// execution
if (forwardOp.getDone()) {
@@ -789,7 +790,7 @@ private boolean next(Integer current) throws IOException, HiveException {
// Pass the row though the operator tree. It is guaranteed that not more than 1 row can
// be produced from a input row.
- forwardOp.process(nextRow.o, 0);
+ forwardOp.processOp(nextRow.o, 0);
nextRow = sinkOp.getResult();
// It is possible that the row got absorbed in the operator tree.
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 100db8a..530049d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -121,9 +121,9 @@
import org.apache.hadoop.hive.ql.plan.PartitionDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.PlanUtils.ExpressionTypes;
+import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.plan.api.Adjacency;
import org.apache.hadoop.hive.ql.plan.api.Graph;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.stats.StatsFactory;
import org.apache.hadoop.hive.ql.stats.StatsPublisher;
@@ -234,15 +234,18 @@ public static MapredWork getMapRedWork(Configuration job) {
public static void setWorkflowAdjacencies(Configuration conf, QueryPlan plan) {
try {
Graph stageGraph = plan.getQueryPlan().getStageGraph();
- if (stageGraph == null)
+ if (stageGraph == null) {
return;
+ }
List adjList = stageGraph.getAdjacencyList();
- if (adjList == null)
+ if (adjList == null) {
return;
+ }
for (Adjacency adj : adjList) {
List children = adj.getChildren();
- if (children == null || children.isEmpty())
+ if (children == null || children.isEmpty()) {
return;
+ }
conf.setStrings("mapreduce.workflow.adjacency."+adj.getNode(),
children.toArray(new String[children.size()]));
}
@@ -451,7 +454,6 @@ public static void serializeTasks(Task extends Serializable> t, OutputStream o
// workaround for java 1.5
e.setPersistenceDelegate(ExpressionTypes.class, new EnumDelegate());
e.setPersistenceDelegate(GroupByDesc.Mode.class, new EnumDelegate());
- e.setPersistenceDelegate(Operator.ProgressCounter.class, new EnumDelegate());
e.writeObject(t);
} finally {
@@ -490,7 +492,6 @@ public void exceptionThrown(Exception e) {
// workaround for java 1.5
e.setPersistenceDelegate(ExpressionTypes.class, new EnumDelegate());
e.setPersistenceDelegate(GroupByDesc.Mode.class, new EnumDelegate());
- e.setPersistenceDelegate(Operator.ProgressCounter.class, new EnumDelegate());
e.setPersistenceDelegate(org.datanucleus.sco.backed.Map.class, new MapDelegate());
e.setPersistenceDelegate(org.datanucleus.sco.backed.List.class, new ListDelegate());
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
index ee667ce..2dce773 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/merge/BlockMergeTask.java
@@ -375,11 +375,6 @@ public void logPlanProgress(SessionState ss) throws IOException {
}
@Override
- public void updateCounters(Counters ctrs, RunningJob rj) throws IOException {
- // no op
- }
-
- @Override
protected void localizeMRTmpFilesImpl(Context ctx) {
// no op
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
index 767cb02..152a241 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
@@ -376,11 +376,6 @@ public void logPlanProgress(SessionState ss) throws IOException {
}
@Override
- public void updateCounters(Counters ctrs, RunningJob rj) throws IOException {
- // no op
- }
-
- @Override
protected void localizeMRTmpFilesImpl(Context ctx) {
// no op
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java
index e9805bb..978f890 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java
@@ -251,11 +251,6 @@ public void logPlanProgress(SessionState ss) throws IOException {
}
@Override
- public void updateCounters(Counters ctrs, RunningJob rj) throws IOException {
- // no op
- }
-
- @Override
protected void localizeMRTmpFilesImpl(Context ctx) {
// no op
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java
new file mode 100644
index 0000000..9de5b2b
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java
@@ -0,0 +1,20 @@
+package org.apache.hadoop.hive.ql.metadata;
+
+public class HiveFatalException extends HiveException {
+
+ public HiveFatalException() {
+ }
+
+ public HiveFatalException(String message) {
+ super(message);
+ }
+
+ public HiveFatalException(Throwable cause) {
+ super(cause);
+ }
+
+ public HiveFatalException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index a7a7254..c7b48ce 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -4965,11 +4965,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input)
qbm.setDPCtx(dest, dpCtx);
}
- if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP
- // turn on hive.task.progress to update # of partitions created to the JT
- HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVEJOBPROGRESS, true);
-
- } else { // QBMetaData.DEST_PARTITION capture the all-SP case
+ if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP
throw new SemanticException(generateErrorMessage(
qb.getParseInfo().getDestForClause(dest),
ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg()));
@@ -8430,13 +8426,6 @@ private void genMapRedTasks(ParseContext pCtx) throws SemanticException {
physicalContext, conf);
physicalOptimizer.optimize();
- // For each operator, generate the counters if needed
- if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEJOBPROGRESS)) {
- for (Task extends Serializable> rootTask : rootTasks) {
- generateCountersTask(rootTask);
- }
- }
-
decideExecMode(rootTasks, ctx, globalLimitCtx);
if (qb.isCTAS()) {
@@ -8537,9 +8526,6 @@ private void generateCountersTask(Task extends Serializable> task) {
}
}
- // Start the counters from scratch - a hack for hadoop 17.
- Operator.resetLastEnumUsed();
-
if (task.getChildTasks() == null) {
return;
}
@@ -8550,8 +8536,6 @@ private void generateCountersTask(Task extends Serializable> task) {
}
private void generateCountersOperator(Operator extends OperatorDesc> op) {
- op.assignCounterNameToEnum();
-
if (op.getChildOperators() == null) {
return;
}
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
index 79bed09..75801dc 100644
--- ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
@@ -116,7 +116,7 @@ public void testBaseFilterOperator() throws Throwable {
new ObjectInspector[] {r[0].oi});
for (InspectableObject oner : r) {
- op.process(oner.o, 0);
+ op.processOp(oner.o, 0);
}
Map, Long> results = op.getStats();
@@ -277,7 +277,7 @@ public void testScriptOperator() throws Throwable {
// evaluate on row
for (int i = 0; i < 5; i++) {
- op.process(r[i].o, 0);
+ op.processOp(r[i].o, 0);
}
op.close(false);
diff --git ql/src/test/org/apache/hadoop/hive/ql/hooks/OptrStatGroupByHook.java ql/src/test/org/apache/hadoop/hive/ql/hooks/OptrStatGroupByHook.java
deleted file mode 100644
index 828de5e..0000000
--- ql/src/test/org/apache/hadoop/hive/ql/hooks/OptrStatGroupByHook.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- **/
-package org.apache.hadoop.hive.ql.hooks;
-
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Queue;
-import java.util.Set;
-import java.io.Serializable;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.Task;
-import org.apache.hadoop.hive.ql.exec.TaskRunner;
-import org.apache.hadoop.hive.ql.plan.api.OperatorType;
-import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
-
-public class OptrStatGroupByHook implements ExecuteWithHookContext {
-
- public void run(HookContext hookContext) {
- HiveConf conf = hookContext.getConf();
-
- List completedTasks = hookContext.getCompleteTaskList();
-
- boolean enableProgress = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEJOBPROGRESS);
-
- /** For each task visit the opeartor tree and and if the operator is GROUPBY
- * then print the HASH_OUT Optr level stat value.
- **/
- if (completedTasks != null) {
- for (TaskRunner taskRunner : completedTasks) {
- Task extends Serializable> task = taskRunner.getTask();
- if (task.isMapRedTask() && !task.isMapRedLocalTask()) {
- Set> optrSet = getOptrsForTask(task);
- for (Operator extends OperatorDesc> optr : optrSet) {
- if (optr.getType() == OperatorType.GROUPBY) {
- printCounterValue(optr.getCounters());
- }
- }
- }
- }
- }
- }
-
- private void printCounterValue(HashMap ctrs) {
- for (String ctrName : ctrs.keySet()) {
- if (ctrName.contains("HASH_OUT")) {
- SessionState.getConsole().printError(ctrName+"="+ctrs.get(ctrName));
- }
- }
- }
-
- private Set> getOptrsForTask(
- Task extends Serializable> task) {
-
- Collection> topOptrs = task.getTopOperators();
- Set> allOptrs =
- new HashSet>();
- Queue> opsToVisit =
- new LinkedList>();
- if(topOptrs != null) {
- opsToVisit.addAll(topOptrs);
- addChildOptrs(opsToVisit, allOptrs);
- }
-
- return allOptrs;
- }
-
- private void addChildOptrs(
- Queue> opsToVisit,
- Set> opsVisited) {
-
- if(opsToVisit == null || opsVisited == null) {
- return;
- }
-
- while (opsToVisit.peek() != null) {
- Operator extends OperatorDesc> op = opsToVisit.remove();
- opsVisited.add(op);
- if (op.getChildOperators() != null) {
- for (Operator extends OperatorDesc> childOp : op.getChildOperators()) {
- if (!opsVisited.contains(childOp)) {
- opsToVisit.add(childOp);
- }
- }
- }
- }
- }
-}
diff --git ql/src/test/queries/clientpositive/optrstat_groupby.q ql/src/test/queries/clientpositive/optrstat_groupby.q
deleted file mode 100644
index 5993041..0000000
--- ql/src/test/queries/clientpositive/optrstat_groupby.q
+++ /dev/null
@@ -1,6 +0,0 @@
-SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.OptrStatGroupByHook;
-SET hive.exec.mode.local.auto=false;
-SET hive.task.progress=true;
--- This test executes the OptrStatGroupBy hook which prints the optr level
--- stats of GROUPBY optr present is the plan of below query
-SELECT count(1) FROM src;
diff --git ql/src/test/results/clientnegative/mapreduce_stack_trace_hadoop20.q.out ql/src/test/results/clientnegative/mapreduce_stack_trace_hadoop20.q.out
index 592ecda..f304672 100644
--- ql/src/test/results/clientnegative/mapreduce_stack_trace_hadoop20.q.out
+++ ql/src/test/results/clientnegative/mapreduce_stack_trace_hadoop20.q.out
@@ -2,12 +2,16 @@ PREHOOK: query: FROM src SELECT TRANSFORM(key, value) USING 'script_does_not_exi
PREHOOK: type: QUERY
PREHOOK: Input: default@src
#### A masked pattern was here ####
+run program "script_does_not_exist": error=2, No such file or directory
FATAL ExecMapper: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"key":"238","value":"val_238"}
Hive Runtime Error while processing row {"key":"238","value":"val_238"}
+run program "script_does_not_exist": error=2, No such file or directory
FATAL ExecMapper: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"key":"238","value":"val_238"}
Hive Runtime Error while processing row {"key":"238","value":"val_238"}
+run program "script_does_not_exist": error=2, No such file or directory
FATAL ExecMapper: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"key":"238","value":"val_238"}
Hive Runtime Error while processing row {"key":"238","value":"val_238"}
+run program "script_does_not_exist": error=2, No such file or directory
FATAL ExecMapper: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row {"key":"238","value":"val_238"}
Hive Runtime Error while processing row {"key":"238","value":"val_238"}
FAILED: Execution Error, return code 20000 from org.apache.hadoop.hive.ql.exec.MapRedTask. Unable to initialize custom script.
diff --git ql/src/test/results/clientpositive/optrstat_groupby.q.out ql/src/test/results/clientpositive/optrstat_groupby.q.out
deleted file mode 100644
index b80a03f..0000000
--- ql/src/test/results/clientpositive/optrstat_groupby.q.out
+++ /dev/null
@@ -1,8 +0,0 @@
-PREHOOK: query: -- This test executes the OptrStatGroupBy hook which prints the optr level
--- stats of GROUPBY optr present is the plan of below query
-SELECT count(1) FROM src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-CNTR_NAME_GBY_2_COUNT_HASH_OUT=1
-500
diff --git ql/src/test/results/compiler/plan/case_sensitivity.q.xml ql/src/test/results/compiler/plan/case_sensitivity.q.xml
index 25dc6fd..c4c4f19 100644
--- ql/src/test/results/compiler/plan/case_sensitivity.q.xml
+++ ql/src/test/results/compiler/plan/case_sensitivity.q.xml
@@ -156,22 +156,6 @@
-
-
-
FS_6
@@ -224,22 +208,6 @@
-
-
-
TS_5
@@ -740,22 +708,6 @@
-
-
-
FS_3
@@ -929,22 +881,6 @@
-
-
-
SEL_2
@@ -1056,22 +992,6 @@
-
-
-
FIL_4
@@ -1130,22 +1050,6 @@
-
-
-
-
-
-
FS_3
@@ -758,22 +742,6 @@
-
-
-
SEL_2
@@ -932,22 +900,6 @@
-
-
-
FIL_4
@@ -993,22 +945,6 @@
-
-
-
-
-
-
RS_3
@@ -655,25 +639,6 @@
-
-
-
GBY_2
@@ -839,22 +804,6 @@
-
-
-
SEL_1
@@ -913,22 +862,6 @@
-
-
-
-
-
-
FS_6
@@ -1362,22 +1279,6 @@
-
-
-
SEL_5
@@ -1503,25 +1404,6 @@
-
-
-
GBY_4
diff --git ql/src/test/results/compiler/plan/groupby2.q.xml ql/src/test/results/compiler/plan/groupby2.q.xml
index 13cca32..2993a92 100755
--- ql/src/test/results/compiler/plan/groupby2.q.xml
+++ ql/src/test/results/compiler/plan/groupby2.q.xml
@@ -362,22 +362,6 @@
-
-
-
RS_3
@@ -741,25 +725,6 @@
-
-
-
GBY_2
@@ -947,22 +912,6 @@
-
-
-
SEL_1
@@ -1021,22 +970,6 @@
-
-
-
-
-
-
FS_6
@@ -1555,22 +1472,6 @@
-
-
-
SEL_5
@@ -1753,25 +1654,6 @@
-
-
-
GBY_4
diff --git ql/src/test/results/compiler/plan/groupby3.q.xml ql/src/test/results/compiler/plan/groupby3.q.xml
index 06f0864..0043309 100644
--- ql/src/test/results/compiler/plan/groupby3.q.xml
+++ ql/src/test/results/compiler/plan/groupby3.q.xml
@@ -403,22 +403,6 @@
-
-
-
RS_3
@@ -936,25 +920,6 @@
-
-
-
GBY_2
@@ -1152,22 +1117,6 @@
-
-
-
SEL_1
@@ -1213,22 +1162,6 @@
-
-
-
-
-
-
FS_6
@@ -1783,22 +1700,6 @@
-
-
-
SEL_5
@@ -2078,25 +1979,6 @@
-
-
-
GBY_4
diff --git ql/src/test/results/compiler/plan/groupby4.q.xml ql/src/test/results/compiler/plan/groupby4.q.xml
index 21deeb9..818b9e9 100644
--- ql/src/test/results/compiler/plan/groupby4.q.xml
+++ ql/src/test/results/compiler/plan/groupby4.q.xml
@@ -292,22 +292,6 @@
-
-
-
RS_3
@@ -444,25 +428,6 @@
-
-
-
GBY_2
@@ -599,22 +564,6 @@
-
-
-
SEL_1
@@ -660,22 +609,6 @@
-
-
-
-
-
-
FS_6
@@ -1098,22 +1015,6 @@
-
-
-
SEL_5
@@ -1199,25 +1100,6 @@
-
-
-
GBY_4
diff --git ql/src/test/results/compiler/plan/groupby5.q.xml ql/src/test/results/compiler/plan/groupby5.q.xml
index 521ee86..d1c57a1 100644
--- ql/src/test/results/compiler/plan/groupby5.q.xml
+++ ql/src/test/results/compiler/plan/groupby5.q.xml
@@ -314,22 +314,6 @@
-
-
-
RS_3
@@ -502,25 +486,6 @@
-
-
-
GBY_2
@@ -686,22 +651,6 @@
-
-
-
SEL_1
@@ -760,22 +709,6 @@
-
-
-
-
-
-
FS_6
@@ -1224,22 +1141,6 @@
-
-
-
SEL_5
@@ -1371,25 +1272,6 @@
-
-
-
GBY_4
diff --git ql/src/test/results/compiler/plan/groupby6.q.xml ql/src/test/results/compiler/plan/groupby6.q.xml
index b50d796..d9ae76b 100644
--- ql/src/test/results/compiler/plan/groupby6.q.xml
+++ ql/src/test/results/compiler/plan/groupby6.q.xml
@@ -292,22 +292,6 @@
-
-
-
RS_3
@@ -444,25 +428,6 @@
-
-
-
GBY_2
@@ -599,22 +564,6 @@
-
-
-
SEL_1
@@ -660,22 +609,6 @@
-
-
-
-
-
-
FS_6
@@ -1098,22 +1015,6 @@
-
-
-
SEL_5
@@ -1199,25 +1100,6 @@
-
-
-
GBY_4
diff --git ql/src/test/results/compiler/plan/input1.q.xml ql/src/test/results/compiler/plan/input1.q.xml
index 779d0d5..2ee0353 100755
--- ql/src/test/results/compiler/plan/input1.q.xml
+++ ql/src/test/results/compiler/plan/input1.q.xml
@@ -156,22 +156,6 @@
-
-
-
FS_6
@@ -224,22 +208,6 @@
-
-
-
TS_5
@@ -732,22 +700,6 @@
-
-
-
FS_3
@@ -821,22 +773,6 @@
-
-
-
SEL_2
@@ -934,22 +870,6 @@
-
-
-
FIL_4
@@ -1008,22 +928,6 @@
-
-
-
-
-
-
FS_11
@@ -224,22 +208,6 @@
-
-
-
TS_10
@@ -677,22 +645,6 @@
-
-
-
FS_13
@@ -741,22 +693,6 @@
-
-
-
TS_12
@@ -1198,22 +1134,6 @@
-
-
-
FS_15
@@ -1262,22 +1182,6 @@
-
-
-
TS_14
@@ -1779,22 +1683,6 @@
-
-
-
FS_3
@@ -1871,22 +1759,6 @@
-
-
-
SEL_2
@@ -1984,22 +1856,6 @@
-
-
-
FIL_1
@@ -2090,22 +1946,6 @@
-
-
-
FS_6
@@ -2179,22 +2019,6 @@
-
-
-
SEL_5
@@ -2335,22 +2159,6 @@
-
-
-
FIL_4
@@ -2424,22 +2232,6 @@
-
-
-
FS_9
@@ -2514,22 +2306,6 @@
-
-
-
SEL_8
@@ -2616,22 +2392,6 @@
-
-
-
FIL_7
@@ -2667,22 +2427,6 @@
-
-
-
-
-
-
RS_3
@@ -511,22 +495,6 @@
-
-
-
SCR_2
@@ -686,22 +654,6 @@
-
-
-
SEL_1
@@ -754,22 +706,6 @@
-
-
-
-
-
-
FS_7
@@ -1298,22 +1218,6 @@
-
-
-
SCR_6
@@ -1418,22 +1322,6 @@
-
-
-
SEL_5
@@ -1493,22 +1381,6 @@
-
-
-
EX_4
diff --git ql/src/test/results/compiler/plan/input3.q.xml ql/src/test/results/compiler/plan/input3.q.xml
index a7e2ce3..8d21933 100755
--- ql/src/test/results/compiler/plan/input3.q.xml
+++ ql/src/test/results/compiler/plan/input3.q.xml
@@ -156,22 +156,6 @@
-
-
-
FS_14
@@ -224,22 +208,6 @@
-
-
-
TS_13
@@ -677,22 +645,6 @@
-
-
-
FS_16
@@ -741,22 +693,6 @@
-
-
-
TS_15
@@ -1198,22 +1134,6 @@
-
-
-
FS_18
@@ -1262,22 +1182,6 @@
-
-
-
TS_17
@@ -1657,22 +1561,6 @@
-
-
-
FS_20
@@ -1708,22 +1596,6 @@
-
-
-
TS_19
@@ -2156,22 +2028,6 @@
-
-
-
FS_3
@@ -2248,22 +2104,6 @@
-
-
-
SEL_2
@@ -2361,22 +2201,6 @@
-
-
-
FIL_1
@@ -2467,22 +2291,6 @@
-
-
-
FS_6
@@ -2556,22 +2364,6 @@
-
-
-
SEL_5
@@ -2712,22 +2504,6 @@
-
-
-
FIL_4
@@ -2801,22 +2577,6 @@
-
-
-
FS_9
@@ -2891,22 +2651,6 @@
-
-
-
SEL_8
@@ -3044,22 +2788,6 @@
-
-
-
FIL_7
@@ -3118,22 +2846,6 @@
-
-
-
FS_12
@@ -3187,22 +2899,6 @@
-
-
-
SEL_11
@@ -3282,22 +2978,6 @@
-
-
-
FIL_10
@@ -3336,22 +3016,6 @@
-
-
-
-
-
-
RS_3
@@ -592,22 +576,6 @@
-
-
-
FIL_8
@@ -740,22 +708,6 @@
-
-
-
SCR_2
@@ -833,22 +785,6 @@
-
-
-
SEL_1
@@ -901,22 +837,6 @@
-
-
-
-
-
-
FS_7
@@ -1374,22 +1278,6 @@
-
-
-
SEL_6
@@ -1455,22 +1343,6 @@
-
-
-
EX_4
diff --git ql/src/test/results/compiler/plan/input5.q.xml ql/src/test/results/compiler/plan/input5.q.xml
index 2543cc4..b5afb88 100644
--- ql/src/test/results/compiler/plan/input5.q.xml
+++ ql/src/test/results/compiler/plan/input5.q.xml
@@ -493,22 +493,6 @@
-
-
-
RS_3
@@ -668,22 +652,6 @@
-
-
-
SCR_2
@@ -800,22 +768,6 @@
-
-
-
SEL_1
@@ -868,22 +820,6 @@
-
-
-
-
-
-
FS_6
@@ -1412,22 +1332,6 @@
-
-
-
SEL_5
@@ -1493,22 +1397,6 @@
-
-
-
EX_4
diff --git ql/src/test/results/compiler/plan/input6.q.xml ql/src/test/results/compiler/plan/input6.q.xml
index eaeefda..11d72ad 100644
--- ql/src/test/results/compiler/plan/input6.q.xml
+++ ql/src/test/results/compiler/plan/input6.q.xml
@@ -156,22 +156,6 @@
-
-
-
FS_6
@@ -224,22 +208,6 @@
-
-
-
TS_5
@@ -732,22 +700,6 @@
-
-
-
FS_3
@@ -821,22 +773,6 @@
-
-
-
SEL_2
@@ -920,22 +856,6 @@
-
-
-
FIL_4
@@ -994,22 +914,6 @@
-
-
-
-
-
-
FS_4
@@ -224,22 +208,6 @@
-
-
-
TS_3
@@ -728,22 +696,6 @@
-
-
-
FS_2
@@ -811,22 +763,6 @@
-
-
-
SEL_1
@@ -886,22 +822,6 @@
-
-
-
-
-
-
FS_2
@@ -467,22 +451,6 @@
-
-
-
SEL_1
@@ -554,22 +522,6 @@
-
-
-
-
-
-
FS_6
@@ -224,22 +208,6 @@
-
-
-
TS_5
@@ -732,22 +700,6 @@
-
-
-
FS_3
@@ -815,22 +767,6 @@
-
-
-
SEL_2
@@ -916,22 +852,6 @@
-
-
-
FIL_4
@@ -977,22 +897,6 @@
-
-
-
-
-
-
FS_3
@@ -458,22 +442,6 @@
-
-
-
SEL_2
@@ -609,22 +577,6 @@
-
-
-
FIL_4
@@ -709,22 +661,6 @@
-
-
-
-
-
-
FS_4
@@ -224,22 +208,6 @@
-
-
-
TS_3
@@ -728,22 +696,6 @@
-
-
-
FS_2
@@ -817,22 +769,6 @@
-
-
-
SEL_1
@@ -891,22 +827,6 @@
-
-
-
-
-
-
FS_2
@@ -517,22 +501,6 @@
-
-
-
SEL_1
@@ -604,22 +572,6 @@
-
-
-
-
-
-
FS_3
@@ -478,22 +462,6 @@
-
-
-
SEL_2
@@ -645,22 +613,6 @@
-
-
-
FIL_4
@@ -732,22 +684,6 @@
-
-
-
-
-
-
src2
@@ -667,22 +651,6 @@
-
-
-
-
-
-
src1
@@ -969,22 +921,6 @@
-
-
-
-
-
-
FS_6
@@ -1441,22 +1361,6 @@
-
-
-
SEL_5
@@ -1649,22 +1553,6 @@
-
-
-
JOIN_4
diff --git ql/src/test/results/compiler/plan/join2.q.xml ql/src/test/results/compiler/plan/join2.q.xml
index 91656a6..dd0b28f 100644
--- ql/src/test/results/compiler/plan/join2.q.xml
+++ ql/src/test/results/compiler/plan/join2.q.xml
@@ -507,22 +507,6 @@
-
-
-
RS_6
@@ -558,22 +542,6 @@
-
-
-
TS_12
@@ -777,22 +745,6 @@
-
-
-
src3
@@ -841,22 +793,6 @@
-
-
-
-
-
-
FS_10
@@ -1374,22 +1294,6 @@
-
-
-
SEL_9
@@ -1598,22 +1502,6 @@
-
-
-
JOIN_8
@@ -2147,22 +2035,6 @@
-
-
-
src2
@@ -2211,22 +2083,6 @@
-
-
-
-
-
-
src1
@@ -2506,22 +2346,6 @@
-
-
-
-
-
-
FS_11
@@ -3007,22 +2815,6 @@
-
-
-
JOIN_5
diff --git ql/src/test/results/compiler/plan/join3.q.xml ql/src/test/results/compiler/plan/join3.q.xml
index 603107c..15bd3f0 100644
--- ql/src/test/results/compiler/plan/join3.q.xml
+++ ql/src/test/results/compiler/plan/join3.q.xml
@@ -728,22 +728,6 @@
-
-
-
src2
@@ -778,22 +762,6 @@
-
-
-
-
-
-
src3
@@ -1080,22 +1032,6 @@
-
-
-
-
-
-
src1
@@ -1378,22 +1298,6 @@
-
-
-
-
-
-
FS_8
@@ -1853,22 +1741,6 @@
-
-
-
SEL_7
@@ -2095,22 +1967,6 @@
-
-
-
JOIN_6
diff --git ql/src/test/results/compiler/plan/join4.q.xml ql/src/test/results/compiler/plan/join4.q.xml
index 391b58d..2631f4d 100644
--- ql/src/test/results/compiler/plan/join4.q.xml
+++ ql/src/test/results/compiler/plan/join4.q.xml
@@ -466,22 +466,6 @@
-
-
-
a
@@ -589,22 +573,6 @@
-
-
-
SEL_5
@@ -747,22 +715,6 @@
-
-
-
FIL_12
@@ -821,22 +773,6 @@
-
-
-
-
-
-
b
@@ -1184,22 +1104,6 @@
-
-
-
SEL_2
@@ -1334,22 +1238,6 @@
-
-
-
FIL_13
@@ -1408,22 +1296,6 @@
-
-
-
-
-
-
FS_11
@@ -1944,22 +1800,6 @@
-
-
-
SEL_9
@@ -2222,22 +2062,6 @@
-
-
-
JOIN_8
diff --git ql/src/test/results/compiler/plan/join5.q.xml ql/src/test/results/compiler/plan/join5.q.xml
index 2669097..0274337 100644
--- ql/src/test/results/compiler/plan/join5.q.xml
+++ ql/src/test/results/compiler/plan/join5.q.xml
@@ -466,22 +466,6 @@
-
-
-
a
@@ -589,22 +573,6 @@
-
-
-
SEL_5
@@ -747,22 +715,6 @@
-
-
-
FIL_12
@@ -821,22 +773,6 @@
-
-
-
-
-
-
b
@@ -1184,22 +1104,6 @@
-
-
-
SEL_2
@@ -1334,22 +1238,6 @@
-
-
-
FIL_13
@@ -1408,22 +1296,6 @@
-
-
-
-
-
-
FS_11
@@ -1944,22 +1800,6 @@
-
-
-
SEL_9
@@ -2218,22 +2058,6 @@
-
-
-
JOIN_8
diff --git ql/src/test/results/compiler/plan/join6.q.xml ql/src/test/results/compiler/plan/join6.q.xml
index b92d70b..73c3fb2 100644
--- ql/src/test/results/compiler/plan/join6.q.xml
+++ ql/src/test/results/compiler/plan/join6.q.xml
@@ -466,22 +466,6 @@
-
-
-
a
@@ -589,22 +573,6 @@
-
-
-
SEL_5
@@ -747,22 +715,6 @@
-
-
-
FIL_12
@@ -821,22 +773,6 @@
-
-
-
-
-
-
b
@@ -1184,22 +1104,6 @@
-
-
-
SEL_2
@@ -1334,22 +1238,6 @@
-
-
-
FIL_13
@@ -1408,22 +1296,6 @@
-
-
-
-
-
-
FS_11
@@ -1944,22 +1800,6 @@
-
-
-
SEL_9
@@ -2225,22 +2065,6 @@
-
-
-
JOIN_8
diff --git ql/src/test/results/compiler/plan/join7.q.xml ql/src/test/results/compiler/plan/join7.q.xml
index d6253de..b0f7276 100644
--- ql/src/test/results/compiler/plan/join7.q.xml
+++ ql/src/test/results/compiler/plan/join7.q.xml
@@ -614,22 +614,6 @@
-
-
-
a
@@ -737,22 +721,6 @@
-
-
-
SEL_8
@@ -895,22 +863,6 @@
-
-
-
FIL_16
@@ -969,22 +921,6 @@
-
-
-
-
-
-
b
@@ -1332,22 +1252,6 @@
-
-
-
SEL_2
@@ -1482,22 +1386,6 @@
-
-
-
FIL_17
@@ -1556,22 +1444,6 @@
-
-
-
-
-
-
c
@@ -1915,22 +1771,6 @@
-
-
-
SEL_5
@@ -2065,22 +1905,6 @@
-
-
-
FIL_18
@@ -2139,22 +1963,6 @@
-
-
-
-
-
-
FS_15
@@ -2744,22 +2536,6 @@
-
-
-
SEL_13
@@ -3130,22 +2906,6 @@
-
-
-
JOIN_12
diff --git ql/src/test/results/compiler/plan/join8.q.xml ql/src/test/results/compiler/plan/join8.q.xml
index e1e71a7..e04f1dc 100644
--- ql/src/test/results/compiler/plan/join8.q.xml
+++ ql/src/test/results/compiler/plan/join8.q.xml
@@ -466,22 +466,6 @@
-
-
-
a
@@ -589,22 +573,6 @@
-
-
-
SEL_5
@@ -788,22 +756,6 @@
-
-
-
FIL_14
@@ -862,22 +814,6 @@
-
-
-
-
-
-
b
@@ -1225,22 +1145,6 @@
-
-
-
SEL_2
@@ -1416,22 +1320,6 @@
-
-
-
FIL_15
@@ -1490,22 +1378,6 @@
-
-
-
-
-
-
FS_12
@@ -2030,22 +1886,6 @@
-
-
-
SEL_9
@@ -2153,22 +1993,6 @@
-
-
-
FIL_13
@@ -2419,22 +2243,6 @@
-
-
-
JOIN_8
diff --git ql/src/test/results/compiler/plan/sample1.q.xml ql/src/test/results/compiler/plan/sample1.q.xml
index b2c40a3..18e7103 100644
--- ql/src/test/results/compiler/plan/sample1.q.xml
+++ ql/src/test/results/compiler/plan/sample1.q.xml
@@ -262,22 +262,6 @@
-
-
-
FS_4
@@ -461,22 +445,6 @@
-
-
-
SEL_3
@@ -717,22 +685,6 @@
-
-
-
FIL_1
@@ -817,22 +769,6 @@
-
-
-
-
-
-
FS_5
@@ -224,22 +208,6 @@
-
-
-
TS_4
@@ -740,22 +708,6 @@
-
-
-
FS_3
@@ -836,22 +788,6 @@
-
-
-
SEL_2
@@ -1039,22 +975,6 @@
-
-
-
FIL_1
@@ -1113,22 +1033,6 @@
-
-
-
-
-
-
FS_5
@@ -224,22 +208,6 @@
-
-
-
TS_4
@@ -740,22 +708,6 @@
-
-
-
FS_3
@@ -836,22 +788,6 @@
-
-
-
SEL_2
@@ -1049,22 +985,6 @@
-
-
-
FIL_1
@@ -1123,22 +1043,6 @@
-
-
-
-
-
-
FS_5
@@ -224,22 +208,6 @@
-
-
-
TS_4
@@ -740,22 +708,6 @@
-
-
-
FS_3
@@ -836,22 +788,6 @@
-
-
-
SEL_2
@@ -1039,22 +975,6 @@
-
-
-
FIL_1
@@ -1113,22 +1033,6 @@
-
-
-
-
-
-
FS_5
@@ -224,22 +208,6 @@
-
-
-
TS_4
@@ -740,22 +708,6 @@
-
-
-
FS_3
@@ -836,22 +788,6 @@
-
-
-
SEL_2
@@ -1036,22 +972,6 @@
-
-
-
FIL_1
@@ -1110,22 +1030,6 @@
-
-
-
-
-
-
FS_5
@@ -224,22 +208,6 @@
-
-
-
TS_4
@@ -740,22 +708,6 @@
-
-
-
FS_3
@@ -836,22 +788,6 @@
-
-
-
SEL_2
@@ -1039,22 +975,6 @@
-
-
-
FIL_1
@@ -1113,22 +1033,6 @@
-
-
-
-
-
-
FS_7
@@ -224,22 +208,6 @@
-
-
-
TS_6
@@ -740,22 +708,6 @@
-
-
-
FS_4
@@ -836,22 +788,6 @@
-
-
-
SEL_3
@@ -1084,22 +1020,6 @@
-
-
-
FIL_5
@@ -1158,22 +1078,6 @@
-
-
-
-
-
-
FS_7
@@ -157,22 +141,6 @@
-
-
-
TS_6
@@ -596,22 +564,6 @@
-
-
-
FS_4
@@ -688,22 +640,6 @@
-
-
-
SEL_2
@@ -807,22 +743,6 @@
-
-
-
FIL_5
@@ -881,22 +801,6 @@
-
-
-
-
-
-
FS_3
@@ -1454,22 +1438,6 @@
-
-
-
SEL_2
@@ -1758,22 +1726,6 @@
-
-
-
FIL_4
@@ -1819,22 +1771,6 @@
-
-
-
-
-
-
FS_2
@@ -1436,22 +1420,6 @@
-
-
-
SEL_1
@@ -1731,22 +1699,6 @@
-
-
-
diff --git ql/src/test/results/compiler/plan/udf6.q.xml ql/src/test/results/compiler/plan/udf6.q.xml
index 334fe0c..3e50bf7 100644
--- ql/src/test/results/compiler/plan/udf6.q.xml
+++ ql/src/test/results/compiler/plan/udf6.q.xml
@@ -221,22 +221,6 @@
-
-
-
FS_2
@@ -415,22 +399,6 @@
-
-
-
SEL_1
@@ -489,22 +457,6 @@
-
-
-
diff --git ql/src/test/results/compiler/plan/udf_case.q.xml ql/src/test/results/compiler/plan/udf_case.q.xml
index 67d55b8..f192090 100644
--- ql/src/test/results/compiler/plan/udf_case.q.xml
+++ ql/src/test/results/compiler/plan/udf_case.q.xml
@@ -225,22 +225,6 @@
-
-
-
FS_3
@@ -300,22 +284,6 @@
-
-
-
LIM_2
@@ -532,22 +500,6 @@
-
-
-
SEL_1
@@ -579,22 +531,6 @@
-
-
-
diff --git ql/src/test/results/compiler/plan/udf_when.q.xml ql/src/test/results/compiler/plan/udf_when.q.xml
index 8334326..d0abc8f 100644
--- ql/src/test/results/compiler/plan/udf_when.q.xml
+++ ql/src/test/results/compiler/plan/udf_when.q.xml
@@ -225,22 +225,6 @@
-
-
-
FS_3
@@ -300,22 +284,6 @@
-
-
-
LIM_2
@@ -612,22 +580,6 @@
-
-
-
SEL_1
@@ -659,22 +611,6 @@
-
-
-
diff --git ql/src/test/results/compiler/plan/union.q.xml ql/src/test/results/compiler/plan/union.q.xml
index 7ae8331..41ca170 100644
--- ql/src/test/results/compiler/plan/union.q.xml
+++ ql/src/test/results/compiler/plan/union.q.xml
@@ -89,22 +89,6 @@
-
-
-
FS_12
@@ -157,22 +141,6 @@
-
-
-
TS_11
@@ -752,22 +720,6 @@
-
-
-
FS_8
@@ -844,22 +796,6 @@
-
-
-
SEL_7
@@ -917,22 +853,6 @@
-
-
-
UNION_6
@@ -1009,22 +929,6 @@
-
-
-
SEL_5
@@ -1088,22 +992,6 @@
-
-
-
FIL_10
@@ -1128,22 +1016,6 @@
-
-
-
-
-
-
SEL_2
@@ -1486,22 +1342,6 @@
-
-
-
FIL_9
@@ -1560,22 +1400,6 @@
-
-
-