diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index 20637c0575..23433a5ca7 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -94,6 +94,11 @@
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <version>${project.version}</version>
+    </dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-exec</artifactId>
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
index 6a566182c3..dfa9903615 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
@@ -52,11 +52,11 @@
import org.apache.hadoop.hive.ql.index.IndexSearchCondition;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler.DecomposedPredicate;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -260,9 +260,9 @@ protected Object generateRanges(Configuration conf, ColumnMapper columnMapper,
String hiveRowIdColumnName, ExprNodeDesc root) {
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler,
columnMapper.getRowIdMapping(), hiveRowIdColumnName);
- Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
- Collections.<Rule, NodeProcessor> emptyMap(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+ Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
List<Node> roots = new ArrayList<Node>();
roots.add(root);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
index 17963820ed..fd4a8ccf5d 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.hive.accumulo.predicate.compare.LessThanOrEqual;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
@@ -45,7 +45,6 @@
import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.UTF8;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -60,7 +59,7 @@
/**
*
*/
-public class AccumuloRangeGenerator implements NodeProcessor {
+public class AccumuloRangeGenerator implements SemanticNodeProcessor {
private static final Logger LOG = LoggerFactory.getLogger(AccumuloRangeGenerator.class);
private final AccumuloPredicateHandler predicateHandler;
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
index 4975fa0d5e..0b7855678a 100644
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
+++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
@@ -24,22 +24,20 @@
import org.apache.hadoop.hive.accumulo.TestAccumuloDefaultIndexScanner;
import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding;
import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping;
-import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters;
import org.apache.hadoop.hive.common.type.Date;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToString;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan;
@@ -47,7 +45,6 @@
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPPlus;
-import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.junit.Assert;
import org.junit.Before;
@@ -113,9 +110,9 @@ public void testRangeConjunction() throws Exception {
.asList(new Range(new Key("f"), true, new Key("m\0"), false));
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
- Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
- Collections.<Rule, NodeProcessor> emptyMap(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+ Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(both);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -168,9 +165,9 @@ public void testRangeDisjunction() throws Exception {
List<Range> expectedRanges = Arrays.asList(new Range());
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
- Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
- Collections.<Rule, NodeProcessor> emptyMap(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+ Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(both);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -241,9 +238,9 @@ public void testRangeConjunctionWithDisjunction() throws Exception {
List<Range> expectedRanges = Arrays.asList(new Range(new Key("q"), true, null, false));
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
- Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
- Collections.<Rule, NodeProcessor> emptyMap(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+ Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(both);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -296,9 +293,9 @@ public void testPartialRangeConjunction() throws Exception {
List<Range> expectedRanges = Arrays.asList(new Range(new Key("f"), true, null, false));
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
- Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
- Collections.<Rule, NodeProcessor> emptyMap(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+ Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(both);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -354,9 +351,9 @@ public void testDateRangeConjunction() throws Exception {
"2014-07-01"), false));
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
- Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
- Collections.<Rule, NodeProcessor> emptyMap(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+ Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(both);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -401,9 +398,9 @@ public void testCastExpression() throws Exception {
new GenericUDFOPEqualOrGreaterThan(), Arrays.asList(key, cast));
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "key");
- Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
- Collections.<Rule, NodeProcessor> emptyMap(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+ Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(node);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -450,9 +447,9 @@ public void testRangeOverNonRowIdField() throws Exception {
new GenericUDFOPAnd(), bothFilters);
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
- Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
- Collections.<Rule, NodeProcessor> emptyMap(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+ Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(both);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -500,9 +497,9 @@ public void testRangeOverStringIndexedField() throws Exception {
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
- Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
- Collections.<Rule, NodeProcessor> emptyMap(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+ Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(both);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -558,9 +555,9 @@ public void testRangeOverIntegerIndexedField() throws Exception {
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
- Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
- Collections.<Rule, NodeProcessor> emptyMap(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+ Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(both);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
@@ -598,9 +595,9 @@ public void testRangeOverBooleanIndexedField() throws Exception {
AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
- Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
- Collections.<Rule, NodeProcessor> emptyMap(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+ Collections.<SemanticRule, SemanticNodeProcessor> emptyMap(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(node);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
similarity index 94%
rename from ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
rename to common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 467ce50e6f..8e643fe844 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -15,17 +15,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.hive.ql;
-import org.antlr.runtime.tree.Tree;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
-import org.apache.hadoop.hive.ql.parse.ASTNode;
-import org.apache.hadoop.hive.ql.parse.ASTNodeOrigin;
import org.apache.hadoop.security.AccessControlException;
import java.io.FileNotFoundException;
@@ -214,8 +209,7 @@
ALTER_COMMAND_FOR_VIEWS(10131, "To alter a view you need to use the ALTER VIEW command."),
ALTER_COMMAND_FOR_TABLES(10132, "To alter a base table you need to use the ALTER TABLE command."),
ALTER_VIEW_DISALLOWED_OP(10133, "Cannot use this form of ALTER on a view"),
- ALTER_TABLE_NON_NATIVE(10134, "ALTER TABLE can only be used for " + AlterTableType.NON_NATIVE_TABLE_ALLOWED +
- " to a non-native table "),
+ ALTER_TABLE_NON_NATIVE(10134, "ALTER TABLE can only be used for {0} to a non-native table {1}", true),
SORTMERGE_MAPJOIN_FAILED(10135,
"Sort merge bucketed join could not be performed. " +
"If you really want to perform the operation, either set " +
@@ -777,88 +771,6 @@ private ErrorMsg(int errorCode, String mesg, String sqlState, boolean format) {
this.format = format ? new MessageFormat(mesg) : null;
}
- private static int getLine(ASTNode tree) {
- if (tree.getChildCount() == 0) {
- return tree.getToken().getLine();
- }
-
- return getLine((ASTNode) tree.getChild(0));
- }
-
- private static int getCharPositionInLine(ASTNode tree) {
- if (tree.getChildCount() == 0) {
- return tree.getToken().getCharPositionInLine();
- }
-
- return getCharPositionInLine((ASTNode) tree.getChild(0));
- }
-
- // Dirty hack as this will throw away spaces and other things - find a better
- // way!
- public static String getText(ASTNode tree) {
- if (tree.getChildCount() == 0) {
- return tree.getText();
- }
- return getText((ASTNode) tree.getChild(tree.getChildCount() - 1));
- }
-
- public String getMsg(ASTNode tree) {
- StringBuilder sb = new StringBuilder();
- renderPosition(sb, tree);
- sb.append(" ");
- sb.append(mesg);
- sb.append(" '");
- sb.append(getText(tree));
- sb.append("'");
- renderOrigin(sb, tree.getOrigin());
- return sb.toString();
- }
-
- static final String LINE_SEP = System.getProperty("line.separator");
-
- public static void renderOrigin(StringBuilder sb, ASTNodeOrigin origin) {
- while (origin != null) {
- sb.append(" in definition of ");
- sb.append(origin.getObjectType());
- sb.append(" ");
- sb.append(origin.getObjectName());
- sb.append(" [");
- sb.append(LINE_SEP);
- sb.append(origin.getObjectDefinition());
- sb.append(LINE_SEP);
- sb.append("] used as ");
- sb.append(origin.getUsageAlias());
- sb.append(" at ");
- ASTNode usageNode = origin.getUsageNode();
- renderPosition(sb, usageNode);
- origin = usageNode.getOrigin();
- }
- }
-
- private static void renderPosition(StringBuilder sb, ASTNode tree) {
- sb.append("Line ");
- sb.append(getLine(tree));
- sb.append(":");
- sb.append(getCharPositionInLine(tree));
- }
- public static String renderPosition(ASTNode n) {
- StringBuilder sb = new StringBuilder();
- ErrorMsg.renderPosition(sb, n);
- return sb.toString();
- }
-
- public String getMsg(Tree tree) {
- return getMsg((ASTNode) tree);
- }
-
- public String getMsg(ASTNode tree, String reason) {
- return getMsg(tree) + ": " + reason;
- }
-
- public String getMsg(Tree tree, String reason) {
- return getMsg((ASTNode) tree, reason);
- }
-
public String getMsg(String reason) {
return mesg + " " + reason;
}
diff --git a/common/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java b/common/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java
new file mode 100644
index 0000000000..dec7a484b7
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.lib;
+
+import java.util.Stack;
+
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Dispatcher interface for Operators Used in operator graph walking to dispatch
+ * process/visitor functions for operators.
+ */
+public interface Dispatcher {
+
+ /**
+ * Dispatcher function.
+ *
+ * @param nd
+ * operator to process.
+ * @param stack
+ * operator stack to process.
+ * @param nodeOutputs
+ * The argument list of outputs from processing other nodes that are
+ * passed to this dispatcher from the walker.
+ * @return Object The return object from the processing call.
+ * @throws HiveException
+ */
+ Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs)
+ throws HiveException;
+
+}
diff --git a/common/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java b/common/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java
new file mode 100644
index 0000000000..37bb93d63d
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.lib;
+
+import java.util.Collection;
+import java.util.HashMap;
+
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Interface for operator graph walker.
+ */
+public interface GraphWalker {
+
+ /**
+ * starting point for walking.
+ *
+ * @param startNodes
+ * list of starting operators
+ * @param nodeOutput
+ * If this parameter is not null, the call to the function returns
+ * the map from node to objects returned by the processors.
+ * @throws HiveException
+ */
+ void startWalking(Collection<Node> startNodes,
+ HashMap<Node, Object> nodeOutput) throws HiveException;
+
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java b/common/src/java/org/apache/hadoop/hive/ql/lib/Node.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
rename to common/src/java/org/apache/hadoop/hive/ql/lib/Node.java
diff --git a/common/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java b/common/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java
new file mode 100644
index 0000000000..d8d1f5c746
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.lib;
+
+import java.util.Stack;
+
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Base class for processing operators which is no-op. The specific processors
+ * can register their own context with the dispatcher.
+ */
+public interface NodeProcessor {
+
+ /**
+ * Generic process for all ops that don't have specific implementations.
+ *
+ * @param nd
+ * operator to process
+ * @param procCtx
+ * operator processor context
+ * @param nodeOutputs
+ * A variable argument list of outputs from other nodes in the walk
+ * @return Object to be returned by the process call
+ * @throws HiveException
+ */
+ Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+ Object... nodeOutputs) throws HiveException;
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java b/common/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java
rename to common/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessorCtx.java
diff --git a/common/src/java/org/apache/hadoop/hive/ql/lib/Rule.java b/common/src/java/org/apache/hadoop/hive/ql/lib/Rule.java
new file mode 100644
index 0000000000..6594c2b5b2
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/ql/lib/Rule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.lib;
+
+import java.util.Stack;
+
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * Rule interface for Operators Used in operator dispatching to dispatch
+ * process/visitor functions for operators.
+ */
+public interface Rule {
+
+ /**
+ * @return the cost of the rule - the lower the cost, the better the rule
+ * matches
+ * @throws HiveException
+ */
+ int cost(Stack<Node> stack) throws HiveException;
+
+ /**
+ * @return the name of the rule - may be useful for debugging
+ */
+ String getName();
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java b/common/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
rename to common/src/java/org/apache/hadoop/hive/ql/metadata/HiveException.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java b/common/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java
rename to common/src/java/org/apache/hadoop/hive/ql/metadata/HiveFatalException.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticException.java b/common/src/java/org/apache/hadoop/hive/ql/parse/SemanticException.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticException.java
rename to common/src/java/org/apache/hadoop/hive/ql/parse/SemanticException.java
diff --git a/itests/pom.xml b/itests/pom.xml
index 6d8bf97d72..3dc95bfb28 100644
--- a/itests/pom.xml
+++ b/itests/pom.xml
@@ -177,6 +177,17 @@
      <artifactId>hive-serde</artifactId>
      <version>${project.version}</version>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <version>${project.version}</version>
+      <classifier>tests</classifier>
+    </dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-exec</artifactId>
diff --git a/itests/qtest-accumulo/pom.xml b/itests/qtest-accumulo/pom.xml
index 825fd3bcf9..b0373d5622 100644
--- a/itests/qtest-accumulo/pom.xml
+++ b/itests/qtest-accumulo/pom.xml
@@ -110,6 +110,11 @@
      <artifactId>hive-serde</artifactId>
      <scope>test</scope>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <scope>test</scope>
+    </dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-exec</artifactId>
diff --git a/itests/qtest-kudu/pom.xml b/itests/qtest-kudu/pom.xml
index a7e60e160a..132d22c5d5 100644
--- a/itests/qtest-kudu/pom.xml
+++ b/itests/qtest-kudu/pom.xml
@@ -99,6 +99,11 @@
      <artifactId>hive-serde</artifactId>
      <scope>test</scope>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <scope>test</scope>
+    </dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-exec</artifactId>
diff --git a/itests/qtest-spark/pom.xml b/itests/qtest-spark/pom.xml
index ebd7b63e6c..b6bbeef9e8 100644
--- a/itests/qtest-spark/pom.xml
+++ b/itests/qtest-spark/pom.xml
@@ -160,6 +160,11 @@
      <artifactId>hive-serde</artifactId>
      <scope>test</scope>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <scope>test</scope>
+    </dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-exec</artifactId>
diff --git a/kudu-handler/pom.xml b/kudu-handler/pom.xml
index 6f02bd0ab8..c4661fac47 100644
--- a/kudu-handler/pom.xml
+++ b/kudu-handler/pom.xml
@@ -35,6 +35,12 @@
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <version>${project.version}</version>
+      <scope>provided</scope>
+    </dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-exec</artifactId>
diff --git a/parser/pom.xml b/parser/pom.xml
new file mode 100644
index 0000000000..05fd78d4a3
--- /dev/null
+++ b/parser/pom.xml
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hive</groupId>
+    <artifactId>hive</artifactId>
+    <version>4.0.0-SNAPSHOT</version>
+    <relativePath>../pom.xml</relativePath>
+  </parent>
+
+  <artifactId>hive-parser</artifactId>
+  <packaging>jar</packaging>
+  <name>Hive Parser</name>
+
+  <properties>
+    <hive.path.to.root>..</hive.path.to.root>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-common</artifactId>
+      <version>${project.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.eclipse.jetty.aggregate</groupId>
+          <artifactId>jetty-all</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.antlr</groupId>
+      <artifactId>antlr-runtime</artifactId>
+      <version>${antlr.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.antlr</groupId>
+      <artifactId>ST4</artifactId>
+      <version>${ST4.version}</version>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <sourceDirectory>${basedir}/src/java</sourceDirectory>
+    <testSourceDirectory>${basedir}/src/test</testSourceDirectory>
+    <plugins>
+      <plugin>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr3-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>antlr</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <sourceDirectory>${basedir}/src/java</sourceDirectory>
+          <includes>
+            <include>**/HiveLexer.g</include>
+            <include>**/HiveParser.g</include>
+            <include>**/HintParser.g</include>
+          </includes>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>add-source</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>${project.build.directory}/generated-sources/java</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorNode.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorNode.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorNode.java
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorNode.java
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorUtils.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorUtils.java
new file mode 100644
index 0000000000..ed05673e12
--- /dev/null
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTErrorUtils.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse;
+
+import org.antlr.runtime.tree.Tree;
+
+public class ASTErrorUtils {
+
+ private static int getLine(ASTNode tree) {
+ if (tree.getChildCount() == 0) {
+ return tree.getToken().getLine();
+ }
+
+ return getLine((ASTNode) tree.getChild(0));
+ }
+
+ private static int getCharPositionInLine(ASTNode tree) {
+ if (tree.getChildCount() == 0) {
+ return tree.getToken().getCharPositionInLine();
+ }
+
+ return getCharPositionInLine((ASTNode) tree.getChild(0));
+ }
+
+ // Dirty hack as this will throw away spaces and other things - find a better
+ // way!
+ public static String getText(ASTNode tree) {
+ if (tree.getChildCount() == 0) {
+ return tree.getText();
+ }
+ return getText((ASTNode) tree.getChild(tree.getChildCount() - 1));
+ }
+
+ public static String getMsg(String mesg, ASTNode tree) {
+ StringBuilder sb = new StringBuilder();
+ renderPosition(sb, tree);
+ sb.append(" ");
+ sb.append(mesg);
+ sb.append(" '");
+ sb.append(getText(tree));
+ sb.append("'");
+ renderOrigin(sb, tree.getOrigin());
+ return sb.toString();
+ }
+
+ static final String LINE_SEP = System.getProperty("line.separator");
+
+ public static void renderOrigin(StringBuilder sb, ASTNodeOrigin origin) {
+ while (origin != null) {
+ sb.append(" in definition of ");
+ sb.append(origin.getObjectType());
+ sb.append(" ");
+ sb.append(origin.getObjectName());
+ sb.append(" [");
+ sb.append(LINE_SEP);
+ sb.append(origin.getObjectDefinition());
+ sb.append(LINE_SEP);
+ sb.append("] used as ");
+ sb.append(origin.getUsageAlias());
+ sb.append(" at ");
+ ASTNode usageNode = origin.getUsageNode();
+ renderPosition(sb, usageNode);
+ origin = usageNode.getOrigin();
+ }
+ }
+
+ private static void renderPosition(StringBuilder sb, ASTNode tree) {
+ sb.append("Line ");
+ sb.append(getLine(tree));
+ sb.append(":");
+ sb.append(getCharPositionInLine(tree));
+ }
+
+ public static String renderPosition(ASTNode n) {
+ StringBuilder sb = new StringBuilder();
+ renderPosition(sb, n);
+ return sb.toString();
+ }
+
+ public static String getMsg(String mesg, Tree tree) {
+ return getMsg(mesg, (ASTNode) tree);
+ }
+
+ public static String getMsg(String mesg, ASTNode tree, String reason) {
+ return getMsg(mesg, tree) + ": " + reason;
+ }
+
+ public static String getMsg(String mesg, Tree tree, String reason) {
+ return getMsg(mesg, (ASTNode) tree, reason);
+ }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/HintParser.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseError.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ParseError.java
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ParseError.java
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ParseError.java
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
similarity index 100%
rename from ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
rename to parser/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
diff --git a/pom.xml b/pom.xml
index d4c23a0d8e..2dd2128e88 100644
--- a/pom.xml
+++ b/pom.xml
@@ -46,6 +46,8 @@
    <module>hplsql</module>
    <module>jdbc</module>
    <module>metastore</module>
+    <module>parser</module>
+    <module>udf</module>
    <module>ql</module>
    <module>serde</module>
    <module>service-rpc</module>
diff --git a/ql/pom.xml b/ql/pom.xml
index 08a8ff751b..3632a5efe4 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -63,6 +63,16 @@
      <artifactId>hive-serde</artifactId>
      <version>${project.version}</version>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-parser</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-udf</artifactId>
+      <version>${project.version}</version>
+    </dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-service-rpc</artifactId>
@@ -144,16 +154,6 @@
      <artifactId>log4j-slf4j-impl</artifactId>
      <version>${log4j2.version}</version>
-    <dependency>
-      <groupId>org.antlr</groupId>
-      <artifactId>antlr-runtime</artifactId>
-      <version>${antlr.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.antlr</groupId>
-      <artifactId>ST4</artifactId>
-      <version>${ST4.version}</version>
-    </dependency>
      <groupId>org.apache.avro</groupId>
      <artifactId>avro</artifactId>
@@ -856,25 +856,6 @@
    <testSourceDirectory>${basedir}/src/test</testSourceDirectory>
-      <plugin>
-        <groupId>org.antlr</groupId>
-        <artifactId>antlr3-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>antlr</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <sourceDirectory>${basedir}/src/java</sourceDirectory>
-          <includes>
-            <include>**/HiveLexer.g</include>
-            <include>**/HiveParser.g</include>
-            <include>**/HintParser.g</include>
-          </includes>
-        </configuration>
-      </plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-antrun-plugin</artifactId>
@@ -959,6 +940,7 @@
                  <include>org.apache.hive:hive-common</include>
+                  <include>org.apache.hive:hive-udf</include>
                  <include>org.apache.hive:hive-exec</include>
                  <include>org.apache.hive:hive-serde</include>
                  <include>org.apache.hive:hive-llap-common</include>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/create/CreateMacroAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/create/CreateMacroAnalyzer.java
index 4eed5c9861..a2177e0291 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/create/CreateMacroAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/function/macro/create/CreateMacroAnalyzer.java
@@ -37,7 +37,7 @@
import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
import org.apache.hadoop.hive.ql.ddl.DDLWork;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.PreOrderWalker;
import org.apache.hadoop.hive.ql.parse.ASTNode;
@@ -91,7 +91,7 @@ public void analyzeInternal(ASTNode root) throws SemanticException {
if (!arguments.isEmpty()) {
// Walk down expression to see which arguments are actually used.
Node expression = (Node) root.getChild(2);
- PreOrderWalker walker = new PreOrderWalker(new Dispatcher() {
+ PreOrderWalker walker = new PreOrderWalker(new SemanticDispatcher() {
@Override
public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws SemanticException {
if (nd instanceof ASTNode) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
index 105636e340..81800fe000 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractAlterTableAnalyzer.java
@@ -189,7 +189,8 @@ protected void validateAlterTableType(Table tbl, AlterTableType op, boolean expe
}
}
if (tbl.isNonNative() && !AlterTableType.NON_NATIVE_TABLE_ALLOWED.contains(op)) {
- throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.getMsg(tbl.getTableName()));
+ throw new SemanticException(ErrorMsg.ALTER_TABLE_NON_NATIVE.format(
+ AlterTableType.NON_NATIVE_TABLE_ALLOWED.toString(), tbl.getTableName()));
}
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 3e4364612b..db5ee8d536 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -1074,27 +1074,6 @@ public static GenericUDAFEvaluator getGenericWindowingEvaluator(String name,
system.getGenericWindowingEvaluator(name, argumentOIs, isDistinct, isAllColumns);
}
- /**
- * This method is shared between UDFRegistry and UDAFRegistry. methodName will
- * be "evaluate" for UDFRegistry, and "aggregate"/"evaluate"/"evaluatePartial"
- * for UDAFRegistry.
- * @throws UDFArgumentException
- */
- public static <T> Method getMethodInternal(Class<? extends T> udfClass,
- String methodName, boolean exact, List<TypeInfo> argumentClasses)
- throws UDFArgumentException {
-
- List<Method> mlist = new ArrayList<Method>();
-
- for (Method m : udfClass.getMethods()) {
- if (m.getName().equals(methodName)) {
- mlist.add(m);
- }
- }
-
- return getMethodInternal(udfClass, mlist, exact, argumentClasses);
- }
-
public static GenericUDAFResolver getGenericUDAFResolver(String functionName)
throws SemanticException {
if (LOG.isDebugEnabled()) {
@@ -1141,253 +1120,6 @@ public static Object invoke(Method m, Object thisObject, Object... arguments)
return o;
}
- /**
- * Returns -1 if passed does not match accepted. Otherwise return the cost
- * (usually 0 for no conversion and 1 for conversion).
- */
- public static int matchCost(TypeInfo argumentPassed,
- TypeInfo argumentAccepted, boolean exact) {
- if (argumentAccepted.equals(argumentPassed)
- || TypeInfoUtils.doPrimitiveCategoriesMatch(argumentPassed, argumentAccepted)) {
- // matches
- return 0;
- }
- if (argumentPassed.equals(TypeInfoFactory.voidTypeInfo)) {
- // passing null matches everything
- return 0;
- }
- if (argumentPassed.getCategory().equals(Category.LIST)
- && argumentAccepted.getCategory().equals(Category.LIST)) {
- // lists are compatible if and only-if the elements are compatible
- TypeInfo argumentPassedElement = ((ListTypeInfo) argumentPassed)
- .getListElementTypeInfo();
- TypeInfo argumentAcceptedElement = ((ListTypeInfo) argumentAccepted)
- .getListElementTypeInfo();
- return matchCost(argumentPassedElement, argumentAcceptedElement, exact);
- }
- if (argumentPassed.getCategory().equals(Category.MAP)
- && argumentAccepted.getCategory().equals(Category.MAP)) {
- // lists are compatible if and only-if the elements are compatible
- TypeInfo argumentPassedKey = ((MapTypeInfo) argumentPassed)
- .getMapKeyTypeInfo();
- TypeInfo argumentAcceptedKey = ((MapTypeInfo) argumentAccepted)
- .getMapKeyTypeInfo();
- TypeInfo argumentPassedValue = ((MapTypeInfo) argumentPassed)
- .getMapValueTypeInfo();
- TypeInfo argumentAcceptedValue = ((MapTypeInfo) argumentAccepted)
- .getMapValueTypeInfo();
- int cost1 = matchCost(argumentPassedKey, argumentAcceptedKey, exact);
- int cost2 = matchCost(argumentPassedValue, argumentAcceptedValue, exact);
- if (cost1 < 0 || cost2 < 0) {
- return -1;
- }
- return Math.max(cost1, cost2);
- }
-
- if (argumentAccepted.equals(TypeInfoFactory.unknownTypeInfo)) {
- // accepting Object means accepting everything,
- // but there is a conversion cost.
- return 1;
- }
- if (!exact && TypeInfoUtils.implicitConvertible(argumentPassed, argumentAccepted)) {
- return 1;
- }
-
- return -1;
- }
-
- /**
- * Given a set of candidate methods and list of argument types, try to
- * select the best candidate based on how close the passed argument types are
- * to the candidate argument types.
- * For a varchar argument, we would prefer evaluate(string) over evaluate(double).
- * @param udfMethods list of candidate methods
- * @param argumentsPassed list of argument types to match to the candidate methods
- */
- static void filterMethodsByTypeAffinity(List<Method> udfMethods, List<TypeInfo> argumentsPassed) {
- if (udfMethods.size() > 1) {
- // Prefer methods with a closer signature based on the primitive grouping of each argument.
- // Score each method based on its similarity to the passed argument types.
- int currentScore = 0;
- int bestMatchScore = 0;
- Method bestMatch = null;
- for (Method m: udfMethods) {
- currentScore = 0;
- List<TypeInfo> argumentsAccepted =
- TypeInfoUtils.getParameterTypeInfos(m, argumentsPassed.size());
- Iterator<TypeInfo> argsPassedIter = argumentsPassed.iterator();
- for (TypeInfo acceptedType : argumentsAccepted) {
- // Check the affinity of the argument passed in with the accepted argument,
- // based on the PrimitiveGrouping
- TypeInfo passedType = argsPassedIter.next();
- if (acceptedType.getCategory() == Category.PRIMITIVE
- && passedType.getCategory() == Category.PRIMITIVE) {
- PrimitiveGrouping acceptedPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(
- ((PrimitiveTypeInfo) acceptedType).getPrimitiveCategory());
- PrimitiveGrouping passedPg = PrimitiveObjectInspectorUtils.getPrimitiveGrouping(
- ((PrimitiveTypeInfo) passedType).getPrimitiveCategory());
- if (acceptedPg == passedPg) {
- // The passed argument matches somewhat closely with an accepted argument
- ++currentScore;
- }
- }
- }
- // Check if the score for this method is any better relative to others
- if (currentScore > bestMatchScore) {
- bestMatchScore = currentScore;
- bestMatch = m;
- } else if (currentScore == bestMatchScore) {
- bestMatch = null; // no longer a best match if more than one.
- }
- }
-
- if (bestMatch != null) {
- // Found a best match during this processing, use it.
- udfMethods.clear();
- udfMethods.add(bestMatch);
- }
- }
- }
-
- /**
- * Gets the closest matching method corresponding to the argument list from a
- * list of methods.
- *
- * @param mlist
- * The list of methods to inspect.
- * @param exact
- * Boolean to indicate whether this is an exact match or not.
- * @param argumentsPassed
- * The classes for the argument.
- * @return The matching method.
- */
- public static Method getMethodInternal(Class<?> udfClass, List<Method> mlist, boolean exact,
- List<TypeInfo> argumentsPassed) throws UDFArgumentException {
-
- // result
- List<Method> udfMethods = new ArrayList<Method>();
- // The cost of the result
- int leastConversionCost = Integer.MAX_VALUE;
-
- for (Method m : mlist) {
- List<TypeInfo> argumentsAccepted = TypeInfoUtils.getParameterTypeInfos(m,
- argumentsPassed.size());
- if (argumentsAccepted == null) {
- // null means the method does not accept number of arguments passed.
- continue;
- }
-
- boolean match = (argumentsAccepted.size() == argumentsPassed.size());
- int conversionCost = 0;
-
- for (int i = 0; i < argumentsPassed.size() && match; i++) {
- int cost = matchCost(argumentsPassed.get(i), argumentsAccepted.get(i),
- exact);
- if (cost == -1) {
- match = false;
- } else {
- conversionCost += cost;
- }
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug("Method " + (match ? "did" : "didn't") + " match: passed = "
- + argumentsPassed + " accepted = " + argumentsAccepted +
- " method = " + m);
- }
- if (match) {
- // Always choose the function with least implicit conversions.
- if (conversionCost < leastConversionCost) {
- udfMethods.clear();
- udfMethods.add(m);
- leastConversionCost = conversionCost;
- // Found an exact match
- if (leastConversionCost == 0) {
- break;
- }
- } else if (conversionCost == leastConversionCost) {
- // Ambiguous call: two methods with the same number of implicit
- // conversions
- udfMethods.add(m);
- // Don't break! We might find a better match later.
- } else {
- // do nothing if implicitConversions > leastImplicitConversions
- }
- }
- }
-
- if (udfMethods.size() == 0) {
- // No matching methods found
- throw new NoMatchingMethodException(udfClass, argumentsPassed, mlist);
- }
-
- if (udfMethods.size() > 1) {
- // First try selecting methods based on the type affinity of the arguments passed
- // to the candidate method arguments.
- filterMethodsByTypeAffinity(udfMethods, argumentsPassed);
- }
-
- if (udfMethods.size() > 1) {
-
- // if the only difference is numeric types, pick the method
- // with the smallest overall numeric type.
- int lowestNumericType = Integer.MAX_VALUE;
- boolean multiple = true;
- Method candidate = null;
- List<TypeInfo> referenceArguments = null;
-
- for (Method m: udfMethods) {
- int maxNumericType = 0;
-
- List<TypeInfo> argumentsAccepted = TypeInfoUtils.getParameterTypeInfos(m, argumentsPassed.size());
-
- if (referenceArguments == null) {
- // keep the arguments for reference - we want all the non-numeric
- // arguments to be the same
- referenceArguments = argumentsAccepted;
- }
-
- Iterator<TypeInfo> referenceIterator = referenceArguments.iterator();
-
- for (TypeInfo accepted: argumentsAccepted) {
- TypeInfo reference = referenceIterator.next();
-
- boolean acceptedIsPrimitive = false;
- PrimitiveCategory acceptedPrimCat = PrimitiveCategory.UNKNOWN;
- if (accepted.getCategory() == Category.PRIMITIVE) {
- acceptedIsPrimitive = true;
- acceptedPrimCat = ((PrimitiveTypeInfo) accepted).getPrimitiveCategory();
- }
- if (acceptedIsPrimitive && TypeInfoUtils.numericTypes.containsKey(acceptedPrimCat)) {
- // We're looking for the udf with the smallest maximum numeric type.
- int typeValue = TypeInfoUtils.numericTypes.get(acceptedPrimCat);
- maxNumericType = typeValue > maxNumericType ? typeValue : maxNumericType;
- } else if (!accepted.equals(reference)) {
- // There are non-numeric arguments that don't match from one UDF to
- // another. We give up at this point.
- throw new AmbiguousMethodException(udfClass, argumentsPassed, mlist);
- }
- }
-
- if (lowestNumericType > maxNumericType) {
- multiple = false;
- lowestNumericType = maxNumericType;
- candidate = m;
- } else if (maxNumericType == lowestNumericType) {
- // multiple udfs with the same max type. Unless we find a lower one
- // we'll give up.
- multiple = true;
- }
- }
-
- if (!multiple) {
- return candidate;
- } else {
- throw new AmbiguousMethodException(udfClass, argumentsPassed, mlist);
- }
- }
- return udfMethods.get(0);
- }
-
/**
* A shortcut to get the "index" GenericUDF. This is used for getting elements
* out of array and getting values out of map.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 067c0f002b..08eec1948d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -87,12 +87,12 @@
import org.apache.hadoop.hive.ql.io.merge.MergeFileWork;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -188,7 +188,7 @@
*/
private final ConcurrentHashMap copyNotifiers = new ConcurrentHashMap<>();
- class CollectFileSinkUrisNodeProcessor implements NodeProcessor {
+ class CollectFileSinkUrisNodeProcessor implements SemanticNodeProcessor {
private final Set<URI> uris;
@@ -218,7 +218,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
}
}
- private void addCollectFileSinkUrisRules(Map<Rule, NodeProcessor> opRules, NodeProcessor np) {
+ private void addCollectFileSinkUrisRules(Map<SemanticRule, SemanticNodeProcessor> opRules, SemanticNodeProcessor np) {
opRules.put(new RuleRegExp("R1", FileSinkOperator.getOperatorName() + ".*"), np);
}
@@ -226,11 +226,11 @@ private void collectFileSinkUris(List topNodes, Set uris) {
CollectFileSinkUrisNodeProcessor np = new CollectFileSinkUrisNodeProcessor(uris);
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
addCollectFileSinkUrisRules(opRules, np);
- Dispatcher disp = new DefaultRuleDispatcher(np, opRules, null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(np, opRules, null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
try {
ogw.startWalking(topNodes, null);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/AccurateEstimatesCheckerHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/AccurateEstimatesCheckerHook.java
index 8299894da9..90264c13db 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/AccurateEstimatesCheckerHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/AccurateEstimatesCheckerHook.java
@@ -29,10 +29,10 @@
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
@@ -56,7 +56,7 @@
private double absErr;
private double relErr;
- class EstimateCheckerHook implements NodeProcessor {
+ class EstimateCheckerHook implements SemanticNodeProcessor {
Map> opMap = new HashMap<>();
@@ -136,8 +136,8 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, List> r
return;
}
- Dispatcher disp = new DefaultRuleDispatcher(new EstimateCheckerHook(), new HashMap<>(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(new EstimateCheckerHook(), new HashMap<>(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
ogw.startWalking(rootOps, nodeOutput);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java
index 9a6a2e9df7..ca9a954cb0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/NoOperatorReuseCheckerHook.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.hooks;
-import java.io.Serializable;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -29,10 +28,10 @@
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.BaseWork;
@@ -46,7 +45,7 @@
*/
public class NoOperatorReuseCheckerHook implements ExecuteWithHookContext {
- static class UniqueOpIdChecker implements NodeProcessor {
+ static class UniqueOpIdChecker implements SemanticNodeProcessor {
Map> opMap = new HashMap<>();
@@ -94,8 +93,8 @@ public void run(HookContext hookContext) throws Exception {
return;
}
- Dispatcher disp = new DefaultRuleDispatcher(new UniqueOpIdChecker(), new HashMap<>(), null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(new UniqueOpIdChecker(), new HashMap<>(), null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
ogw.startWalking(rootOps, nodeOutput);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java
index f39ba87a80..e0b85de467 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java
@@ -20,12 +20,12 @@
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -142,11 +142,11 @@ public ExprNodeDesc analyzePredicate(
ExprNodeDesc predicate,
final List<IndexSearchCondition> searchConditions) {
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
- NodeProcessor nodeProcessor = new NodeProcessor() {
+ Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
+ SemanticNodeProcessor nodeProcessor = new SemanticNodeProcessor() {
@Override
public Object process(Node nd, Stack<Node> stack,
- NodeProcessorCtx procCtx, Object... nodeOutputs)
+ NodeProcessorCtx procCtx, Object... nodeOutputs)
throws SemanticException {
// We can only push down stuff which appears as part of
@@ -164,9 +164,9 @@ public Object process(Node nd, Stack stack,
}
};
- Dispatcher disp = new DefaultRuleDispatcher(
+ SemanticDispatcher disp = new DefaultRuleDispatcher(
nodeProcessor, opRules, null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(predicate);
HashMap<Node, Object> nodeOutput = new HashMap<Node, Object>();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/CompositeProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/CompositeProcessor.java
index 9f843fef6c..a4afad44d2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/CompositeProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/CompositeProcessor.java
@@ -26,18 +26,18 @@
* rule.
*
*/
-public class CompositeProcessor implements NodeProcessor {
+public class CompositeProcessor implements SemanticNodeProcessor {
- NodeProcessor[] procs;
+ SemanticNodeProcessor[] procs;
- public CompositeProcessor(NodeProcessor...nodeProcessors) {
+ public CompositeProcessor(SemanticNodeProcessor...nodeProcessors) {
procs = nodeProcessors;
}
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs)
throws SemanticException {
- for (NodeProcessor proc: procs) {
+ for (SemanticNodeProcessor proc: procs) {
proc.process(nd, stack, procCtx, nodeOutputs);
}
return null;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/CostLessRuleDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/CostLessRuleDispatcher.java
index a67044e23e..7e060e24e8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/CostLessRuleDispatcher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/CostLessRuleDispatcher.java
@@ -28,11 +28,11 @@
* Dispatches calls to relevant method in processor. The user registers various
* rules with the dispatcher, and the processor corresponding to the type of node
*/
-public class CostLessRuleDispatcher implements Dispatcher {
+public class CostLessRuleDispatcher implements SemanticDispatcher {
- private final SetMultimap procRules;
+ private final SetMultimap procRules;
private final NodeProcessorCtx procCtx;
- private final NodeProcessor defaultProc;
+ private final SemanticNodeProcessor defaultProc;
/**
* Constructor.
@@ -41,8 +41,8 @@
* @param rules Map mapping the node's type to processor
* @param procCtx operator processor context, which is opaque to the dispatcher
*/
- public CostLessRuleDispatcher(NodeProcessor defaultProc, SetMultimap rules,
- NodeProcessorCtx procCtx) {
+ public CostLessRuleDispatcher(SemanticNodeProcessor defaultProc, SetMultimap rules,
+ NodeProcessorCtx procCtx) {
this.defaultProc = defaultProc;
procRules = rules;
this.procCtx = procCtx;
@@ -59,7 +59,7 @@ public CostLessRuleDispatcher(NodeProcessor defaultProc, SetMultimap
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultGraphWalker.java
protected final List toWalk = new ArrayList();
protected final IdentityHashMap retMap = new IdentityHashMap();
- protected final Dispatcher dispatcher;
+ protected final SemanticDispatcher dispatcher;
/**
* Constructor.
@@ -63,7 +63,7 @@
* @param disp
* dispatcher to call for each op encountered
*/
- public DefaultGraphWalker(Dispatcher disp) {
+ public DefaultGraphWalker(SemanticDispatcher disp) {
dispatcher = disp;
opStack = new Stack();
opQueue = new LinkedList();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java
index d0bb77f9ca..0cb816be2b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/DefaultRuleDispatcher.java
@@ -28,11 +28,11 @@
* rules with the dispatcher, and the processor corresponding to closest
* matching rule is fired.
*/
-public class DefaultRuleDispatcher implements Dispatcher {
+public class DefaultRuleDispatcher implements SemanticDispatcher {
- private final Map&lt;Rule, NodeProcessor&gt; procRules;
+ private final Map&lt;SemanticRule, SemanticNodeProcessor&gt; procRules;
private final NodeProcessorCtx procCtx;
- private final NodeProcessor defaultProc;
+ private final SemanticNodeProcessor defaultProc;
/**
* Constructor.
@@ -44,8 +44,8 @@
* @param procCtx
* operator processor context, which is opaque to the dispatcher
*/
- public DefaultRuleDispatcher(NodeProcessor defaultProc,
- Map&lt;Rule, NodeProcessor&gt; rules, NodeProcessorCtx procCtx) {
+ public DefaultRuleDispatcher(SemanticNodeProcessor defaultProc,
+ Map&lt;SemanticRule, SemanticNodeProcessor&gt; rules, NodeProcessorCtx procCtx) {
this.defaultProc = defaultProc;
procRules = rules;
this.procCtx = procCtx;
@@ -66,9 +66,9 @@ public Object dispatch(Node nd, Stack ndStack, Object... nodeOutputs)
// find the firing rule
// find the rule from the stack specified
- Rule rule = null;
+ SemanticRule rule = null;
int minCost = Integer.MAX_VALUE;
- for (Rule r : procRules.keySet()) {
+ for (SemanticRule r : procRules.keySet()) {
int cost = r.cost(ndStack);
if ((cost >= 0) && (cost <= minCost)) {
minCost = cost;
@@ -76,7 +76,7 @@ public Object dispatch(Node nd, Stack ndStack, Object... nodeOutputs)
}
}
- NodeProcessor proc;
+ SemanticNodeProcessor proc;
if (rule == null) {
proc = defaultProc;
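
For the selection loop above: dispatch() asks every registered SemanticRule for its cost against the current node stack, fires the processor of the lowest non-negative cost, and falls back to defaultProc when no rule matches. A hypothetical registration in that style (filterProc, defaultProc and procCtx are placeholders; RuleRegExp keyed on an operator-name pattern is the idiom the optimizer hunks below use):

    // Assumes the imports from the wiring sketch above, plus
    // org.apache.hadoop.hive.ql.exec.FilterOperator and org.apache.hadoop.hive.ql.lib.RuleRegExp.
    Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<>();
    // Matches when the top of the walked stack is a FilterOperator ("FIL%"-style pattern).
    opRules.put(new RuleRegExp("R1", FilterOperator.getOperatorName() + "%"), filterProc);
    SemanticDispatcher disp = new DefaultRuleDispatcher(defaultProc, opRules, procCtx);
    // While walking, RuleRegExp.cost() returns a non-negative value on a pattern match
    // and -1 otherwise, so unmatched nodes fall through to defaultProc.
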
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/ExpressionWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/ExpressionWalker.java
index 7b193144b9..bb5b2ece86 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/ExpressionWalker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/ExpressionWalker.java
@@ -36,7 +36,7 @@
* @param disp
* dispatcher to call for each op encountered
*/
- public ExpressionWalker(Dispatcher disp) {
+ public ExpressionWalker(SemanticDispatcher disp) {
super(disp);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java
index d64d6949f8..45ebd18ea7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java
@@ -30,7 +30,7 @@
* @param disp
* dispatcher to call for each op encountered
*/
- public ForwardWalker(Dispatcher disp) {
+ public ForwardWalker(SemanticDispatcher disp) {
super(disp);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/LevelOrderWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/LevelOrderWalker.java
index bccd9fb7da..96ed10d573 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/LevelOrderWalker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/LevelOrderWalker.java
@@ -60,7 +60,7 @@
*
* @param disp Dispatcher to call for each op encountered
*/
- public LevelOrderWalker(Dispatcher disp) {
+ public LevelOrderWalker(SemanticDispatcher disp) {
super(disp);
this.numLevels = Integer.MAX_VALUE;
}
@@ -72,7 +72,7 @@ public LevelOrderWalker(Dispatcher disp) {
* @param disp Dispatcher to call for each op encountered
* @param numLevels Number of ancestor levels
*/
- public LevelOrderWalker(Dispatcher disp, int numLevels) {
+ public LevelOrderWalker(SemanticDispatcher disp, int numLevels) {
super(disp);
this.numLevels = numLevels;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderOnceWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderOnceWalker.java
index 06e144e877..ad3356f7bb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderOnceWalker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderOnceWalker.java
@@ -26,7 +26,7 @@
*/
public class PreOrderOnceWalker extends PreOrderWalker {
- public PreOrderOnceWalker(Dispatcher disp) {
+ public PreOrderOnceWalker(SemanticDispatcher disp) {
super(disp);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java
index be31656d23..12e2f1769b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderWalker.java
@@ -39,7 +39,7 @@
* @param disp
* dispatcher to call for each op encountered
*/
- public PreOrderWalker(Dispatcher disp) {
+ public PreOrderWalker(SemanticDispatcher disp) {
super(disp);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
index 10409b67e9..fe407c13a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
@@ -27,7 +27,7 @@
* process/visitor functions for Nodes. The cost method returns 1 if there is an exact
* match between the expression and the stack, otherwise -1.
*/
-public class RuleExactMatch implements Rule {
+public class RuleExactMatch implements SemanticRule {
private final String ruleName;
private final String[] pattern;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
index db62db2c40..fdfc599c2b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
@@ -33,7 +33,7 @@
* Rule interface for Nodes Used in Node dispatching to dispatch process/visitor
* functions for Nodes.
*/
-public class RuleRegExp implements Rule {
+public class RuleRegExp implements SemanticRule {
private final String ruleName;
private final Pattern patternWithWildCardChar;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticDispatcher.java
similarity index 95%
rename from ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java
rename to ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticDispatcher.java
index 34fd5a4d5b..1ea1d4094d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/Dispatcher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticDispatcher.java
@@ -26,7 +26,7 @@
* Dispatcher interface for Operators Used in operator graph walking to dispatch
* process/visitor functions for operators.
*/
-public interface Dispatcher {
+public interface SemanticDispatcher extends Dispatcher {
/**
* Dispatcher function.
@@ -41,6 +41,7 @@
* @return Object The return object from the processing call.
* @throws SemanticException
*/
+ @Override
Object dispatch(Node nd, Stack stack, Object... nodeOutputs)
throws SemanticException;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticGraphWalker.java
similarity index 95%
rename from ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java
rename to ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticGraphWalker.java
index 032f1a5e9f..63544203eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticGraphWalker.java
@@ -26,7 +26,7 @@
/**
* Interface for operator graph walker.
*/
-public interface GraphWalker {
+public interface SemanticGraphWalker extends GraphWalker {
/**
* starting point for walking.
@@ -38,6 +38,7 @@
* the map from node to objects returned by the processors.
* @throws SemanticException
*/
+ @Override
void startWalking(Collection startNodes,
HashMap nodeOutput) throws SemanticException;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticNodeProcessor.java
similarity index 95%
rename from ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java
rename to ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticNodeProcessor.java
index 069a7e1d5a..cfee71dbdd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/NodeProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticNodeProcessor.java
@@ -25,7 +25,7 @@
* Base class for processing operators which is no-op. The specific processors
* can register their own context with the dispatcher.
*/
-public interface NodeProcessor {
+public interface SemanticNodeProcessor extends NodeProcessor {
/**
* Generic process for all ops that don't have specific implementations.
@@ -39,6 +39,7 @@
* @return Object to be returned by the process call
* @throws SemanticException
*/
+ @Override
Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticRule.java
similarity index 95%
rename from ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java
rename to ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticRule.java
index f735eb5ae7..23a671003a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/Rule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/SemanticRule.java
@@ -26,17 +26,19 @@
* Rule interface for Operators Used in operator dispatching to dispatch
* process/visitor functions for operators.
*/
-public interface Rule {
+public interface SemanticRule extends Rule {
/**
* @return the cost of the rule - the lower the cost, the better the rule
* matches
* @throws SemanticException
*/
+ @Override
int cost(Stack stack) throws SemanticException;
/**
* @return the name of the rule - may be useful for debugging
*/
+ @Override
String getName();
}
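
Since each of the four renamed interfaces above extends a generic counterpart and re-declares its method with @Override, the implementors in the rest of this patch only switch their declared types; the method bodies stay as they were. A minimal implementor after the rename, mirroring the no-op getDefaultProc() processors used by the optimizers below (illustrative only, with the imports from the wiring sketch above):

    public class NoopProcessor implements SemanticNodeProcessor {  // was: implements NodeProcessor
      @Override
      public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
          Object... nodeOutputs) throws SemanticException {
        return null;  // nothing to do for this node type
      }
    }
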
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/SubqueryExpressionWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/SubqueryExpressionWalker.java
index 75f09e4617..86b7efec47 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/SubqueryExpressionWalker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/SubqueryExpressionWalker.java
@@ -21,7 +21,7 @@
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;
-public class SubqueryExpressionWalker extends ExpressionWalker{
+public class SubqueryExpressionWalker extends ExpressionWalker {
/**
* Constructor.
@@ -29,7 +29,7 @@
* @param disp
* dispatcher to call for each op encountered
*/
- public SubqueryExpressionWalker(Dispatcher disp) {
+ public SubqueryExpressionWalker(SemanticDispatcher disp) {
super(disp);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java
index 23cdb625c0..b1c7adcf92 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/TaskGraphWalker.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.lib;
-import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
@@ -37,7 +36,7 @@
* (dispatchedList) and a list of operators that are discovered but not yet
* dispatched
*/
-public class TaskGraphWalker implements GraphWalker {
+public class TaskGraphWalker implements SemanticGraphWalker {
public class TaskGraphWalkerContext{
@@ -56,7 +55,7 @@ public void addToDispatchList(Node dispatchedObj){
protected Stack opStack;
private final List toWalk = new ArrayList();
private final HashMap retMap = new HashMap();
- private final Dispatcher dispatcher;
+ private final SemanticDispatcher dispatcher;
private final TaskGraphWalkerContext walkerCtx;
/**
@@ -65,7 +64,7 @@ public void addToDispatchList(Node dispatchedObj){
* @param disp
* dispatcher to call for each op encountered
*/
- public TaskGraphWalker(Dispatcher disp) {
+ public TaskGraphWalker(SemanticDispatcher disp) {
dispatcher = disp;
opStack = new Stack();
walkerCtx = new TaskGraphWalkerContext(retMap);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/TypeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/TypeRule.java
index 21e527fed0..cf262048c8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/TypeRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/TypeRule.java
@@ -25,7 +25,7 @@
/**
* Rule that matches a particular type of node.
*/
-public class TypeRule implements Rule {
+public class TypeRule implements SemanticRule {
private Class> nodeClass;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
index d3fb91eddb..d69df776ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
@@ -41,7 +41,7 @@
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
@@ -59,7 +59,7 @@
/**
* this transformation does bucket map join optimization.
*/
-abstract public class AbstractBucketJoinProc implements NodeProcessor {
+abstract public class AbstractBucketJoinProc implements SemanticNodeProcessor {
protected ParseContext pGraphContext;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
index e17a17fe04..3f30c8c2e8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
@@ -38,7 +38,7 @@
import org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator;
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
@@ -56,7 +56,7 @@
import org.apache.hadoop.util.ReflectionUtils;
//try to replace a bucket map join with a sorted merge map join
-abstract public class AbstractSMBJoinProc extends AbstractBucketJoinProc implements NodeProcessor {
+abstract public class AbstractSMBJoinProc extends AbstractBucketJoinProc implements SemanticNodeProcessor {
public AbstractSMBJoinProc(ParseContext pctx) {
super(pctx);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java
index bacd6bb713..7ffeba4648 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java
@@ -28,12 +28,12 @@
import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -51,7 +51,7 @@ public BucketMapJoinOptimizer() {
public ParseContext transform(ParseContext pctx) throws SemanticException {
- Map&lt;Rule, NodeProcessor&gt; opRules = new LinkedHashMap&lt;Rule, NodeProcessor&gt;();
+ Map&lt;SemanticRule, SemanticNodeProcessor&gt; opRules = new LinkedHashMap&lt;SemanticRule, SemanticNodeProcessor&gt;();
BucketJoinProcCtx bucketMapJoinOptimizeCtx =
new BucketJoinProcCtx(pctx.getConf());
@@ -62,9 +62,9 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
- Dispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules,
+ SemanticDispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules,
bucketMapJoinOptimizeCtx);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
// Create a list of topop nodes
List topNodes = new ArrayList();
@@ -74,15 +74,15 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
return pctx;
}
- private NodeProcessor getBucketMapjoinProc(ParseContext pctx) {
+ private SemanticNodeProcessor getBucketMapjoinProc(ParseContext pctx) {
return new BucketMapjoinProc(pctx);
}
- private NodeProcessor getDefaultProc() {
- return new NodeProcessor() {
+ private SemanticNodeProcessor getDefaultProc() {
+ return new SemanticNodeProcessor() {
@Override
public Object process(Node nd, Stack stack,
- NodeProcessorCtx procCtx, Object... nodeOutputs)
+ NodeProcessorCtx procCtx, Object... nodeOutputs)
throws SemanticException {
return null;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java
index adcf61898a..5d4e6cd8c8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java
@@ -25,14 +25,13 @@
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.ParseContext;
-import org.apache.hadoop.hive.ql.parse.QBJoinTree;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-public class BucketMapjoinProc extends AbstractBucketJoinProc implements NodeProcessor {
+public class BucketMapjoinProc extends AbstractBucketJoinProc implements SemanticNodeProcessor {
public BucketMapjoinProc(ParseContext pGraphContext) {
super(pGraphContext);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
index e733b70066..f74ac2feee 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
@@ -38,12 +38,12 @@
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
@@ -80,7 +80,7 @@ public BucketingSortingReduceSinkOptimizer() {
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
- Map&lt;Rule, NodeProcessor&gt; opRules = new LinkedHashMap&lt;Rule, NodeProcessor&gt;();
+ Map&lt;SemanticRule, SemanticNodeProcessor&gt; opRules = new LinkedHashMap&lt;SemanticRule, SemanticNodeProcessor&gt;();
// process reduce sink added by hive.enforce.bucketing or hive.enforce.sorting
opRules.put(new RuleRegExp("R1",
@@ -90,8 +90,8 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
getBucketSortReduceSinkProc(pctx));
// The dispatcher fires the processor corresponding to the closest matching rule
- Dispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
// Create a list of top nodes
ArrayList topNodes = new ArrayList();
@@ -101,17 +101,17 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
return pctx;
}
- private NodeProcessor getDefaultProc() {
- return new NodeProcessor() {
+ private SemanticNodeProcessor getDefaultProc() {
+ return new SemanticNodeProcessor() {
@Override
public Object process(Node nd, Stack stack,
- NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
+ NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
return null;
}
};
}
- private NodeProcessor getBucketSortReduceSinkProc(ParseContext pctx) {
+ private SemanticNodeProcessor getBucketSortReduceSinkProc(ParseContext pctx) {
return new BucketSortReduceSinkProcessor(pctx);
}
@@ -119,7 +119,7 @@ private NodeProcessor getBucketSortReduceSinkProc(ParseContext pctx) {
* BucketSortReduceSinkProcessor.
*
*/
- public class BucketSortReduceSinkProcessor implements NodeProcessor {
+ public class BucketSortReduceSinkProcessor implements SemanticNodeProcessor {
private final Logger LOG = LoggerFactory.getLogger(BucketSortReduceSinkProcessor.class);
protected ParseContext pGraphContext;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
index ef94ea4bf0..bd59b95ead 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPruner.java
@@ -38,13 +38,12 @@
import org.apache.hadoop.hive.ql.exec.UnionOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
-import org.apache.hadoop.hive.ql.parse.ColumnAccessInfo;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -84,7 +83,7 @@ public ParseContext transform(ParseContext pactx) throws SemanticException {
// create a walker which walks the tree in a DFS manner while maintaining
// the operator stack. The dispatcher
// generates the plan from the operator tree
- Map&lt;Rule, NodeProcessor&gt; opRules = new LinkedHashMap&lt;Rule, NodeProcessor&gt;();
+ Map&lt;SemanticRule, SemanticNodeProcessor&gt; opRules = new LinkedHashMap&lt;SemanticRule, SemanticNodeProcessor&gt;();
opRules.put(new RuleRegExp("R1",
FilterOperator.getOperatorName() + "%"),
ColumnPrunerProcFactory.getFilterProc());
@@ -126,9 +125,9 @@ public ParseContext transform(ParseContext pactx) throws SemanticException {
ColumnPrunerProcFactory.getUnionProc());
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
- Dispatcher disp = new DefaultRuleDispatcher(ColumnPrunerProcFactory
+ SemanticDispatcher disp = new DefaultRuleDispatcher(ColumnPrunerProcFactory
.getDefaultProc(), opRules, cppCtx);
- GraphWalker ogw = new ColumnPrunerWalker(disp);
+ SemanticGraphWalker ogw = new ColumnPrunerWalker(disp);
// Create a list of topop nodes
ArrayList topNodes = new ArrayList();
@@ -146,7 +145,7 @@ public ParseContext transform(ParseContext pactx) throws SemanticException {
*/
public static class ColumnPrunerWalker extends DefaultGraphWalker {
- public ColumnPrunerWalker(Dispatcher disp) {
+ public ColumnPrunerWalker(SemanticDispatcher disp) {
super(disp);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
index d3749fdcf1..5dc6bd08e4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
@@ -52,7 +52,7 @@
import org.apache.hadoop.hive.ql.exec.UnionOperator;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
@@ -99,7 +99,7 @@ private ColumnPrunerProcFactory() {
/**
* Node Processor for Column Pruning on Filter Operators.
*/
- public static class ColumnPrunerFilterProc implements NodeProcessor {
+ public static class ColumnPrunerFilterProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
@@ -130,7 +130,7 @@ public static ColumnPrunerFilterProc getFilterProc() {
/**
* Node Processor for Column Pruning on Group By Operators.
*/
- public static class ColumnPrunerGroupByProc implements NodeProcessor {
+ public static class ColumnPrunerGroupByProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
@@ -220,7 +220,7 @@ public static ColumnPrunerGroupByProc getGroupByProc() {
return new ColumnPrunerGroupByProc();
}
- public static class ColumnPrunerScriptProc implements NodeProcessor {
+ public static class ColumnPrunerScriptProc implements SemanticNodeProcessor {
@Override
@SuppressWarnings("unchecked")
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx,
@@ -453,7 +453,7 @@ public static ColumnPrunerPTFProc getPTFProc() {
/**
* The Default Node Processor for Column Pruning.
*/
- public static class ColumnPrunerDefaultProc implements NodeProcessor {
+ public static class ColumnPrunerDefaultProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
@@ -478,7 +478,7 @@ public static ColumnPrunerDefaultProc getDefaultProc() {
* The Node Processor for Column Pruning on Table Scan Operators. It will
* store needed columns in tableScanDesc.
*/
- public static class ColumnPrunerTableScanProc implements NodeProcessor {
+ public static class ColumnPrunerTableScanProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
@@ -566,7 +566,7 @@ public static ColumnPrunerTableScanProc getTableScanProc() {
/**
* The Node Processor for Column Pruning on Reduce Sink Operators.
*/
- public static class ColumnPrunerReduceSinkProc implements NodeProcessor {
+ public static class ColumnPrunerReduceSinkProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
@@ -646,7 +646,7 @@ public static ColumnPrunerReduceSinkProc getReduceSinkProc() {
/**
* The Node Processor for Column Pruning on Lateral View Join Operators.
*/
- public static class ColumnPrunerLateralViewJoinProc implements NodeProcessor {
+ public static class ColumnPrunerLateralViewJoinProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
@@ -742,7 +742,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx,
/**
* The Node Processor for Column Pruning on Select Operators.
*/
- public static class ColumnPrunerSelectProc implements NodeProcessor {
+ public static class ColumnPrunerSelectProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
@@ -969,7 +969,7 @@ public static ColumnPrunerLateralViewForwardProc getLateralViewForwardProc() {
/**
* The Node Processor for Column Pruning on Join Operators.
*/
- public static class ColumnPrunerJoinProc implements NodeProcessor {
+ public static class ColumnPrunerJoinProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
@@ -992,7 +992,7 @@ public static ColumnPrunerJoinProc getJoinProc() {
/**
* The Node Processor for Column Pruning on Map Join Operators.
*/
- public static class ColumnPrunerMapJoinProc implements NodeProcessor {
+ public static class ColumnPrunerMapJoinProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx,
Object... nodeOutputs) throws SemanticException {
@@ -1015,7 +1015,7 @@ public static ColumnPrunerUnionProc getUnionProc() {
/**
* The Node Processor for Column Pruning on Union Operators.
*/
- public static class ColumnPrunerUnionProc implements NodeProcessor {
+ public static class ColumnPrunerUnionProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs)
throws SemanticException {
@@ -1042,8 +1042,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object..
}
private static void pruneOperator(NodeProcessorCtx ctx,
- Operator extends OperatorDesc> op,
- List cols)
+ Operator extends OperatorDesc> op,
+ List cols)
throws SemanticException {
// the pruning needs to preserve the order of columns in the input schema
RowSchema inputSchema = op.getSchema();
@@ -1086,9 +1086,9 @@ private static void pruneOperator(NodeProcessorCtx ctx,
}
private static void pruneJoinOperator(NodeProcessorCtx ctx,
- CommonJoinOperator op, JoinDesc conf,
- Map columnExprMap,
- Map> retainMap, boolean mapJoin) throws SemanticException {
+ CommonJoinOperator op, JoinDesc conf,
+ Map columnExprMap,
+ Map> retainMap, boolean mapJoin) throws SemanticException {
ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx;
List> childOperators = op
.getChildOperators();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
index ef61187fbd..47d9ec7367 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagate.java
@@ -37,11 +37,11 @@
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcCtx.ConstantPropagateOption;
import org.apache.hadoop.hive.ql.parse.ParseContext;
@@ -89,7 +89,7 @@ public ParseContext transform(ParseContext pactx) throws SemanticException {
// create a walker which walks the tree in a DFS manner while maintaining
// the operator stack. The dispatcher
// generates the plan from the operator tree
- Map&lt;Rule, NodeProcessor&gt; opRules = new LinkedHashMap&lt;Rule, NodeProcessor&gt;();
+ Map&lt;SemanticRule, SemanticNodeProcessor&gt; opRules = new LinkedHashMap&lt;SemanticRule, SemanticNodeProcessor&gt;();
opRules.put(new RuleRegExp("R1", FilterOperator.getOperatorName() + "%"),
ConstantPropagateProcFactory.getFilterProc());
@@ -110,9 +110,9 @@ public ParseContext transform(ParseContext pactx) throws SemanticException {
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
- Dispatcher disp = new DefaultRuleDispatcher(ConstantPropagateProcFactory
+ SemanticDispatcher disp = new DefaultRuleDispatcher(ConstantPropagateProcFactory
.getDefaultProc(), opRules, cppCtx);
- GraphWalker ogw = new ConstantPropagateWalker(disp);
+ SemanticGraphWalker ogw = new ConstantPropagateWalker(disp);
// Create a list of operator nodes to start the walking.
ArrayList topNodes = new ArrayList();
@@ -135,7 +135,7 @@ public ParseContext transform(ParseContext pactx) throws SemanticException {
*/
public static class ConstantPropagateWalker extends DefaultGraphWalker {
- public ConstantPropagateWalker(Dispatcher disp) {
+ public ConstantPropagateWalker(SemanticDispatcher disp) {
super(disp);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
index f6a8e910de..1a26ca57a4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
@@ -45,7 +45,7 @@
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
@@ -1043,7 +1043,7 @@ private static void foldOperator(Operator extends Serializable> op,
* Node Processor for Constant Propagation on Filter Operators. The processor is to fold
* conditional expressions and extract assignment expressions and propagate them.
*/
- public static class ConstantPropagateFilterProc implements NodeProcessor {
+ public static class ConstantPropagateFilterProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs)
throws SemanticException {
@@ -1098,7 +1098,7 @@ public static ConstantPropagateFilterProc getFilterProc() {
/**
* Node Processor for Constant Propagate for Group By Operators.
*/
- public static class ConstantPropagateGroupByProc implements NodeProcessor {
+ public static class ConstantPropagateGroupByProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs)
throws SemanticException {
@@ -1147,7 +1147,7 @@ public static ConstantPropagateGroupByProc getGroupByProc() {
/**
* The Default Node Processor for Constant Propagation.
*/
- public static class ConstantPropagateDefaultProc implements NodeProcessor {
+ public static class ConstantPropagateDefaultProc implements SemanticNodeProcessor {
@Override
@SuppressWarnings("unchecked")
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs)
@@ -1187,7 +1187,7 @@ public static ConstantPropagateDefaultProc getDefaultProc() {
/**
* The Node Processor for Constant Propagation for Select Operators.
*/
- public static class ConstantPropagateSelectProc implements NodeProcessor {
+ public static class ConstantPropagateSelectProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs)
throws SemanticException {
@@ -1255,7 +1255,7 @@ public static ConstantPropagateSelectProc getSelectProc() {
* The Node Processor for constant propagation for FileSink Operators. In addition to constant
* propagation, this processor also prunes dynamic partitions to static partitions if possible.
*/
- public static class ConstantPropagateFileSinkProc implements NodeProcessor {
+ public static class ConstantPropagateFileSinkProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs)
throws SemanticException {
@@ -1301,7 +1301,7 @@ private void pruneDP(FileSinkDesc fsdesc) {
}
}
- public static NodeProcessor getFileSinkProc() {
+ public static SemanticNodeProcessor getFileSinkProc() {
return new ConstantPropagateFileSinkProc();
}
@@ -1309,7 +1309,7 @@ public static NodeProcessor getFileSinkProc() {
* The Node Processor for Constant Propagation for Operators which is designed to stop propagate.
* Currently these kinds of Operators include UnionOperator and ScriptOperator.
*/
- public static class ConstantPropagateStopProc implements NodeProcessor {
+ public static class ConstantPropagateStopProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs)
throws SemanticException {
@@ -1323,7 +1323,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object..
}
}
- public static NodeProcessor getStopProc() {
+ public static SemanticNodeProcessor getStopProc() {
return new ConstantPropagateStopProc();
}
@@ -1332,7 +1332,7 @@ public static NodeProcessor getStopProc() {
* a join, then only those constants from inner join tables, or from the 'inner side' of a outer
* join (left table for left outer join and vice versa) can be propagated.
*/
- public static class ConstantPropagateReduceSinkProc implements NodeProcessor {
+ public static class ConstantPropagateReduceSinkProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs)
throws SemanticException {
@@ -1429,14 +1429,14 @@ private boolean skipFolding(JoinDesc joinDesc) {
}
- public static NodeProcessor getReduceSinkProc() {
+ public static SemanticNodeProcessor getReduceSinkProc() {
return new ConstantPropagateReduceSinkProc();
}
/**
* The Node Processor for Constant Propagation for Join Operators.
*/
- public static class ConstantPropagateJoinProc implements NodeProcessor {
+ public static class ConstantPropagateJoinProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs)
throws SemanticException {
@@ -1503,14 +1503,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object..
}
- public static NodeProcessor getJoinProc() {
+ public static SemanticNodeProcessor getJoinProc() {
return new ConstantPropagateJoinProc();
}
/**
* The Node Processor for Constant Propagation for Table Scan Operators.
*/
- public static class ConstantPropagateTableScanProc implements NodeProcessor {
+ public static class ConstantPropagateTableScanProc implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... nodeOutputs)
throws SemanticException {
@@ -1534,7 +1534,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object..
}
}
- public static NodeProcessor getTableScanProc() {
+ public static SemanticNodeProcessor getTableScanProc() {
return new ConstantPropagateTableScanProc();
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
index 256a139890..d8d8cae936 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
@@ -49,7 +49,7 @@
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.TezDummyStoreOperator;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.optimizer.physical.LlapClusterStateForCompile;
import org.apache.hadoop.hive.ql.parse.GenTezUtils;
@@ -88,7 +88,7 @@
* converted (e.g.: full outer joins cannot be handled as map joins) as well
* as memory restrictions (one side of the join has to fit into memory).
*/
-public class ConvertJoinMapJoin implements NodeProcessor {
+public class ConvertJoinMapJoin implements SemanticNodeProcessor {
private static final Logger LOG = LoggerFactory.getLogger(ConvertJoinMapJoin.class.getName());
private static final int DEFAULT_MAX_EXECUTORS_PER_QUERY_CONTAINER_MODE = 3;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
index f0cf2f1c30..32edacba7c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
@@ -40,12 +40,12 @@
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
@@ -90,7 +90,7 @@ public CountDistinctRewriteProc() {
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
- Map&lt;Rule, NodeProcessor&gt; opRules = new LinkedHashMap&lt;Rule, NodeProcessor&gt;();
+ Map&lt;SemanticRule, SemanticNodeProcessor&gt; opRules = new LinkedHashMap&lt;SemanticRule, SemanticNodeProcessor&gt;();
// process group-by pattern
opRules
.put(
@@ -100,8 +100,8 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
- Dispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(getDefaultProc(), opRules, null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
// Create a list of topop nodes
List topNodes = new ArrayList();
@@ -111,8 +111,8 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
return pctx;
}
- private NodeProcessor getDefaultProc() {
- return new NodeProcessor() {
+ private SemanticNodeProcessor getDefaultProc() {
+ return new SemanticNodeProcessor() {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
@@ -121,7 +121,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
};
}
- private NodeProcessor getCountDistinctProc(ParseContext pctx) {
+ private SemanticNodeProcessor getCountDistinctProc(ParseContext pctx) {
return new CountDistinctProcessor(pctx);
}
@@ -129,7 +129,7 @@ private NodeProcessor getCountDistinctProc(ParseContext pctx) {
* CountDistinctProcessor.
*
*/
- public class CountDistinctProcessor implements NodeProcessor {
+ public class CountDistinctProcessor implements SemanticNodeProcessor {
protected ParseContext pGraphContext;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
index e0ab6ce349..eca5a8b323 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java
@@ -43,7 +43,7 @@
import org.apache.hadoop.hive.ql.exec.spark.SparkUtilities;
import org.apache.hadoop.hive.ql.io.AcidUtils.Operation;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
@@ -95,7 +95,7 @@
* can use an existing join to dynamically prune partitions. This class sets up
* the infrastructure for that.
*/
-public class DynamicPartitionPruningOptimization implements NodeProcessor {
+public class DynamicPartitionPruningOptimization implements SemanticNodeProcessor {
static final private Logger LOG = LoggerFactory.getLogger(DynamicPartitionPruningOptimization.class
.getName());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
index c5553fbdd0..5818e6b3c0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java
@@ -35,13 +35,11 @@
import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.optimizer.PrunerOperatorFactory.FilterPruner;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveExcept;
import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
@@ -69,7 +67,7 @@ public FixedBucketPruningOptimizer(boolean compat) {
this.compat = compat;
}
- public class NoopWalker implements NodeProcessor {
+ public class NoopWalker implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
Object... nodeOutputs) throws SemanticException {
@@ -82,7 +80,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
@Override
protected void generatePredicate(NodeProcessorCtx procCtx,
- FilterOperator fop, TableScanOperator top) throws SemanticException{
+ FilterOperator fop, TableScanOperator top) throws SemanticException{
FixedBucketPruningOptimizerCtxt ctxt = ((FixedBucketPruningOptimizerCtxt) procCtx);
Table tbl = top.getConf().getTableMetadata();
int numBuckets = tbl.getNumBuckets();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
index 10a0405eee..f90aa8484d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.optimizer;
-import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -36,7 +35,7 @@
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.UnionOperator;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
import org.apache.hadoop.hive.ql.parse.ParseContext;
@@ -48,7 +47,7 @@
/**
* Processor for the rule - table scan followed by reduce sink.
*/
-public class GenMRFileSink1 implements NodeProcessor {
+public class GenMRFileSink1 implements SemanticNodeProcessor {
private static final Logger LOG = LoggerFactory.getLogger(GenMRFileSink1.class.getName());
@@ -173,7 +172,7 @@ private void processLinkedFileDesc(GenMRProcContext ctx,
* @throws SemanticException
*/
private Path processFS(FileSinkOperator fsOp, Stack stack,
- NodeProcessorCtx opProcCtx, boolean chDir) throws SemanticException {
+ NodeProcessorCtx opProcCtx, boolean chDir) throws SemanticException {
GenMRProcContext ctx = (GenMRProcContext) opProcCtx;
Task> currTask = ctx.getCurrTask();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java
index 93288e1090..c477a6ad3c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMROperator.java
@@ -23,7 +23,7 @@
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -32,7 +32,7 @@
/**
* Processor for the rule - no specific rule fired.
*/
-public class GenMROperator implements NodeProcessor {
+public class GenMROperator implements SemanticNodeProcessor {
public GenMROperator() {
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
index bbda668c08..8ebdb014c6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.optimizer;
-import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java
index 033cbdc807..db08e56337 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.optimizer;
-import java.io.Serializable;
import java.util.Map;
import java.util.Stack;
@@ -26,7 +25,7 @@
import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -36,7 +35,7 @@
/**
* Processor for the rule - table scan followed by reduce sink.
*/
-public class GenMRRedSink1 implements NodeProcessor {
+public class GenMRRedSink1 implements SemanticNodeProcessor {
public GenMRRedSink1() {
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java
index 8c997c7754..ef85be9bd6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink2.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.optimizer;
-import java.io.Serializable;
import java.util.Map;
import java.util.Stack;
@@ -26,7 +25,7 @@
import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -35,7 +34,7 @@
/**
* Processor for the rule - reduce sink followed by reduce sink.
*/
-public class GenMRRedSink2 implements NodeProcessor {
+public class GenMRRedSink2 implements SemanticNodeProcessor {
public GenMRRedSink2() {
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java
index ed4bb30c08..f437610bd9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.optimizer;
-import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import java.util.Stack;
@@ -28,7 +27,7 @@
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.UnionOperator;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.Utils;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
@@ -39,7 +38,7 @@
/**
* Processor for the rule - union followed by reduce sink.
*/
-public class GenMRRedSink3 implements NodeProcessor {
+public class GenMRRedSink3 implements SemanticNodeProcessor {
public GenMRRedSink3() {
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
index bb53ce8728..5f060ec8a0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRTableScan1.java
@@ -28,11 +28,8 @@
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.exec.mr.MapRedTask;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
-import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
-import org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
@@ -50,7 +47,7 @@
/**
* Processor for the rule - table scan.
*/
-public class GenMRTableScan1 implements NodeProcessor {
+public class GenMRTableScan1 implements SemanticNodeProcessor {
public GenMRTableScan1() {
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java
index abf363a348..c09d22f8e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.optimizer;
-import java.io.Serializable;
import java.util.Map;
import java.util.Stack;
@@ -30,7 +29,7 @@
import org.apache.hadoop.hive.ql.exec.TaskFactory;
import org.apache.hadoop.hive.ql.exec.UnionOperator;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMRUnionCtx;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
@@ -47,7 +46,7 @@
/**
* Processor for the rule - TableScan followed by Union.
*/
-public class GenMRUnion1 implements NodeProcessor {
+public class GenMRUnion1 implements SemanticNodeProcessor {
public GenMRUnion1() {
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
index bbbc4f84aa..f52d5652b6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
@@ -43,12 +43,12 @@
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
@@ -80,7 +80,7 @@ public GroupByOptimizer() {
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
HiveConf conf = pctx.getConf();
if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEGROUPBYSKEW)) {
@@ -103,10 +103,10 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
- Dispatcher disp =
+ SemanticDispatcher disp =
new DefaultRuleDispatcher(getDefaultProc(), opRules,
new GroupByOptimizerContext(conf));
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
// Create a list of topop nodes
List<Node> topNodes = new ArrayList<Node>();
@@ -116,21 +116,21 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
return pctx;
}
- private NodeProcessor getDefaultProc() {
- return new NodeProcessor() {
+ private SemanticNodeProcessor getDefaultProc() {
+ return new SemanticNodeProcessor() {
@Override
public Object process(Node nd, Stack<Node> stack,
- NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
+ NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
return null;
}
};
}
- private NodeProcessor getMapSortedGroupbyProc(ParseContext pctx) {
+ private SemanticNodeProcessor getMapSortedGroupbyProc(ParseContext pctx) {
return new SortGroupByProcessor(pctx);
}
- private NodeProcessor getMapSortedGroupbySkewProc(ParseContext pctx) {
+ private SemanticNodeProcessor getMapSortedGroupbySkewProc(ParseContext pctx) {
return new SortGroupBySkewProcessor(pctx);
}
@@ -146,7 +146,7 @@ private NodeProcessor getMapSortedGroupbySkewProc(ParseContext pctx) {
* SortGroupByProcessor.
*
*/
- public class SortGroupByProcessor implements NodeProcessor {
+ public class SortGroupByProcessor implements SemanticNodeProcessor {
protected ParseContext pGraphContext;
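Illustrative annotation, not part of the patch: a sketch of the walker wiring that GroupByOptimizer.transform() builds with the renamed Semantic* interfaces, with the generic type parameters written out. The rule pattern "GBY%" and the no-op processor body are placeholders for illustration; getDefaultProc(), GroupByOptimizerContext, pctx and conf appear in the hunks above.

Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<>();
opRules.put(new RuleRegExp("R1", "GBY%"), new SemanticNodeProcessor() {
  @Override
  public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
      Object... nodeOutputs) throws SemanticException {
    return null;  // placeholder; the real processors rewrite the group-by plan here
  }
});
// The dispatcher fires the processor of the closest matching rule and passes the context along.
SemanticDispatcher disp =
    new DefaultRuleDispatcher(getDefaultProc(), opRules, new GroupByOptimizerContext(conf));
SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
List<Node> topNodes = new ArrayList<>();
topNodes.addAll(pctx.getTopOps().values());
ogw.startWalking(topNodes, null);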
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
index 7c841ba48e..a5fe3bb6a9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
@@ -38,11 +38,11 @@
import org.apache.hadoop.hive.ql.exec.SelectOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -83,17 +83,17 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
}
// 1. We apply the transformation
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
opRules.put(new RuleRegExp("R1",
"(" + SelectOperator.getOperatorName() + "%)"), new ProjectRemover());
- GraphWalker ogw = new DefaultGraphWalker(new DefaultRuleDispatcher(null, opRules, null));
+ SemanticGraphWalker ogw = new DefaultGraphWalker(new DefaultRuleDispatcher(null, opRules, null));
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(pctx.getTopOps().values());
ogw.startWalking(topNodes, null);
return pctx;
}
- private static class ProjectRemover implements NodeProcessor {
+ private static class ProjectRemover implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
index 59ca3f7a1a..4cae3b26a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
@@ -28,16 +28,15 @@
import org.apache.hadoop.hive.ql.exec.GroupByOperator;
import org.apache.hadoop.hive.ql.exec.LimitOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
-import org.apache.hadoop.hive.ql.exec.OperatorUtils;
import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -92,7 +91,7 @@
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
opRules.put(new RuleRegExp("R1",
ReduceSinkOperator.getOperatorName() + "%" +
".*" +
@@ -105,19 +104,19 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
new TopNPropagator());
LimitPushdownContext context = new LimitPushdownContext(pctx.getConf());
- Dispatcher disp = new DefaultRuleDispatcher(null, opRules, context);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, context);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
List<Node> topNodes = new ArrayList<Node>(pctx.getTopOps().values());
ogw.startWalking(topNodes, null);
return pctx;
}
- private static class TopNReducer implements NodeProcessor {
+ private static class TopNReducer implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack,
- NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
+ NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
ReduceSinkOperator rs = null;
for (int i = stack.size() - 2 ; i >= 0; i--) {
Operator<?> operator = (Operator<?>) stack.get(i);
@@ -158,11 +157,11 @@ public Object process(Node nd, Stack<Node> stack,
}
}
- private static class TopNPropagator implements NodeProcessor {
+ private static class TopNPropagator implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack,
- NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
+ NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
ReduceSinkOperator cRS = (ReduceSinkOperator) nd;
if (cRS.getConf().getTopN() == -1) {
// No limit, nothing to propagate, we just bail out
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java
index 21d792e2ee..6a73336bd9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hive.ql.optimizer;
-import java.io.Serializable;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -29,7 +28,7 @@
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -77,7 +76,7 @@ public static int getPositionParent(AbstractMapJoinOperator<? extends MapJoinDesc>
* may be performed as a bucketized map-side join (or sort-merge join), the map join operator
* is enhanced to contain the bucketing info. when it is encountered.
*/
- private static class TableScanMapJoinProcessor implements NodeProcessor {
+ private static class TableScanMapJoinProcessor implements SemanticNodeProcessor {
public static void setupBucketMapJoinInfo(MapWork plan,
AbstractMapJoinOperator<? extends MapJoinDesc> currMapJoinOp) {
@@ -233,7 +232,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
}
}
- public static NodeProcessor getTableScanMapJoin() {
+ public static SemanticNodeProcessor getTableScanMapJoin() {
return new TableScanMapJoinProcessor();
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
index a9506c8a0b..4f1c9b2640 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
@@ -51,12 +51,12 @@
import org.apache.hadoop.hive.ql.exec.UnionOperator;
import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer.EnabledOverride;
import org.apache.hadoop.hive.ql.parse.GenMapRedWalker;
@@ -932,7 +932,7 @@ public ParseContext transform(ParseContext pactx) throws SemanticException {
// create a walker which walks the tree in a DFS manner while maintaining
// the operator stack.
// The dispatcher generates the plan from the operator tree
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
opRules.put(new RuleRegExp("R0",
MapJoinOperator.getOperatorName() + "%"),
getCurrentMapJoin());
@@ -948,10 +948,10 @@ public ParseContext transform(ParseContext pactx) throws SemanticException {
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
- Dispatcher disp = new DefaultRuleDispatcher(getDefault(), opRules, new MapJoinWalkerCtx(
+ SemanticDispatcher disp = new DefaultRuleDispatcher(getDefault(), opRules, new MapJoinWalkerCtx(
listMapJoinOpsNoRed, pactx));
- GraphWalker ogw = new GenMapRedWalker(disp);
+ SemanticGraphWalker ogw = new GenMapRedWalker(disp);
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(listMapJoinOps);
ogw.startWalking(topNodes, null);
@@ -964,7 +964,7 @@ public ParseContext transform(ParseContext pactx) throws SemanticException {
* CurrentMapJoin.
*
*/
- public static class CurrentMapJoin implements NodeProcessor {
+ public static class CurrentMapJoin implements SemanticNodeProcessor {
/**
* Store the current mapjoin in the context.
@@ -1074,7 +1074,7 @@ private static void addRejectMapJoinToCtx(MapJoinWalkerCtx ctx,
* MapJoinFS.
*
*/
- public static class MapJoinFS implements NodeProcessor {
+ public static class MapJoinFS implements SemanticNodeProcessor {
/**
* Store the current mapjoin in a list of mapjoins followed by a filesink.
@@ -1101,7 +1101,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
* MapJoinDefault.
*
*/
- public static class MapJoinDefault implements NodeProcessor {
+ public static class MapJoinDefault implements SemanticNodeProcessor {
/**
* Store the mapjoin in a rejected list.
@@ -1120,7 +1120,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
* Default.
*
*/
- public static class Default implements NodeProcessor {
+ public static class Default implements SemanticNodeProcessor {
/**
* Nothing to do.
@@ -1132,19 +1132,19 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
}
}
- public static NodeProcessor getMapJoinFS() {
+ public static SemanticNodeProcessor getMapJoinFS() {
return new MapJoinFS();
}
- public static NodeProcessor getMapJoinDefault() {
+ public static SemanticNodeProcessor getMapJoinDefault() {
return new MapJoinDefault();
}
- public static NodeProcessor getDefault() {
+ public static SemanticNodeProcessor getDefault() {
return new Default();
}
- public static NodeProcessor getCurrentMapJoin() {
+ public static SemanticNodeProcessor getCurrentMapJoin() {
return new CurrentMapJoin();
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MergeJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MergeJoinProc.java
index 327e16d638..4284396fc8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MergeJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MergeJoinProc.java
@@ -24,7 +24,7 @@
import org.apache.hadoop.hive.ql.exec.DummyStoreOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.GenTezProcContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -35,7 +35,7 @@
import org.apache.hadoop.hive.ql.plan.TezWork;
import org.apache.hadoop.hive.ql.plan.TezWork.VertexType;
-public class MergeJoinProc implements NodeProcessor {
+public class MergeJoinProc implements SemanticNodeProcessor {
@Override
public Object
process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
index b8f8494e47..a5972d0ce2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
@@ -35,12 +35,12 @@
import org.apache.hadoop.hive.ql.exec.SelectOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -60,12 +60,12 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
// 1. We apply the transformation
String SEL = SelectOperator.getOperatorName();
String FIL = FilterOperator.getOperatorName();
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
opRules.put(new RuleRegExp("R1", SEL + "%" + SEL + "%"), new SelectDedup(pctx));
opRules.put(new RuleRegExp("R2", FIL + "%" + FIL + "%"), new FilterDedup());
- Dispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
List<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(pctx.getTopOps().values());
@@ -73,7 +73,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
return pctx;
}
- private class SelectDedup implements NodeProcessor {
+ private class SelectDedup implements SemanticNodeProcessor {
private ParseContext pctx;
@@ -211,7 +211,7 @@ private void fixContextReferences(SelectOperator cSEL, SelectOperator pSEL) {
}
}
- private class FilterDedup implements NodeProcessor {
+ private class FilterDedup implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
index 5fc29d2493..49d915ce68 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PartitionColumnsSeparator.java
@@ -32,14 +32,14 @@
import org.apache.hadoop.hive.ql.exec.FilterOperator;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
import org.apache.hadoop.hive.ql.lib.ForwardWalker;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.PreOrderOnceWalker;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.lib.TypeRule;
import org.apache.hadoop.hive.ql.parse.ParseContext;
@@ -75,11 +75,11 @@
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
// 1. Trigger transformation
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
opRules.put(new RuleRegExp("R1", FilterOperator.getOperatorName() + "%"), new StructInTransformer());
- Dispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
- GraphWalker ogw = new ForwardWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
+ SemanticGraphWalker ogw = new ForwardWalker(disp);
List<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(pctx.getTopOps().values());
@@ -87,7 +87,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
return pctx;
}
- private class StructInTransformer implements NodeProcessor {
+ private class StructInTransformer implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -117,13 +117,13 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
}
private ExprNodeDesc generateInClauses(ExprNodeDesc predicate) throws SemanticException {
- Map<Rule, NodeProcessor> exprRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> exprRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
exprRules.put(new TypeRule(ExprNodeGenericFuncDesc.class), new StructInExprProcessor());
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
- Dispatcher disp = new DefaultRuleDispatcher(null, exprRules, null);
- GraphWalker egw = new PreOrderOnceWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(null, exprRules, null);
+ SemanticGraphWalker egw = new PreOrderOnceWalker(disp);
List<Node> startNodes = new ArrayList<Node>();
startNodes.add(predicate);
@@ -147,7 +147,7 @@ private ExprNodeDesc generateInClauses(ExprNodeDesc predicate) throws SemanticException {
* part of the given query. Once the partitions are pruned, the partition condition
* remover is expected to remove the redundant predicates from the plan.
*/
- private class StructInExprProcessor implements NodeProcessor {
+ private class StructInExprProcessor implements SemanticNodeProcessor {
/** TableInfo is populated in PASS 1 of process(). It contains the information required
* to generate an IN clause of the following format:
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
index 3eb224d1a6..f09c8af3ac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
@@ -31,14 +31,14 @@
import org.apache.hadoop.hive.ql.exec.FilterOperator;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
import org.apache.hadoop.hive.ql.lib.ForwardWalker;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.PreOrderOnceWalker;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.lib.TypeRule;
import org.apache.hadoop.hive.ql.parse.ParseContext;
@@ -84,11 +84,11 @@ public PointLookupOptimizer(final int min) {
@Override
public ParseContext transform(ParseContext pctx) throws SemanticException {
// 1. Trigger transformation
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
opRules.put(new RuleRegExp("R1", FilterOperator.getOperatorName() + "%"), new FilterTransformer());
- Dispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
- GraphWalker ogw = new ForwardWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
+ SemanticGraphWalker ogw = new ForwardWalker(disp);
List<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(pctx.getTopOps().values());
@@ -96,7 +96,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
return pctx;
}
- private class FilterTransformer implements NodeProcessor {
+ private class FilterTransformer implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -118,13 +118,13 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
}
private ExprNodeDesc generateInClause(ExprNodeDesc predicate) throws SemanticException {
- Map<Rule, NodeProcessor> exprRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> exprRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
exprRules.put(new TypeRule(ExprNodeGenericFuncDesc.class), new OrExprProcessor());
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
- Dispatcher disp = new DefaultRuleDispatcher(null, exprRules, null);
- GraphWalker egw = new PreOrderOnceWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(null, exprRules, null);
+ SemanticGraphWalker egw = new PreOrderOnceWalker(disp);
List<Node> startNodes = new ArrayList<Node>();
startNodes.add(predicate);
@@ -135,7 +135,7 @@ private ExprNodeDesc generateInClause(ExprNodeDesc predicate) throws SemanticException {
}
}
- private class OrExprProcessor implements NodeProcessor {
+ private class OrExprProcessor implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerExpressionOperatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerExpressionOperatorFactory.java
index 3e81c2b43c..6962dcba1e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerExpressionOperatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerExpressionOperatorFactory.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
@@ -44,7 +44,7 @@
* expr is a candidate else it is not a candidate but its children could be
* final candidates.
*/
- public static class GenericFuncExprProcessor implements NodeProcessor {
+ public static class GenericFuncExprProcessor implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -108,7 +108,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
* FieldExprProcessor.
*
*/
- public static class FieldExprProcessor implements NodeProcessor {
+ public static class FieldExprProcessor implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -145,7 +145,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
/**
* Processor for column expressions.
*/
- public static abstract class ColumnExprProcessor implements NodeProcessor {
+ public static abstract class ColumnExprProcessor implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -166,7 +166,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
* @return
*/
protected abstract ExprNodeDesc processColumnDesc(NodeProcessorCtx procCtx,
- ExprNodeColumnDesc cd);
+ ExprNodeColumnDesc cd);
}
@@ -174,7 +174,7 @@ protected abstract ExprNodeDesc processColumnDesc(NodeProcessorCtx procCtx,
* Processor for constants and null expressions. For such expressions the
* processor simply clones the exprNodeDesc and returns it.
*/
- public static class DefaultExprProcessor implements NodeProcessor {
+ public static class DefaultExprProcessor implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -191,7 +191,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
* Instantiate default expression processor.
* @return
*/
- public static final NodeProcessor getDefaultExprProcessor() {
+ public static final SemanticNodeProcessor getDefaultExprProcessor() {
return new DefaultExprProcessor();
}
@@ -200,7 +200,7 @@ public static final NodeProcessor getDefaultExprProcessor() {
*
* @return
*/
- public static final NodeProcessor getGenericFuncProcessor() {
+ public static final SemanticNodeProcessor getGenericFuncProcessor() {
return new GenericFuncExprProcessor();
}
@@ -209,7 +209,7 @@ public static final NodeProcessor getGenericFuncProcessor() {
*
* @return
*/
- public static final NodeProcessor getFieldProcessor() {
+ public static final SemanticNodeProcessor getFieldProcessor() {
return new FieldExprProcessor();
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
index b5742c6ecd..e04a2b1b97 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
@@ -25,7 +25,7 @@
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -48,7 +48,7 @@
* Determines the partition pruner for the filter. This is called only when
* the filter follows a table scan operator.
*/
- public static abstract class FilterPruner implements NodeProcessor {
+ public static abstract class FilterPruner implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -101,7 +101,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
* @throws UDFArgumentException
*/
protected abstract void generatePredicate(NodeProcessorCtx procCtx, FilterOperator fop,
- TableScanOperator top) throws SemanticException;
+ TableScanOperator top) throws SemanticException;
/**
* Add pruning predicate.
*
@@ -172,7 +172,7 @@ protected void addPruningPred(Map<TableScanOperator, Map<String, ExprNodeDesc>>
/**
* Default processor which just merges its children.
*/
- public static class DefaultPruner implements NodeProcessor {
+ public static class DefaultPruner implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
@@ -189,7 +189,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
*
* @return
*/
- public final static NodeProcessor getDefaultProc() {
+ public final static SemanticNodeProcessor getDefaultProc() {
return new DefaultPruner();
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java
index 8cfea50e21..6d233ffa7b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerUtils.java
@@ -27,14 +27,13 @@
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleExactMatch;
-import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.lib.TypeRule;
import org.apache.hadoop.hive.ql.parse.ParseContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -62,8 +61,8 @@ private PrunerUtils() {
* @throws SemanticException
*/
public static void walkOperatorTree(ParseContext pctx, NodeProcessorCtx opWalkerCtx,
- NodeProcessor filterProc, NodeProcessor defaultProc) throws SemanticException {
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+ SemanticNodeProcessor filterProc, SemanticNodeProcessor defaultProc) throws SemanticException {
+ Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
// Build regular expression for operator rule.
// "(TS%FIL%)|(TS%FIL%FIL%)"
@@ -75,8 +74,8 @@ public static void walkOperatorTree(ParseContext pctx, NodeProcessorCtx opWalkerCtx,
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
- Dispatcher disp = new DefaultRuleDispatcher(defaultProc, opRules, opWalkerCtx);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(defaultProc, opRules, opWalkerCtx);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
// Create a list of topop nodes
ArrayList<Node> topNodes = new ArrayList<Node>();
@@ -97,21 +96,21 @@ public static void walkOperatorTree(ParseContext pctx, NodeProcessorCtx opWalkerCtx,
* @throws SemanticException
*/
public static Map<Node, Object> walkExprTree(ExprNodeDesc pred, NodeProcessorCtx ctx,
- NodeProcessor colProc, NodeProcessor fieldProc, NodeProcessor genFuncProc,
- NodeProcessor defProc)
+ SemanticNodeProcessor colProc, SemanticNodeProcessor fieldProc, SemanticNodeProcessor genFuncProc,
+ SemanticNodeProcessor defProc)
throws SemanticException {
// create a walker which walks the tree in a DFS manner while maintaining
// the operator stack. The dispatcher
// generates the plan from the operator tree
- Map<Rule, NodeProcessor> exprRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> exprRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
exprRules.put(new TypeRule(ExprNodeColumnDesc.class) , colProc);
exprRules.put(new TypeRule(ExprNodeFieldDesc.class), fieldProc);
exprRules.put(new TypeRule(ExprNodeGenericFuncDesc.class), genFuncProc);
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
- Dispatcher disp = new DefaultRuleDispatcher(defProc, exprRules, ctx);
- GraphWalker egw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(defProc, exprRules, ctx);
+ SemanticGraphWalker egw = new DefaultGraphWalker(disp);
List<Node> startNodes = new ArrayList<Node>();
startNodes.add(pred);
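Illustrative annotation, not part of the patch: PrunerUtils.walkExprTree() as it reads after this change, with the generic type arguments written out. The output-map variable name and the trailing return are assumptions made for this sketch; everything else follows the hunks above.

public static Map<Node, Object> walkExprTree(ExprNodeDesc pred, NodeProcessorCtx ctx,
    SemanticNodeProcessor colProc, SemanticNodeProcessor fieldProc,
    SemanticNodeProcessor genFuncProc, SemanticNodeProcessor defProc)
    throws SemanticException {
  // Walk the expression tree in a DFS manner; the dispatcher fires the processor
  // of the closest matching rule and passes the context along.
  Map<SemanticRule, SemanticNodeProcessor> exprRules = new LinkedHashMap<>();
  exprRules.put(new TypeRule(ExprNodeColumnDesc.class), colProc);
  exprRules.put(new TypeRule(ExprNodeFieldDesc.class), fieldProc);
  exprRules.put(new TypeRule(ExprNodeGenericFuncDesc.class), genFuncProc);
  SemanticDispatcher disp = new DefaultRuleDispatcher(defProc, exprRules, ctx);
  SemanticGraphWalker egw = new DefaultGraphWalker(disp);
  List<Node> startNodes = new ArrayList<>();
  startNodes.add(pred);
  HashMap<Node, Object> nodeOutput = new HashMap<>();  // name assumed for illustration
  egw.startWalking(startNodes, nodeOutput);
  return nodeOutput;
}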
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
index b591b73298..bd6c41819d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java
@@ -38,7 +38,7 @@
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.GenTezProcContext;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -64,7 +64,7 @@
import com.google.common.collect.Sets;
-public class ReduceSinkMapJoinProc implements NodeProcessor {
+public class ReduceSinkMapJoinProc implements SemanticNodeProcessor {
private final static Logger LOG = LoggerFactory.getLogger(ReduceSinkMapJoinProc.class.getName());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RedundantDynamicPruningConditionsRemoval.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RedundantDynamicPruningConditionsRemoval.java
index 4a60158892..f50afbcb8b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RedundantDynamicPruningConditionsRemoval.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RedundantDynamicPruningConditionsRemoval.java
@@ -30,12 +30,12 @@
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.ParseContext;
@@ -78,12 +78,12 @@
public ParseContext transform(ParseContext pctx) throws SemanticException {
// Make sure semijoin is not enabled. If it is, then do not remove the dynamic partition pruning predicates.
if (!pctx.getConf().getBoolVar(HiveConf.ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION)) {
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map<SemanticRule, SemanticNodeProcessor> opRules = new LinkedHashMap<SemanticRule, SemanticNodeProcessor>();
opRules.put(new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%" +
FilterOperator.getOperatorName() + "%"), new FilterTransformer());
- Dispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
- GraphWalker ogw = new DefaultGraphWalker(disp);
+ SemanticDispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
+ SemanticGraphWalker ogw = new DefaultGraphWalker(disp);
List<Node> topNodes = new ArrayList<Node>();
topNodes.addAll(pctx.getTopOps().values());
@@ -92,7 +92,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
return pctx;
}
- private class FilterTransformer implements NodeProcessor {
+ private class FilterTransformer implements SemanticNodeProcessor {
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java
index c0f5699ea0..52614d3ebf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/RemoveDynamicPruningBySize.java
@@ -25,7 +25,7 @@
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.parse.GenTezUtils;
import org.apache.hadoop.hive.ql.parse.OptimizeTezProcContext;
@@ -37,7 +37,7 @@
* If we expect the number of keys for dynamic pruning to be too large we
* disable it.
*/
-public class RemoveDynamicPruningBySize implements NodeProcessor {
+public class RemoveDynamicPruningBySize implements SemanticNodeProcessor {
static final private Logger LOG = LoggerFactory.getLogger(RemoveDynamicPruningBySize.class.getName());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
index bb9681499a..b3aac5c1b2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java
@@ -37,12 +37,12 @@
import org.apache.hadoop.hive.ql.exec.TableScanOperator;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
-import org.apache.hadoop.hive.ql.lib.Dispatcher;
-import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.SemanticDispatcher;
+import org.apache.hadoop.hive.ql.lib.SemanticGraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
-import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.SemanticNodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
-import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.SemanticRule;
import org.apache.hadoop.hive.ql.lib.RuleRegExp;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -103,7 +103,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
// create a the context for walking operators
SamplePrunerCtx samplePrunerCtx = new SamplePrunerCtx(pctx.getOpToSamplePruner());
- Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+ Map