partitions, String partColName) {
* - all columns in BitSet are partition
* columns.
*/
- public boolean containsPartitionColumnsOnly(BitSet cols) {
+ public boolean containsPartitionColumnsOnly(ImmutableBitSet cols) {
for (int i = cols.nextSetBit(0); i >= 0; i++, i = cols.nextSetBit(i + 1)) {
if (!hivePartitionColsMap.containsKey(i)) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/TraitsUtil.java
similarity index 73%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/TraitsUtil.java
index 4b44a28ca77540fd643fc03b89dcb4b2155d081a..f18284667146b8374787f2e51c6f16cb4b2ae226 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/TraitsUtil.java
@@ -16,14 +16,14 @@
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq;
+package org.apache.hadoop.hive.ql.optimizer.calcite;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel;
-import org.eigenbase.rel.RelCollation;
-import org.eigenbase.rel.RelCollationImpl;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.relopt.RelTraitSet;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelCollationImpl;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode;
public class TraitsUtil {
public static RelTraitSet getSortTraitSet(RelOptCluster cluster, RelTraitSet traitSet,
@@ -32,6 +32,6 @@ public static RelTraitSet getSortTraitSet(RelOptCluster cluster, RelTraitSet tra
}
public static RelTraitSet getDefaultTraitSet(RelOptCluster cluster) {
- return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY);
+ return cluster.traitSetOf(HiveRelNode.CONVENTION, RelCollationImpl.EMPTY);
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
similarity index 97%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
index 72fe5d6f26d0fd9a34c8e89be3040cce4593fd4a..71b66803559096abb29b9ac59e43e2110f1b9269 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
@@ -15,11 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.cost;
+package org.apache.hadoop.hive.ql.optimizer.calcite.cost;
-import org.eigenbase.relopt.RelOptCost;
-import org.eigenbase.relopt.RelOptCostFactory;
-import org.eigenbase.relopt.RelOptUtil;
+import org.apache.calcite.plan.RelOptCost;
+import org.apache.calcite.plan.RelOptCostFactory;
+import org.apache.calcite.plan.RelOptUtil;
// TODO: This should inherit from VolcanoCost and should just override isLE method.
public class HiveCost implements RelOptCost {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostUtil.java
similarity index 80%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostUtil.java
index 7436f12f662542c41e71a7fee37179e35e4e2553..c7e9217e19f771fbd36cecd1b99dcf284aeca549 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCostUtil.java
@@ -15,11 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.cost;
+package org.apache.hadoop.hive.ql.optimizer.calcite.cost;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel;
-import org.eigenbase.relopt.RelOptCost;
+import org.apache.calcite.plan.RelOptCost;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
// Use this once we have Join Algorithm selection
public class HiveCostUtil {
@@ -32,11 +32,11 @@
//Use this once we have Join Algorithm selection
private static final double hDFSReadCostInNanoSec = 1.5 * localFSReadCostInNanoSec;
- public static RelOptCost computCardinalityBasedCost(HiveRel hr) {
+ public static RelOptCost computCardinalityBasedCost(HiveRelNode hr) {
return new HiveCost(hr.getRows(), 0, 0);
}
- public static HiveCost computeCost(HiveTableScanRel t) {
+ public static HiveCost computeCost(HiveTableScan t) {
double cardinality = t.getRows();
return new HiveCost(cardinality, 0, hDFSWriteCostInNanoSec * cardinality * 0);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
similarity index 76%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
index 5deb801649f47e0629b3583ef57c62d4a4699f78..ebcd4f34fa72b5ae0e89a6fc6d3f8bf285756b4f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveVolcanoPlanner.java
@@ -16,18 +16,18 @@
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.cost;
+package org.apache.hadoop.hive.ql.optimizer.calcite.cost;
-import org.eigenbase.rel.RelCollationTraitDef;
-import org.eigenbase.relopt.ConventionTraitDef;
-import org.eigenbase.relopt.RelOptPlanner;
-import org.eigenbase.relopt.volcano.VolcanoPlanner;
+import org.apache.calcite.plan.ConventionTraitDef;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.volcano.VolcanoPlanner;
+import org.apache.calcite.rel.RelCollationTraitDef;
/**
- * Refinement of {@link org.eigenbase.relopt.volcano.VolcanoPlanner} for Hive.
+ * Refinement of {@link org.apache.calcite.plan.volcano.VolcanoPlanner} for Hive.
*
*
- * It uses {@link org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost} as
+ * It uses {@link org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost} as
* its cost model.
*/
public class HiveVolcanoPlanner extends VolcanoPlanner {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java
deleted file mode 100644
index fc198958735e12cb3503a0b4c486d8328a10a2fa..0000000000000000000000000000000000000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
-
-import java.util.BitSet;
-import java.util.List;
-
-import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
-import org.eigenbase.rel.AggregateCall;
-import org.eigenbase.rel.AggregateRelBase;
-import org.eigenbase.rel.InvalidRelException;
-import org.eigenbase.rel.RelFactories.AggregateFactory;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.metadata.RelMetadataQuery;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.relopt.RelOptCost;
-import org.eigenbase.relopt.RelOptPlanner;
-import org.eigenbase.relopt.RelTraitSet;
-
-public class HiveAggregateRel extends AggregateRelBase implements HiveRel {
-
- public static final HiveAggRelFactory HIVE_AGGR_REL_FACTORY = new HiveAggRelFactory();
-
- public HiveAggregateRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
- BitSet groupSet, List aggCalls) throws InvalidRelException {
- super(cluster, TraitsUtil.getDefaultTraitSet(cluster), child, groupSet, aggCalls);
- }
-
- @Override
- public AggregateRelBase copy(RelTraitSet traitSet, RelNode input, BitSet groupSet,
- List aggCalls) {
- try {
- return new HiveAggregateRel(getCluster(), traitSet, input, groupSet, aggCalls);
- } catch (InvalidRelException e) {
- // Semantic error not possible. Must be a bug. Convert to
- // internal error.
- throw new AssertionError(e);
- }
- }
-
- @Override
- public void implement(Implementor implementor) {
- }
-
- @Override
- public RelOptCost computeSelfCost(RelOptPlanner planner) {
- return HiveCost.FACTORY.makeZeroCost();
- }
-
- @Override
- public double getRows() {
- return RelMetadataQuery.getDistinctRowCount(this, groupSet, getCluster().getRexBuilder()
- .makeLiteral(true));
- }
-
- private static class HiveAggRelFactory implements AggregateFactory {
-
- @Override
- public RelNode createAggregate(RelNode child, BitSet groupSet,
- List aggCalls) {
- try {
- return new HiveAggregateRel(child.getCluster(), child.getTraitSet(), child, groupSet, aggCalls);
- } catch (InvalidRelException e) {
- throw new RuntimeException(e);
- }
- }
- }
-}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
similarity index 57%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
index 8b850463ac1c3270163725f876404449ef8dc5f9..3e45a3fbed3265b126a3ff9b6ffe44bee24453ef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java
@@ -15,31 +15,31 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
-import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
-import org.eigenbase.rel.FilterRelBase;
-import org.eigenbase.rel.RelFactories.FilterFactory;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.relopt.RelOptCost;
-import org.eigenbase.relopt.RelOptPlanner;
-import org.eigenbase.relopt.RelTraitSet;
-import org.eigenbase.rex.RexNode;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptCost;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.RelFactories.FilterFactory;
+import org.apache.calcite.rex.RexNode;
+import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
-public class HiveFilterRel extends FilterRelBase implements HiveRel {
+public class HiveFilter extends Filter implements HiveRelNode {
public static final FilterFactory DEFAULT_FILTER_FACTORY = new HiveFilterFactoryImpl();
- public HiveFilterRel(RelOptCluster cluster, RelTraitSet traits, RelNode child, RexNode condition) {
+ public HiveFilter(RelOptCluster cluster, RelTraitSet traits, RelNode child, RexNode condition) {
super(cluster, TraitsUtil.getDefaultTraitSet(cluster), child, condition);
}
@Override
- public FilterRelBase copy(RelTraitSet traitSet, RelNode input, RexNode condition) {
- assert traitSet.containsIfApplicable(HiveRel.CONVENTION);
- return new HiveFilterRel(getCluster(), traitSet, input, getCondition());
+ public Filter copy(RelTraitSet traitSet, RelNode input, RexNode condition) {
+ assert traitSet.containsIfApplicable(HiveRelNode.CONVENTION);
+ return new HiveFilter(getCluster(), traitSet, input, getCondition());
}
@Override
@@ -53,14 +53,14 @@ public RelOptCost computeSelfCost(RelOptPlanner planner) {
/**
* Implementation of {@link FilterFactory} that returns
- * {@link org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel}
+ * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter}
* .
*/
private static class HiveFilterFactoryImpl implements FilterFactory {
@Override
public RelNode createFilter(RelNode child, RexNode condition) {
RelOptCluster cluster = child.getCluster();
- HiveFilterRel filter = new HiveFilterRel(cluster, TraitsUtil.getDefaultTraitSet(cluster), child, condition);
+ HiveFilter filter = new HiveFilter(cluster, TraitsUtil.getDefaultTraitSet(cluster), child, condition);
return filter;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
similarity index 77%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
index 3d6aa848cd4c83ec8eb22f7df449911d67a53b9b..724135b0fc86560774d58b6685c1b4b3f9e0436c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
@@ -15,29 +15,29 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
import java.util.Collections;
import java.util.Set;
-import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
-import org.eigenbase.rel.InvalidRelException;
-import org.eigenbase.rel.JoinRelBase;
-import org.eigenbase.rel.JoinRelType;
-import org.eigenbase.rel.RelFactories.JoinFactory;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.metadata.RelMetadataQuery;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.relopt.RelOptCost;
-import org.eigenbase.relopt.RelOptPlanner;
-import org.eigenbase.relopt.RelTraitSet;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeField;
-import org.eigenbase.rex.RexNode;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptCost;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.InvalidRelException;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Join;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.core.RelFactories.JoinFactory;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexNode;
+import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
-//TODO: Should we convert MultiJoin to be a child of HiveJoinRelBase
-public class HiveJoinRel extends JoinRelBase implements HiveRel {
+//TODO: Should we convert MultiJoin to be a child of HiveJoin
+public class HiveJoin extends Join implements HiveRelNode {
// NOTE: COMMON_JOIN & SMB_JOIN are Sort Merge Join (in case of COMMON_JOIN
// each parallel computation handles multiple splits where as in case of SMB
// each parallel computation handles one bucket). MAP_JOIN and BUCKET_JOIN is
@@ -59,18 +59,18 @@
@SuppressWarnings("unused")
private final MapJoinStreamingRelation mapJoinStreamingSide = MapJoinStreamingRelation.NONE;
- public static HiveJoinRel getJoin(RelOptCluster cluster, RelNode left, RelNode right,
+ public static HiveJoin getJoin(RelOptCluster cluster, RelNode left, RelNode right,
RexNode condition, JoinRelType joinType, boolean leftSemiJoin) {
try {
Set variablesStopped = Collections.emptySet();
- return new HiveJoinRel(cluster, null, left, right, condition, joinType, variablesStopped,
+ return new HiveJoin(cluster, null, left, right, condition, joinType, variablesStopped,
JoinAlgorithm.NONE, null, leftSemiJoin);
} catch (InvalidRelException e) {
throw new RuntimeException(e);
}
}
- protected HiveJoinRel(RelOptCluster cluster, RelTraitSet traits, RelNode left, RelNode right,
+ protected HiveJoin(RelOptCluster cluster, RelTraitSet traits, RelNode left, RelNode right,
RexNode condition, JoinRelType joinType, Set variablesStopped,
JoinAlgorithm joinAlgo, MapJoinStreamingRelation streamingSideForMapJoin, boolean leftSemiJoin)
throws InvalidRelException {
@@ -85,11 +85,11 @@ public void implement(Implementor implementor) {
}
@Override
- public final HiveJoinRel copy(RelTraitSet traitSet, RexNode conditionExpr, RelNode left,
+ public final HiveJoin copy(RelTraitSet traitSet, RexNode conditionExpr, RelNode left,
RelNode right, JoinRelType joinType, boolean semiJoinDone) {
try {
Set variablesStopped = Collections.emptySet();
- return new HiveJoinRel(getCluster(), traitSet, left, right, conditionExpr, joinType,
+ return new HiveJoin(getCluster(), traitSet, left, right, conditionExpr, joinType,
variablesStopped, JoinAlgorithm.NONE, null, leftSemiJoin);
} catch (InvalidRelException e) {
// Semantic error not possible. Must be a bug. Convert to
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java
similarity index 60%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java
index f8755d0175c10e5b5461649773bf44abe998b44e..5fc64f3e8c97fc8988bc35be39dbabf78dd7de24 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveLimit.java
@@ -15,36 +15,36 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
import java.util.List;
-import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.SingleRel;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.relopt.RelOptCost;
-import org.eigenbase.relopt.RelOptPlanner;
-import org.eigenbase.relopt.RelTraitSet;
-import org.eigenbase.rex.RexNode;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptCost;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.SingleRel;
+import org.apache.calcite.rex.RexNode;
+import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
-public class HiveLimitRel extends SingleRel implements HiveRel {
+public class HiveLimit extends SingleRel implements HiveRelNode {
private final RexNode offset;
private final RexNode fetch;
- HiveLimitRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, RexNode offset,
+ HiveLimit(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, RexNode offset,
RexNode fetch) {
super(cluster, TraitsUtil.getDefaultTraitSet(cluster), child);
this.offset = offset;
this.fetch = fetch;
- assert getConvention() instanceof HiveRel;
+ assert getConvention() instanceof HiveRelNode;
assert getConvention() == child.getConvention();
}
@Override
- public HiveLimitRel copy(RelTraitSet traitSet, List newInputs) {
- return new HiveLimitRel(getCluster(), traitSet, sole(newInputs), offset, fetch);
+ public HiveLimit copy(RelTraitSet traitSet, List newInputs) {
+ return new HiveLimit(getCluster(), traitSet, sole(newInputs), offset, fetch);
}
public void implement(Implementor implementor) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
similarity index 67%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
index 7b434ea58451bef6a6566eb241933843ee855606..6c215c96190f0fcebe063b15c2763c49ebf1faaf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
@@ -15,43 +15,43 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptCost;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Project;
+import org.apache.calcite.rel.core.RelFactories.ProjectFactory;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.util.Util;
+import org.apache.calcite.util.mapping.Mapping;
+import org.apache.calcite.util.mapping.MappingType;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
+
import com.google.common.collect.ImmutableList;
-import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil;
-import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException;
-import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
-import org.eigenbase.rel.ProjectRelBase;
-import org.eigenbase.rel.RelCollation;
-import org.eigenbase.rel.RelFactories.ProjectFactory;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.relopt.RelOptCost;
-import org.eigenbase.relopt.RelOptPlanner;
-import org.eigenbase.relopt.RelTraitSet;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeField;
-import org.eigenbase.rex.RexBuilder;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.rex.RexUtil;
-import org.eigenbase.util.Util;
-import org.eigenbase.util.mapping.Mapping;
-import org.eigenbase.util.mapping.MappingType;
-
-public class HiveProjectRel extends ProjectRelBase implements HiveRel {
+public class HiveProject extends Project implements HiveRelNode {
public static final ProjectFactory DEFAULT_PROJECT_FACTORY = new HiveProjectFactoryImpl();
private final List virtualCols;
/**
- * Creates a HiveProjectRel.
+ * Creates a HiveProject.
*
* @param cluster
* Cluster this relational expression belongs to
@@ -62,16 +62,16 @@
* @param rowType
* output row type
* @param flags
- * values as in {@link ProjectRelBase.Flags}
+ * values as in {@link Project.Flags}
*/
- public HiveProjectRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
+ public HiveProject(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
List extends RexNode> exps, RelDataType rowType, int flags) {
super(cluster, traitSet, child, exps, rowType, flags);
- virtualCols = ImmutableList.copyOf(HiveOptiqUtil.getVirtualCols(exps));
+ virtualCols = ImmutableList.copyOf(HiveCalciteUtil.getVirtualCols(exps));
}
/**
- * Creates a HiveProjectRel with no sort keys.
+ * Creates a HiveProject with no sort keys.
*
* @param child
* input relational expression
@@ -80,34 +80,34 @@ public HiveProjectRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child
* @param fieldNames
* aliases of the expressions
*/
- public static HiveProjectRel create(RelNode child, List extends RexNode> exps,
- List fieldNames) throws OptiqSemanticException{
+ public static HiveProject create(RelNode child, List extends RexNode> exps,
+ List fieldNames) throws CalciteSemanticException{
RelOptCluster cluster = child.getCluster();
- // 1 Ensure columnNames are unique - OPTIQ-411
+ // 1 Ensure columnNames are unique - CALCITE-411
if (fieldNames != null && !Util.isDistinct(fieldNames)) {
String msg = "Select list contains multiple expressions with the same name." + fieldNames;
- throw new OptiqSemanticException(msg);
+ throw new CalciteSemanticException(msg);
}
RelDataType rowType = RexUtil.createStructType(cluster.getTypeFactory(), exps, fieldNames);
return create(cluster, child, exps, rowType, Collections. emptyList());
}
/**
- * Creates a HiveProjectRel.
+ * Creates a HiveProject.
*/
- public static HiveProjectRel create(RelOptCluster cluster, RelNode child, List extends RexNode> exps,
+ public static HiveProject create(RelOptCluster cluster, RelNode child, List extends RexNode> exps,
RelDataType rowType, final List collationList) {
RelTraitSet traitSet = TraitsUtil.getDefaultTraitSet(cluster);
- return new HiveProjectRel(cluster, traitSet, child, exps, rowType, Flags.BOXED);
+ return new HiveProject(cluster, traitSet, child, exps, rowType, Flags.BOXED);
}
/**
- * Creates a HiveProjectRel.
+ * Creates a HiveProject.
*/
- public static HiveProjectRel create(RelOptCluster cluster, RelNode child, List extends RexNode> exps,
+ public static HiveProject create(RelOptCluster cluster, RelNode child, List extends RexNode> exps,
RelDataType rowType, RelTraitSet traitSet, final List collationList) {
- return new HiveProjectRel(cluster, traitSet, child, exps, rowType, Flags.BOXED);
+ return new HiveProject(cluster, traitSet, child, exps, rowType, Flags.BOXED);
}
/**
@@ -135,9 +135,9 @@ public static HiveProjectRel create(RelOptCluster cluster, RelNode child, List
* Field names; if null, or if a particular entry is null, the name
* of the permuted field is used
* @return relational expression which projects a subset of the input fields
- * @throws OptiqSemanticException
+ * @throws CalciteSemanticException
*/
- public static RelNode projectMapping(RelNode rel, Mapping mapping, List fieldNames) throws OptiqSemanticException {
+ public static RelNode projectMapping(RelNode rel, Mapping mapping, List fieldNames) throws CalciteSemanticException {
assert mapping.getMappingType().isSingleSource();
assert mapping.getMappingType().isMandatorySource();
@@ -163,10 +163,10 @@ public static RelNode projectMapping(RelNode rel, Mapping mapping, List
}
@Override
- public ProjectRelBase copy(RelTraitSet traitSet, RelNode input, List exps,
+ public Project copy(RelTraitSet traitSet, RelNode input, List exps,
RelDataType rowType) {
- assert traitSet.containsIfApplicable(HiveRel.CONVENTION);
- return new HiveProjectRel(getCluster(), traitSet, input, exps, rowType, getFlags());
+ assert traitSet.containsIfApplicable(HiveRelNode.CONVENTION);
+ return new HiveProject(getCluster(), traitSet, input, exps, rowType, getFlags());
}
@Override
@@ -184,7 +184,7 @@ public void implement(Implementor implementor) {
/**
* Implementation of {@link ProjectFactory} that returns
- * {@link org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel}
+ * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject}
* .
*/
private static class HiveProjectFactoryImpl implements ProjectFactory {
@@ -194,7 +194,7 @@ public RelNode createProject(RelNode child,
List extends RexNode> childExprs, List fieldNames) {
RelOptCluster cluster = child.getCluster();
RelDataType rowType = RexUtil.createStructType(cluster.getTypeFactory(), childExprs, fieldNames);
- RelNode project = HiveProjectRel.create(cluster, child,
+ RelNode project = HiveProject.create(cluster, child,
childExprs, rowType,
child.getTraitSet(), Collections. emptyList());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveRelNode.java
similarity index 81%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveRelNode.java
index 4738c4ac2d33cd15d2db7fe4b8336e1f59dd5212..30acfe223987110757e8813e51559883c5ff4b00 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveRelNode.java
@@ -15,22 +15,22 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.relopt.Convention;
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.rel.RelNode;
-public interface HiveRel extends RelNode {
+public interface HiveRelNode extends RelNode {
void implement(Implementor implementor);
/** Calling convention for relational operations that occur in Hive. */
- final Convention CONVENTION = new Convention.Impl("HIVE", HiveRel.class);
+ final Convention CONVENTION = new Convention.Impl("HIVE", HiveRelNode.class);
class Implementor {
public void visitChild(int ordinal, RelNode input) {
assert ordinal == 0;
- ((HiveRel) input).implement(this);
+ ((HiveRelNode) input).implement(this);
}
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSort.java
similarity index 67%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSort.java
index f85363d50c1c3eb9cef39072106057669454d4da..18d283824a02594a74d4c10192a583d496b25dd4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSort.java
@@ -15,47 +15,47 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
import java.util.Map;
-import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
-import org.eigenbase.rel.RelCollation;
-import org.eigenbase.rel.RelFactories;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.SortRel;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.relopt.RelTraitSet;
-import org.eigenbase.rex.RexNode;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.RelFactories;
+import org.apache.calcite.rel.core.Sort;
+import org.apache.calcite.rex.RexNode;
+import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
import com.google.common.collect.ImmutableMap;
-public class HiveSortRel extends SortRel implements HiveRel {
+public class HiveSort extends Sort implements HiveRelNode {
public static final HiveSortRelFactory HIVE_SORT_REL_FACTORY = new HiveSortRelFactory();
- // NOTE: this is to work around Hive Optiq Limitations w.r.t OB.
- // 1. Optiq can not accept expressions in OB; instead it needs to be expressed
+ // NOTE: this is to work around Hive Calcite Limitations w.r.t OB.
+ // 1. Calcite can not accept expressions in OB; instead it needs to be expressed
// as VC in input Select.
// 2. Hive can not preserve ordering through select boundaries.
// 3. This map is used for outermost OB to migrate the VC corresponding OB
// expressions from input select.
- // 4. This is used by ASTConverter after we are done with Optiq Planning
+ // 4. This is used by ASTConverter after we are done with Calcite Planning
private ImmutableMap mapOfInputRefToRexCall;
- public HiveSortRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
+ public HiveSort(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
RelCollation collation, RexNode offset, RexNode fetch) {
super(cluster, TraitsUtil.getSortTraitSet(cluster, traitSet, collation), child, collation,
offset, fetch);
}
@Override
- public HiveSortRel copy(RelTraitSet traitSet, RelNode newInput, RelCollation newCollation,
+ public HiveSort copy(RelTraitSet traitSet, RelNode newInput, RelCollation newCollation,
RexNode offset, RexNode fetch) {
// TODO: can we blindly copy sort trait? What if inputs changed and we
// are now sorting by different cols
RelCollation canonizedCollation = traitSet.canonize(newCollation);
- return new HiveSortRel(getCluster(), traitSet, newInput, canonizedCollation, offset, fetch);
+ return new HiveSort(getCluster(), traitSet, newInput, canonizedCollation, offset, fetch);
}
public RexNode getFetchExpr() {
@@ -79,7 +79,7 @@ public void implement(Implementor implementor) {
@Override
public RelNode createSort(RelTraitSet traits, RelNode child, RelCollation collation,
RexNode offset, RexNode fetch) {
- return new HiveSortRel(child.getCluster(), traits, child, collation, offset, fetch);
+ return new HiveSort(child.getCluster(), traits, child, collation, offset, fetch);
}
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
similarity index 71%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
index bd66459def099df6432f344a9d8439deef09daa6..53021ea92d7084a28beabb00828787cc81e7e4fd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
@@ -15,21 +15,21 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
import java.util.List;
-import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptCost;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.TableScan;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
import org.apache.hadoop.hive.ql.plan.ColStatistics;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.TableAccessRelBase;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.relopt.RelOptCost;
-import org.eigenbase.relopt.RelOptPlanner;
-import org.eigenbase.relopt.RelTraitSet;
-import org.eigenbase.reltype.RelDataType;
/**
@@ -40,7 +40,7 @@
* methods.
*
*/
-public class HiveTableScanRel extends TableAccessRelBase implements HiveRel {
+public class HiveTableScan extends TableScan implements HiveRelNode {
/**
* Creates a HiveTableScan.
@@ -54,10 +54,10 @@
* @param table
* HiveDB table
*/
- public HiveTableScanRel(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table,
+ public HiveTableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table,
RelDataType rowtype) {
super(cluster, TraitsUtil.getDefaultTraitSet(cluster), table);
- assert getConvention() == HiveRel.CONVENTION;
+ assert getConvention() == HiveRelNode.CONVENTION;
}
@Override
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveUnionRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java
similarity index 62%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveUnionRel.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java
index d34fe9540e239c13f6bd23894056305c0c402e0d..72226e7497efa4a83ec794ba71070ab1c9450d04 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveUnionRel.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveUnion.java
@@ -15,30 +15,30 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
import java.util.List;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel.Implementor;
-import org.eigenbase.rel.RelFactories;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.SetOpRel;
-import org.eigenbase.rel.UnionRelBase;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.relopt.RelTraitSet;
-import org.eigenbase.sql.SqlKind;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.RelFactories;
+import org.apache.calcite.rel.core.SetOp;
+import org.apache.calcite.rel.core.Union;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode.Implementor;
-public class HiveUnionRel extends UnionRelBase {
+public class HiveUnion extends Union {
public static final HiveUnionRelFactory UNION_REL_FACTORY = new HiveUnionRelFactory();
- public HiveUnionRel(RelOptCluster cluster, RelTraitSet traits, List inputs) {
+ public HiveUnion(RelOptCluster cluster, RelTraitSet traits, List inputs) {
super(cluster, traits, inputs, true);
}
@Override
- public SetOpRel copy(RelTraitSet traitSet, List inputs, boolean all) {
- return new HiveUnionRel(this.getCluster(), traitSet, inputs);
+ public SetOp copy(RelTraitSet traitSet, List inputs, boolean all) {
+ return new HiveUnion(this.getCluster(), traitSet, inputs);
}
public void implement(Implementor implementor) {
@@ -51,7 +51,7 @@ public RelNode createSetOp(SqlKind kind, List inputs, boolean all) {
if (kind != SqlKind.UNION) {
throw new IllegalStateException("Expected to get Set operator of type Union. Found : " + kind);
}
- return new HiveUnionRel(inputs.get(0).getCluster(), inputs.get(0).getTraitSet(), inputs);
+ return new HiveUnion(inputs.get(0).getCluster(), inputs.get(0).getTraitSet(), inputs);
}
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectMergeRule.java
similarity index 67%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectMergeRule.java
index d6581e64fc8ea183666ea6c91397378456461088..8b90a15b5517a0b83c22b7a9bbbd90398c45184a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectMergeRule.java
@@ -15,16 +15,16 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.rules;
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel;
-import org.eigenbase.rel.rules.MergeProjectRule;
+import org.apache.calcite.rel.rules.ProjectMergeRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
//Currently not used, turn this on later
-public class HiveMergeProjectRule extends MergeProjectRule {
- public static final HiveMergeProjectRule INSTANCE = new HiveMergeProjectRule();
+public class HiveProjectMergeRule extends ProjectMergeRule {
+ public static final HiveProjectMergeRule INSTANCE = new HiveProjectMergeRule();
- public HiveMergeProjectRule() {
- super(true, HiveProjectRel.DEFAULT_PROJECT_FACTORY);
+ public HiveProjectMergeRule() {
+ super(true, HiveProject.DEFAULT_PROJECT_FACTORY);
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePartitionPrunerRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePartitionPruneRule.java
similarity index 59%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePartitionPrunerRule.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePartitionPruneRule.java
index ee19a6cbab0597242214e915745631f76214f70f..ba280552ae7a381e6695eb852f4ecd021c6c29ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePartitionPrunerRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePartitionPruneRule.java
@@ -15,41 +15,41 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.rules;
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.util.Pair;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel;
-import org.eigenbase.rel.FilterRelBase;
-import org.eigenbase.relopt.RelOptRule;
-import org.eigenbase.relopt.RelOptRuleCall;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.util.Pair;
+import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
-public class HivePartitionPrunerRule extends RelOptRule {
+public class HivePartitionPruneRule extends RelOptRule {
HiveConf conf;
- public HivePartitionPrunerRule(HiveConf conf) {
- super(operand(HiveFilterRel.class, operand(HiveTableScanRel.class, none())));
+ public HivePartitionPruneRule(HiveConf conf) {
+ super(operand(HiveFilter.class, operand(HiveTableScan.class, none())));
this.conf = conf;
}
@Override
public void onMatch(RelOptRuleCall call) {
- HiveFilterRel filter = call.rel(0);
- HiveTableScanRel tScan = call.rel(1);
+ HiveFilter filter = call.rel(0);
+ HiveTableScan tScan = call.rel(1);
perform(call, filter, tScan);
}
- protected void perform(RelOptRuleCall call, FilterRelBase filter,
- HiveTableScanRel tScan) {
+ protected void perform(RelOptRuleCall call, Filter filter,
+ HiveTableScan tScan) {
RelOptHiveTable hiveTable = (RelOptHiveTable) tScan.getTable();
RexNode predicate = filter.getCondition();
- Pair predicates = PartitionPruner
+ Pair predicates = PartitionPrune
.extractPartitionPredicates(filter.getCluster(), hiveTable, predicate);
RexNode partColExpr = predicates.left;
hiveTable.computePartitionList(conf, partColExpr);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterJoinRule.java
similarity index 60%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterJoinRule.java
index 1c483eabcc1aa43cc80d7b71e21a4ae4d30a7e12..75b54e7febed98d5146e0e04d1daf17f990823a6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveFilterJoinRule.java
@@ -15,37 +15,37 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.rules;
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
-import java.util.BitSet;
import java.util.List;
import java.util.ListIterator;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel;
-import org.eigenbase.rel.FilterRelBase;
-import org.eigenbase.rel.JoinRelBase;
-import org.eigenbase.rel.JoinRelType;
-import org.eigenbase.rel.RelFactories;
-import org.eigenbase.rel.rules.PushFilterPastJoinRule;
-import org.eigenbase.relopt.RelOptRule;
-import org.eigenbase.relopt.RelOptRuleCall;
-import org.eigenbase.relopt.RelOptRuleOperand;
-import org.eigenbase.relopt.RelOptUtil.InputFinder;
-import org.eigenbase.rex.RexCall;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.sql.SqlKind;
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.plan.RelOptRuleOperand;
+import org.apache.calcite.plan.RelOptUtil.InputFinder;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.Join;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.core.RelFactories;
+import org.apache.calcite.rel.rules.FilterJoinRule;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.util.ImmutableBitSet;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
-public abstract class HivePushFilterPastJoinRule extends PushFilterPastJoinRule {
+public abstract class HiveFilterJoinRule extends FilterJoinRule {
- public static final HivePushFilterPastJoinRule FILTER_ON_JOIN = new HivePushFilterIntoJoinRule();
+ public static final HiveFilterJoinRule FILTER_ON_JOIN = new HivePushIntoFilterJoinRule();
- public static final HivePushFilterPastJoinRule JOIN = new HivePushDownJoinConditionRule();
+ public static final HiveFilterJoinRule JOIN = new HivePushDownFilterJoinRule();
/**
* Creates a PushFilterPastJoinRule with an explicit root operand.
*/
- protected HivePushFilterPastJoinRule(RelOptRuleOperand operand, String id, boolean smart,
+ protected HiveFilterJoinRule(RelOptRuleOperand operand, String id, boolean smart,
RelFactories.FilterFactory filterFactory, RelFactories.ProjectFactory projectFactory) {
super(operand, id, smart, filterFactory, projectFactory);
}
@@ -54,32 +54,32 @@ protected HivePushFilterPastJoinRule(RelOptRuleOperand operand, String id, boole
* Rule that tries to push filter expressions into a join condition and into
* the inputs of the join.
*/
- public static class HivePushFilterIntoJoinRule extends HivePushFilterPastJoinRule {
- public HivePushFilterIntoJoinRule() {
- super(RelOptRule.operand(FilterRelBase.class,
- RelOptRule.operand(JoinRelBase.class, RelOptRule.any())),
- "HivePushFilterPastJoinRule:filter", true, HiveFilterRel.DEFAULT_FILTER_FACTORY,
- HiveProjectRel.DEFAULT_PROJECT_FACTORY);
+ public static class HivePushIntoFilterJoinRule extends HiveFilterJoinRule {
+ public HivePushIntoFilterJoinRule() {
+ super(RelOptRule.operand(Filter.class,
+ RelOptRule.operand(Join.class, RelOptRule.any())),
+ "HiveFilterJoinRule:filter", true, HiveFilter.DEFAULT_FILTER_FACTORY,
+ HiveProject.DEFAULT_PROJECT_FACTORY);
}
@Override
public void onMatch(RelOptRuleCall call) {
- FilterRelBase filter = call.rel(0);
- JoinRelBase join = call.rel(1);
+ Filter filter = call.rel(0);
+ Join join = call.rel(1);
super.perform(call, filter, join);
}
}
- public static class HivePushDownJoinConditionRule extends HivePushFilterPastJoinRule {
- public HivePushDownJoinConditionRule() {
- super(RelOptRule.operand(JoinRelBase.class, RelOptRule.any()),
- "HivePushFilterPastJoinRule:no-filter", true, HiveFilterRel.DEFAULT_FILTER_FACTORY,
- HiveProjectRel.DEFAULT_PROJECT_FACTORY);
+ public static class HivePushDownFilterJoinRule extends HiveFilterJoinRule {
+ public HivePushDownFilterJoinRule() {
+ super(RelOptRule.operand(Join.class, RelOptRule.any()),
+ "HiveFilterJoinRule:no-filter", true, HiveFilter.DEFAULT_FILTER_FACTORY,
+ HiveProject.DEFAULT_PROJECT_FACTORY);
}
@Override
public void onMatch(RelOptRuleCall call) {
- JoinRelBase join = call.rel(0);
+ Join join = call.rel(0);
super.perform(call, null, join);
}
}
@@ -91,7 +91,7 @@ public void onMatch(RelOptRuleCall call) {
*/
@Override
protected void validateJoinFilters(List aboveFilters, List joinFilters,
- JoinRelBase join, JoinRelType joinType) {
+ Join join, JoinRelType joinType) {
if (joinType.equals(JoinRelType.INNER)) {
ListIterator filterIter = joinFilters.listIterator();
while (filterIter.hasNext()) {
@@ -135,17 +135,17 @@ protected void validateJoinFilters(List aboveFilters, List joi
}
}
- private boolean filterRefersToBothSidesOfJoin(RexNode filter, JoinRelBase j) {
+ private boolean filterRefersToBothSidesOfJoin(RexNode filter, Join j) {
boolean refersToBothSides = false;
int joinNoOfProjects = j.getRowType().getFieldCount();
- BitSet filterProjs = new BitSet(joinNoOfProjects);
- BitSet allLeftProjs = new BitSet(joinNoOfProjects);
- BitSet allRightProjs = new BitSet(joinNoOfProjects);
- allLeftProjs.set(0, j.getInput(0).getRowType().getFieldCount(), true);
- allRightProjs.set(j.getInput(0).getRowType().getFieldCount(), joinNoOfProjects, true);
+ ImmutableBitSet allLeftProjs = ImmutableBitSet.range(0,
+ j.getInput(0).getRowType().getFieldCount());
+ ImmutableBitSet allRightProjs = ImmutableBitSet.range(
+ j.getInput(0).getRowType().getFieldCount(), joinNoOfProjects);
- InputFinder inputFinder = new InputFinder(filterProjs);
+ InputFinder inputFinder = new InputFinder();
filter.accept(inputFinder);
+ ImmutableBitSet filterProjs = inputFinder.inputBitSet.build();
if (allLeftProjs.intersects(filterProjs) && allRightProjs.intersects(filterProjs))
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/PartitionPrune.java
similarity index 89%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/PartitionPrune.java
index bdc8373877c1684855d256c9d45743f383fc7615..2fb9a52eb3e77afdd349b101367477a2b1cb6ca8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/PartitionPrune.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.rules;
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
import java.util.ArrayList;
import java.util.HashSet;
@@ -23,23 +23,23 @@
import java.util.List;
import java.util.Set;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexVisitorImpl;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.util.Pair;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
-import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeField;
-import org.eigenbase.rex.RexCall;
-import org.eigenbase.rex.RexInputRef;
-import org.eigenbase.rex.RexLiteral;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.rex.RexVisitorImpl;
-import org.eigenbase.sql.fun.SqlStdOperatorTable;
-import org.eigenbase.util.Pair;
-
-public class PartitionPruner {
+
+public class PartitionPrune {
/**
* Breaks the predicate into 2 pieces. The first piece is the expressions that
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/FilterSelectivityEstimator.java
similarity index 82%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/FilterSelectivityEstimator.java
index 28bf2ad506656b78894467c30364d751b180676e..b52779cd8774d8189be54e2836a0922c2cf75582 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/FilterSelectivityEstimator.java
@@ -15,30 +15,24 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.stats;
-
-import java.util.BitSet;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel;
-import org.eigenbase.rel.FilterRelBase;
-import org.eigenbase.rel.ProjectRelBase;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.metadata.RelMetadataQuery;
-import org.eigenbase.relopt.RelOptUtil;
-import org.eigenbase.relopt.RelOptUtil.InputReferencedVisitor;
-import org.eigenbase.rex.RexCall;
-import org.eigenbase.rex.RexInputRef;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.rex.RexVisitorImpl;
-import org.eigenbase.sql.SqlKind;
-import org.eigenbase.sql.SqlOperator;
-import org.eigenbase.sql.type.SqlTypeUtil;
-
-import com.google.common.collect.Sets;
+package org.apache.hadoop.hive.ql.optimizer.calcite.stats;
+
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.plan.RelOptUtil.InputReferencedVisitor;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.Project;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexVisitorImpl;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.type.SqlTypeUtil;
+import org.apache.calcite.util.ImmutableBitSet;
+import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
public class FilterSelectivityEstimator extends RexVisitorImpl {
private final RelNode childRel;
@@ -231,14 +225,14 @@ private Double getMaxNDV(RexCall call) {
}
private boolean isPartitionPredicate(RexNode expr, RelNode r) {
- if (r instanceof ProjectRelBase) {
- expr = RelOptUtil.pushFilterPastProject(expr, (ProjectRelBase) r);
- return isPartitionPredicate(expr, ((ProjectRelBase) r).getChild());
- } else if (r instanceof FilterRelBase) {
- return isPartitionPredicate(expr, ((FilterRelBase) r).getChild());
- } else if (r instanceof HiveTableScanRel) {
- RelOptHiveTable table = (RelOptHiveTable) ((HiveTableScanRel) r).getTable();
- BitSet cols = RelOptUtil.InputFinder.bits(expr);
+ if (r instanceof Project) {
+ expr = RelOptUtil.pushFilterPastProject(expr, (Project) r);
+ return isPartitionPredicate(expr, ((Project) r).getInput());
+ } else if (r instanceof Filter) {
+ return isPartitionPredicate(expr, ((Filter) r).getInput());
+ } else if (r instanceof HiveTableScan) {
+ RelOptHiveTable table = (RelOptHiveTable) ((HiveTableScan) r).getTable();
+ ImmutableBitSet cols = RelOptUtil.InputFinder.bits(expr);
return table.containsPartitionColumnsOnly(cols);
}
return false;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdDistinctRowCount.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdDistinctRowCount.java
similarity index 62%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdDistinctRowCount.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdDistinctRowCount.java
index 4be57b110c1a45819467d55e8a69e5529989c8f6..12204014560b6b533f90b76790394f97c16c2fff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdDistinctRowCount.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdDistinctRowCount.java
@@ -15,28 +15,27 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.stats;
+package org.apache.hadoop.hive.ql.optimizer.calcite.stats;
-import java.util.BitSet;
import java.util.List;
-import net.hydromatic.optiq.BuiltinMethod;
-
-import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil;
-import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel;
+import org.apache.calcite.plan.RelOptCost;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Join;
+import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
+import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
+import org.apache.calcite.rel.metadata.RelMdDistinctRowCount;
+import org.apache.calcite.rel.metadata.RelMdUtil;
+import org.apache.calcite.rel.metadata.RelMetadataProvider;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.util.BuiltInMethod;
+import org.apache.calcite.util.ImmutableBitSet;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveCost;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
import org.apache.hadoop.hive.ql.plan.ColStatistics;
-import org.eigenbase.rel.JoinRelBase;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.metadata.ChainedRelMetadataProvider;
-import org.eigenbase.rel.metadata.ReflectiveRelMetadataProvider;
-import org.eigenbase.rel.metadata.RelMdDistinctRowCount;
-import org.eigenbase.rel.metadata.RelMdUtil;
-import org.eigenbase.rel.metadata.RelMetadataProvider;
-import org.eigenbase.rel.metadata.RelMetadataQuery;
-import org.eigenbase.relopt.RelOptCost;
-import org.eigenbase.rex.RexNode;
import com.google.common.collect.ImmutableList;
@@ -49,31 +48,31 @@
.of(ImmutableList.of(
ReflectiveRelMetadataProvider.reflectiveSource(
- BuiltinMethod.DISTINCT_ROW_COUNT.method, INSTANCE),
+ BuiltInMethod.DISTINCT_ROW_COUNT.method, INSTANCE),
ReflectiveRelMetadataProvider.reflectiveSource(
- BuiltinMethod.CUMULATIVE_COST.method, INSTANCE)));
+ BuiltInMethod.CUMULATIVE_COST.method, INSTANCE)));
private HiveRelMdDistinctRowCount() {
}
// Catch-all rule when none of the others apply.
@Override
- public Double getDistinctRowCount(RelNode rel, BitSet groupKey,
+ public Double getDistinctRowCount(RelNode rel, ImmutableBitSet groupKey,
RexNode predicate) {
- if (rel instanceof HiveTableScanRel) {
- return getDistinctRowCount((HiveTableScanRel) rel, groupKey, predicate);
+ if (rel instanceof HiveTableScan) {
+ return getDistinctRowCount((HiveTableScan) rel, groupKey, predicate);
}
/*
- * For now use Optiq' default formulas for propagating NDVs up the Query
+ * For now use Calcite's default formulas for propagating NDVs up the Query
* Tree.
*/
return super.getDistinctRowCount(rel, groupKey, predicate);
}
- private Double getDistinctRowCount(HiveTableScanRel htRel, BitSet groupKey,
+ private Double getDistinctRowCount(HiveTableScan htRel, ImmutableBitSet groupKey,
RexNode predicate) {
- List projIndxLst = HiveOptiqUtil
+ List projIndxLst = HiveCalciteUtil
.translateBitSetToProjIndx(groupKey);
List colStats = htRel.getColStat(projIndxLst);
Double noDistinctRows = 1.0;
@@ -85,17 +84,16 @@ private Double getDistinctRowCount(HiveTableScanRel htRel, BitSet groupKey,
}
public static Double getDistinctRowCount(RelNode r, int indx) {
- BitSet bitSetOfRqdProj = new BitSet();
- bitSetOfRqdProj.set(indx);
+ ImmutableBitSet bitSetOfRqdProj = ImmutableBitSet.of(indx);
return RelMetadataQuery.getDistinctRowCount(r, bitSetOfRqdProj, r
.getCluster().getRexBuilder().makeLiteral(true));
}
@Override
- public Double getDistinctRowCount(JoinRelBase rel, BitSet groupKey,
+ public Double getDistinctRowCount(Join rel, ImmutableBitSet groupKey,
RexNode predicate) {
- if (rel instanceof HiveJoinRel) {
- HiveJoinRel hjRel = (HiveJoinRel) rel;
+ if (rel instanceof HiveJoin) {
+ HiveJoin hjRel = (HiveJoin) rel;
//TODO: Improve this
if (hjRel.isLeftSemiJoin()) {
return RelMetadataQuery.getDistinctRowCount(hjRel.getLeft(), groupKey,
@@ -112,7 +110,7 @@ public Double getDistinctRowCount(JoinRelBase rel, BitSet groupKey,
/*
* Favor Broad Plans over Deep Plans.
*/
- public RelOptCost getCumulativeCost(HiveJoinRel rel) {
+ public RelOptCost getCumulativeCost(HiveJoin rel) {
RelOptCost cost = RelMetadataQuery.getNonCumulativeCost(rel);
List inputs = rel.getInputs();
RelOptCost maxICost = HiveCost.ZERO;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdRowCount.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java
similarity index 81%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdRowCount.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java
index 8c7f643940b74dd7743635c3eaa046d52d41346f..dabbe280278dc80f00f0240a0c615fe6c7b8533a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdRowCount.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdRowCount.java
@@ -16,41 +16,38 @@
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.stats;
+package org.apache.hadoop.hive.ql.optimizer.calcite.stats;
import java.util.ArrayList;
-import java.util.BitSet;
import java.util.List;
import java.util.Set;
-import net.hydromatic.optiq.BuiltinMethod;
-import net.hydromatic.optiq.util.BitSets;
-
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.plan.hep.HepRelVertex;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelVisitor;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.Join;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.core.Project;
+import org.apache.calcite.rel.core.SemiJoin;
+import org.apache.calcite.rel.core.TableScan;
+import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
+import org.apache.calcite.rel.metadata.RelMdRowCount;
+import org.apache.calcite.rel.metadata.RelMetadataProvider;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.util.BuiltInMethod;
+import org.apache.calcite.util.ImmutableBitSet;
+import org.apache.calcite.util.Pair;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel;
-import org.eigenbase.rel.FilterRelBase;
-import org.eigenbase.rel.JoinRelBase;
-import org.eigenbase.rel.JoinRelType;
-import org.eigenbase.rel.ProjectRelBase;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.RelVisitor;
-import org.eigenbase.rel.TableAccessRelBase;
-import org.eigenbase.rel.metadata.ReflectiveRelMetadataProvider;
-import org.eigenbase.rel.metadata.RelMdRowCount;
-import org.eigenbase.rel.metadata.RelMetadataProvider;
-import org.eigenbase.rel.metadata.RelMetadataQuery;
-import org.eigenbase.rel.rules.SemiJoinRel;
-import org.eigenbase.relopt.RelOptUtil;
-import org.eigenbase.relopt.hep.HepRelVertex;
-import org.eigenbase.rex.RexBuilder;
-import org.eigenbase.rex.RexCall;
-import org.eigenbase.rex.RexInputRef;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.rex.RexUtil;
-import org.eigenbase.sql.fun.SqlStdOperatorTable;
-import org.eigenbase.util.Holder;
-import org.eigenbase.util.Pair;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
public class HiveRelMdRowCount extends RelMdRowCount {
@@ -58,13 +55,13 @@
public static final RelMetadataProvider SOURCE = ReflectiveRelMetadataProvider
- .reflectiveSource(BuiltinMethod.ROW_COUNT.method, new HiveRelMdRowCount());
+ .reflectiveSource(BuiltInMethod.ROW_COUNT.method, new HiveRelMdRowCount());
protected HiveRelMdRowCount() {
super();
}
- public Double getRowCount(JoinRelBase join) {
+ public Double getRowCount(Join join) {
PKFKRelationInfo pkfk = analyzeJoinForPKFK(join);
if (pkfk != null) {
double selectivity = (pkfk.pkInfo.selectivity * pkfk.ndvScalingFactor);
@@ -79,7 +76,7 @@ public Double getRowCount(JoinRelBase join) {
return join.getRows();
}
- public Double getRowCount(SemiJoinRel rel) {
+ public Double getRowCount(SemiJoin rel) {
PKFKRelationInfo pkfk = analyzeJoinForPKFK(rel);
if (pkfk != null) {
double selectivity = (pkfk.pkInfo.selectivity * pkfk.ndvScalingFactor);
@@ -162,7 +159,7 @@ public String toString() {
* or Fact roj Dim b) The selectivity factor applied on the Fact Table should
* be 1.
*/
- public static PKFKRelationInfo analyzeJoinForPKFK(JoinRelBase joinRel) {
+ public static PKFKRelationInfo analyzeJoinForPKFK(Join joinRel) {
RelNode left = joinRel.getInputs().get(0);
RelNode right = joinRel.getInputs().get(1);
@@ -180,12 +177,11 @@ public static PKFKRelationInfo analyzeJoinForPKFK(JoinRelBase joinRel) {
List leftFilters = new ArrayList();
List rightFilters = new ArrayList();
List joinFilters = new ArrayList(initJoinFilters);
- final Holder joinTypeHolder = Holder.of(joinRel.getJoinType());
// @todo: remove this. 8/28/14 hb
// for now adding because RelOptUtil.classifyFilters has an assertion about
// column counts that is not true for semiJoins.
- if (joinRel instanceof SemiJoinRel) {
+ if (joinRel instanceof SemiJoin) {
return null;
}
@@ -207,8 +203,8 @@ public static PKFKRelationInfo analyzeJoinForPKFK(JoinRelBase joinRel) {
.composeConjunction(rexBuilder, leftFilters, true);
RexNode rightPred = RexUtil.composeConjunction(rexBuilder, rightFilters,
true);
- BitSet lBitSet = BitSets.of(leftColIdx);
- BitSet rBitSet = BitSets.of(rightColIdx);
+ ImmutableBitSet lBitSet = ImmutableBitSet.of(leftColIdx);
+ ImmutableBitSet rBitSet = ImmutableBitSet.of(rightColIdx);
/*
* If the form is Dim loj F or Fact roj Dim or Dim semij Fact then return
@@ -216,7 +212,7 @@ public static PKFKRelationInfo analyzeJoinForPKFK(JoinRelBase joinRel) {
*/
boolean leftIsKey = (joinRel.getJoinType() == JoinRelType.INNER || joinRel
.getJoinType() == JoinRelType.RIGHT)
- && !(joinRel instanceof SemiJoinRel) && isKey(lBitSet, left);
+ && !(joinRel instanceof SemiJoin) && isKey(lBitSet, left);
boolean rightIsKey = (joinRel.getJoinType() == JoinRelType.INNER || joinRel
.getJoinType() == JoinRelType.LEFT) && isKey(rBitSet, right);
@@ -296,14 +292,14 @@ public static PKFKRelationInfo analyzeJoinForPKFK(JoinRelBase joinRel) {
return null;
}
- private static double pkSelectivity(JoinRelBase joinRel, boolean leftChild,
+ private static double pkSelectivity(Join joinRel, boolean leftChild,
RelNode child,
double childRowCount) {
if ((leftChild && joinRel.getJoinType().generatesNullsOnRight()) ||
(!leftChild && joinRel.getJoinType().generatesNullsOnLeft())) {
return 1.0;
} else {
- HiveTableScanRel tScan = HiveRelMdUniqueKeys.getTableScan(child, true);
+ HiveTableScan tScan = HiveRelMdUniqueKeys.getTableScan(child, true);
if (tScan != null) {
double tRowCount = RelMetadataQuery.getRowCount(tScan);
return childRowCount / tRowCount;
@@ -313,11 +309,11 @@ private static double pkSelectivity(JoinRelBase joinRel, boolean leftChild,
}
}
- private static boolean isKey(BitSet c, RelNode rel) {
+ private static boolean isKey(ImmutableBitSet c, RelNode rel) {
boolean isKey = false;
- Set keys = RelMetadataQuery.getUniqueKeys(rel);
+ Set keys = RelMetadataQuery.getUniqueKeys(rel);
if (keys != null) {
- for (BitSet key : keys) {
+ for (ImmutableBitSet key : keys) {
if (key.equals(c)) {
isKey = true;
break;
@@ -332,7 +328,7 @@ private static boolean isKey(BitSet c, RelNode rel) {
* 2. both sides must reference 1 column.
* 3. If needed flip the columns.
*/
- private static Pair canHandleJoin(JoinRelBase joinRel,
+ private static Pair canHandleJoin(Join joinRel,
List leftFilters, List rightFilters,
List joinFilters) {
@@ -357,8 +353,8 @@ private static boolean isKey(BitSet c, RelNode rel) {
return null;
}
- BitSet leftCols = RelOptUtil.InputFinder.bits(((RexCall) joinCond).getOperands().get(0));
- BitSet rightCols = RelOptUtil.InputFinder.bits(((RexCall) joinCond).getOperands().get(1));
+ ImmutableBitSet leftCols = RelOptUtil.InputFinder.bits(((RexCall) joinCond).getOperands().get(0));
+ ImmutableBitSet rightCols = RelOptUtil.InputFinder.bits(((RexCall) joinCond).getOperands().get(1));
if (leftCols.cardinality() != 1 || rightCols.cardinality() != 1 ) {
return null;
@@ -367,14 +363,14 @@ private static boolean isKey(BitSet c, RelNode rel) {
int nFieldsLeft = joinRel.getLeft().getRowType().getFieldList().size();
int nFieldsRight = joinRel.getRight().getRowType().getFieldList().size();
int nSysFields = joinRel.getSystemFieldList().size();
- BitSet rightFieldsBitSet = BitSets.range(nSysFields + nFieldsLeft,
+ ImmutableBitSet rightFieldsBitSet = ImmutableBitSet.range(nSysFields + nFieldsLeft,
nSysFields + nFieldsLeft + nFieldsRight);
/*
* flip column references if join condition specified in reverse order to
* join sources.
*/
- if (BitSets.contains(rightFieldsBitSet, leftCols)) {
- BitSet t = leftCols;
+ if (rightFieldsBitSet.contains(leftCols)) {
+ ImmutableBitSet t = leftCols;
leftCols = rightCols;
rightCols = t;
}
@@ -409,12 +405,12 @@ public void visit(RelNode node, int ordinal, RelNode parent) {
node = ((HepRelVertex) node).getCurrentRel();
}
- if (node instanceof TableAccessRelBase) {
+ if (node instanceof TableScan) {
simpleTree = true;
- } else if (node instanceof ProjectRelBase) {
- simpleTree = isSimple((ProjectRelBase) node);
- } else if (node instanceof FilterRelBase) {
- simpleTree = isSimple((FilterRelBase) node);
+ } else if (node instanceof Project) {
+ simpleTree = isSimple((Project) node);
+ } else if (node instanceof Filter) {
+ simpleTree = isSimple((Filter) node);
} else {
simpleTree = false;
}
@@ -424,7 +420,7 @@ public void visit(RelNode node, int ordinal, RelNode parent) {
}
}
- private boolean isSimple(ProjectRelBase project) {
+ private boolean isSimple(Project project) {
RexNode r = project.getProjects().get(joinKey);
if (r instanceof RexInputRef) {
joinKey = ((RexInputRef) r).getIndex();
@@ -433,8 +429,8 @@ private boolean isSimple(ProjectRelBase project) {
return false;
}
- private boolean isSimple(FilterRelBase filter) {
- BitSet condBits = RelOptUtil.InputFinder.bits(filter.getCondition());
+ private boolean isSimple(Filter filter) {
+ ImmutableBitSet condBits = RelOptUtil.InputFinder.bits(filter.getCondition());
return isKey(condBits, filter);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdSelectivity.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
similarity index 87%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdSelectivity.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
index 49d2ee5a67b72fbf6134ce71de1d7260069cd16f..960ec40d3ef6c25cb76e88cfe883efd5785f5b50 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdSelectivity.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.stats;
+package org.apache.hadoop.hive.ql.optimizer.calcite.stats;
import java.util.ArrayList;
import java.util.Collections;
@@ -23,34 +23,32 @@
import java.util.Map;
import java.util.Set;
-import net.hydromatic.optiq.BuiltinMethod;
-
-import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil.JoinLeafPredicateInfo;
-import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil.JoinPredicateInfo;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel;
-import org.eigenbase.rel.JoinRelType;
-import org.eigenbase.rel.metadata.ReflectiveRelMetadataProvider;
-import org.eigenbase.rel.metadata.RelMdSelectivity;
-import org.eigenbase.rel.metadata.RelMdUtil;
-import org.eigenbase.rel.metadata.RelMetadataProvider;
-import org.eigenbase.rel.metadata.RelMetadataQuery;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.rex.RexUtil;
-import org.eigenbase.util.Pair;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
+import org.apache.calcite.rel.metadata.RelMdSelectivity;
+import org.apache.calcite.rel.metadata.RelMdUtil;
+import org.apache.calcite.rel.metadata.RelMetadataProvider;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.util.BuiltInMethod;
+import org.apache.calcite.util.Pair;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinLeafPredicateInfo;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
import com.google.common.collect.ImmutableMap;
public class HiveRelMdSelectivity extends RelMdSelectivity {
public static final RelMetadataProvider SOURCE = ReflectiveRelMetadataProvider.reflectiveSource(
- BuiltinMethod.SELECTIVITY.method,
+ BuiltInMethod.SELECTIVITY.method,
new HiveRelMdSelectivity());
protected HiveRelMdSelectivity() {
super();
}
- public Double getSelectivity(HiveTableScanRel t, RexNode predicate) {
+ public Double getSelectivity(HiveTableScan t, RexNode predicate) {
if (predicate != null) {
FilterSelectivityEstimator filterSelEstmator = new FilterSelectivityEstimator(t);
return filterSelEstmator.estimateSelectivity(predicate);
@@ -59,14 +57,14 @@ public Double getSelectivity(HiveTableScanRel t, RexNode predicate) {
return 1.0;
}
- public Double getSelectivity(HiveJoinRel j, RexNode predicate) {
+ public Double getSelectivity(HiveJoin j, RexNode predicate) {
if (j.getJoinType().equals(JoinRelType.INNER)) {
return computeInnerJoinSelectivity(j, predicate);
}
return 1.0;
}
- private Double computeInnerJoinSelectivity(HiveJoinRel j, RexNode predicate) {
+ private Double computeInnerJoinSelectivity(HiveJoin j, RexNode predicate) {
double ndvCrossProduct = 1;
Pair predInfo =
getCombinedPredicateForJoin(j, predicate);
@@ -191,7 +189,7 @@ protected double exponentialBackoff(List peLst,
* @return if predicate is the join condition return (true, joinCond)
* else return (false, minusPred)
*/
- private Pair getCombinedPredicateForJoin(HiveJoinRel j, RexNode additionalPredicate) {
+ private Pair getCombinedPredicateForJoin(HiveJoin j, RexNode additionalPredicate) {
RexNode minusPred = RelMdUtil.minusPreds(j.getCluster().getRexBuilder(), additionalPredicate,
j.getCondition());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdUniqueKeys.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java
similarity index 68%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdUniqueKeys.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java
index c3c8bdd2466b0f46d49437fcf8d49dbb689cfcda..95515b23e409d73d5c61e107931727add3f992a6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdUniqueKeys.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdUniqueKeys.java
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.stats;
+package org.apache.hadoop.hive.ql.optimizer.calcite.stats;
import java.util.BitSet;
import java.util.HashMap;
@@ -25,44 +25,44 @@
import java.util.Map;
import java.util.Set;
-import net.hydromatic.optiq.BuiltinMethod;
-import net.hydromatic.optiq.util.BitSets;
-
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.plan.hep.HepRelVertex;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.Project;
+import org.apache.calcite.rel.metadata.BuiltInMetadata;
+import org.apache.calcite.rel.metadata.Metadata;
+import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
+import org.apache.calcite.rel.metadata.RelMdUniqueKeys;
+import org.apache.calcite.rel.metadata.RelMetadataProvider;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.util.BitSets;
+import org.apache.calcite.util.BuiltInMethod;
+import org.apache.calcite.util.ImmutableBitSet;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
import org.apache.hadoop.hive.ql.plan.ColStatistics;
-import org.eigenbase.rel.FilterRelBase;
-import org.eigenbase.rel.ProjectRelBase;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.metadata.BuiltInMetadata;
-import org.eigenbase.rel.metadata.Metadata;
-import org.eigenbase.rel.metadata.ReflectiveRelMetadataProvider;
-import org.eigenbase.rel.metadata.RelMdUniqueKeys;
-import org.eigenbase.rel.metadata.RelMetadataProvider;
-import org.eigenbase.relopt.RelOptUtil;
-import org.eigenbase.relopt.hep.HepRelVertex;
-import org.eigenbase.rex.RexInputRef;
-import org.eigenbase.rex.RexNode;
import com.google.common.base.Function;
public class HiveRelMdUniqueKeys {
public static final RelMetadataProvider SOURCE = ReflectiveRelMetadataProvider
- .reflectiveSource(BuiltinMethod.UNIQUE_KEYS.method,
+ .reflectiveSource(BuiltInMethod.UNIQUE_KEYS.method,
new HiveRelMdUniqueKeys());
/*
* Infer Uniquenes if: - rowCount(col) = ndv(col) - TBD for numerics: max(col)
* - min(col) = rowCount(col)
*
- * Why are we intercepting ProjectRelbase and not TableScan? Because if we
+ * Why are we intercepting Project and not TableScan? Because if we
* have a method for TableScan, it will not know which columns to check for.
* Inferring Uniqueness for all columns is very expensive right now. The flip
* side of doing this is, it only works post Field Trimming.
*/
- public Set getUniqueKeys(ProjectRelBase rel, boolean ignoreNulls) {
+ public Set getUniqueKeys(Project rel, boolean ignoreNulls) {
- HiveTableScanRel tScan = getTableScan(rel.getChild(), false);
+ HiveTableScan tScan = getTableScan(rel.getInput(), false);
if ( tScan == null ) {
Function fn = RelMdUniqueKeys.SOURCE.apply(
@@ -88,7 +88,7 @@
double numRows = tScan.getRows();
List colStats = tScan.getColStat(BitSets
.toList(projectedCols));
- Set keys = new HashSet();
+ Set keys = new HashSet();
colStatsPos = 0;
for (ColStatistics cStat : colStats) {
@@ -104,8 +104,7 @@
isKey = (Math.abs(numRows - r) < RelOptUtil.EPSILON);
}
if ( isKey ) {
- BitSet key = new BitSet();
- key.set(posMap.get(colStatsPos));
+ ImmutableBitSet key = ImmutableBitSet.of(posMap.get(colStatsPos));
keys.add(key);
}
colStatsPos++;
@@ -120,20 +119,20 @@
* by the invocation on the Project.
* In case of getting the base rowCount of a Path, keep going past a Project.
*/
- static HiveTableScanRel getTableScan(RelNode r, boolean traverseProject) {
+ static HiveTableScan getTableScan(RelNode r, boolean traverseProject) {
- while (r != null && !(r instanceof HiveTableScanRel)) {
+ while (r != null && !(r instanceof HiveTableScan)) {
if (r instanceof HepRelVertex) {
r = ((HepRelVertex) r).getCurrentRel();
- } else if (r instanceof FilterRelBase) {
- r = ((FilterRelBase) r).getChild();
- } else if (traverseProject && r instanceof ProjectRelBase) {
- r = ((ProjectRelBase) r).getChild();
+ } else if (r instanceof Filter) {
+ r = ((Filter) r).getInput();
+ } else if (traverseProject && r instanceof Project) {
+ r = ((Project) r).getInput();
} else {
r = null;
}
}
- return r == null ? null : (HiveTableScanRel) r;
+ return r == null ? null : (HiveTableScan) r;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
similarity index 93%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTBuilder.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
index 58320c73aafbfeec025f52ee813b3cfd06fa0821..3eb11cf2e30aaadd753c81f6900026627e6a29ad 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
@@ -15,23 +15,22 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.translator;
+package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Calendar;
-import net.hydromatic.avatica.ByteString;
-
-import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
+import org.apache.calcite.avatica.ByteString;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.core.TableScan;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
-import org.eigenbase.rel.JoinRelType;
-import org.eigenbase.rel.TableAccessRelBase;
-import org.eigenbase.rex.RexLiteral;
-import org.eigenbase.sql.type.SqlTypeName;
class ASTBuilder {
@@ -53,15 +52,15 @@ static ASTNode destNode() {
"TOK_TMP_FILE")).node();
}
- static ASTNode table(TableAccessRelBase scan) {
+ static ASTNode table(TableScan scan) {
RelOptHiveTable hTbl = (RelOptHiveTable) scan.getTable();
ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_TABREF, "TOK_TABREF").add(
ASTBuilder.construct(HiveParser.TOK_TABNAME, "TOK_TABNAME")
.add(HiveParser.Identifier, hTbl.getHiveTableMD().getDbName())
.add(HiveParser.Identifier, hTbl.getHiveTableMD().getTableName()));
- // NOTE: Optiq considers tbls to be equal if their names are the same. Hence
- // we need to provide Optiq the fully qualified table name (dbname.tblname)
+ // NOTE: Calcite considers tbls to be equal if their names are the same. Hence
+ // we need to provide Calcite the fully qualified table name (dbname.tblname)
// and not the user provided aliases.
// However in HIVE DB name can not appear in select list; in case of join
// where table names differ only in DB name, Hive would require user
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
similarity index 82%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
index a217d70e48da0835fed3565ba510bcc9e86c0fa1..c02a65e2041e4742a56cf4a935da0a7c04d18fdb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.translator;
+package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
import java.math.BigDecimal;
import java.util.ArrayList;
@@ -23,44 +23,43 @@
import java.util.List;
import java.util.Map;
-import net.hydromatic.optiq.util.BitSets;
-
+import org.apache.calcite.rel.RelFieldCollation;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelVisitor;
+import org.apache.calcite.rel.core.Aggregate;
+import org.apache.calcite.rel.core.AggregateCall;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.Join;
+import org.apache.calcite.rel.core.Project;
+import org.apache.calcite.rel.core.SemiJoin;
+import org.apache.calcite.rel.core.Sort;
+import org.apache.calcite.rel.core.TableScan;
+import org.apache.calcite.rel.core.Union;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexFieldAccess;
+import org.apache.calcite.rex.RexFieldCollation;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexOver;
+import org.apache.calcite.rex.RexVisitorImpl;
+import org.apache.calcite.rex.RexWindow;
+import org.apache.calcite.rex.RexWindowBound;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.BitSets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException;
-import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveSortRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter.HiveToken;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter.HiveToken;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
-import org.eigenbase.rel.AggregateCall;
-import org.eigenbase.rel.AggregateRelBase;
-import org.eigenbase.rel.FilterRelBase;
-import org.eigenbase.rel.JoinRelBase;
-import org.eigenbase.rel.ProjectRelBase;
-import org.eigenbase.rel.RelFieldCollation;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.RelVisitor;
-import org.eigenbase.rel.SortRel;
-import org.eigenbase.rel.TableAccessRelBase;
-import org.eigenbase.rel.UnionRelBase;
-import org.eigenbase.rel.rules.SemiJoinRel;
-import org.eigenbase.reltype.RelDataTypeField;
-import org.eigenbase.rex.RexCall;
-import org.eigenbase.rex.RexFieldAccess;
-import org.eigenbase.rex.RexFieldCollation;
-import org.eigenbase.rex.RexInputRef;
-import org.eigenbase.rex.RexLiteral;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.rex.RexOver;
-import org.eigenbase.rex.RexVisitorImpl;
-import org.eigenbase.rex.RexWindow;
-import org.eigenbase.rex.RexWindowBound;
-import org.eigenbase.sql.SqlKind;
-import org.eigenbase.sql.SqlOperator;
-import org.eigenbase.sql.type.SqlTypeName;
import com.google.common.collect.Iterables;
@@ -70,12 +69,12 @@
private RelNode root;
private HiveAST hiveAST;
private RelNode from;
- private FilterRelBase where;
- private AggregateRelBase groupBy;
- private FilterRelBase having;
- private ProjectRelBase select;
- private SortRel order;
- private SortRel limit;
+ private Filter where;
+ private Aggregate groupBy;
+ private Filter having;
+ private Project select;
+ private Sort order;
+ private Sort limit;
private Schema schema;
@@ -88,7 +87,7 @@
}
public static ASTNode convert(final RelNode relNode, List resultSchema)
- throws OptiqSemanticException {
+ throws CalciteSemanticException {
RelNode root = PlanModifierForASTConv.convertOpTree(relNode, resultSchema);
ASTConverter c = new ASTConverter(root, 0);
return c.convert();
@@ -163,20 +162,20 @@ private ASTNode convert() {
/*
* 7. Order Use in Order By from the block above. RelNode has no pointer to
* parent hence we need to go top down; but OB at each block really belong
- * to its src/from. Hence the need to pass in sortRel for each block from
+ * to its src/from. Hence the need to pass in sort for each block from
* its parent.
*/
- convertOBToASTNode((HiveSortRel) order);
+ convertOBToASTNode((HiveSort) order);
// 8. Limit
- convertLimitToASTNode((HiveSortRel) limit);
+ convertLimitToASTNode((HiveSort) limit);
return hiveAST.getAST();
}
- private void convertLimitToASTNode(HiveSortRel limit) {
+ private void convertLimitToASTNode(HiveSort limit) {
if (limit != null) {
- HiveSortRel hiveLimit = (HiveSortRel) limit;
+ HiveSort hiveLimit = (HiveSort) limit;
RexNode limitExpr = hiveLimit.getFetchExpr();
if (limitExpr != null) {
Object val = ((RexLiteral) limitExpr).getValue2();
@@ -185,14 +184,14 @@ private void convertLimitToASTNode(HiveSortRel limit) {
}
}
- private void convertOBToASTNode(HiveSortRel order) {
+ private void convertOBToASTNode(HiveSort order) {
if (order != null) {
- HiveSortRel hiveSort = (HiveSortRel) order;
+ HiveSort hiveSort = (HiveSort) order;
if (!hiveSort.getCollation().getFieldCollations().isEmpty()) {
// 1 Add order by token
ASTNode orderAst = ASTBuilder.createAST(HiveParser.TOK_ORDERBY, "TOK_ORDERBY");
- schema = new Schema((HiveSortRel) hiveSort);
+ schema = new Schema((HiveSort) hiveSort);
Map obRefToCallMap = hiveSort.getInputRefToCallMap();
RexNode obExpr;
ASTNode astCol;
@@ -237,23 +236,23 @@ private QueryBlockInfo convertSource(RelNode r) {
Schema s;
ASTNode ast;
- if (r instanceof TableAccessRelBase) {
- TableAccessRelBase f = (TableAccessRelBase) r;
+ if (r instanceof TableScan) {
+ TableScan f = (TableScan) r;
s = new Schema(f);
ast = ASTBuilder.table(f);
- } else if (r instanceof JoinRelBase) {
- JoinRelBase join = (JoinRelBase) r;
+ } else if (r instanceof Join) {
+ Join join = (Join) r;
QueryBlockInfo left = convertSource(join.getLeft());
QueryBlockInfo right = convertSource(join.getRight());
s = new Schema(left.schema, right.schema);
ASTNode cond = join.getCondition().accept(new RexVisitor(s));
- boolean semiJoin = join instanceof SemiJoinRel;
+ boolean semiJoin = join instanceof SemiJoin;
ast = ASTBuilder.join(left.ast, right.ast, join.getJoinType(), cond, semiJoin);
if (semiJoin)
s = left.schema;
- } else if (r instanceof UnionRelBase) {
- RelNode leftInput = ((UnionRelBase) r).getInput(0);
- RelNode rightInput = ((UnionRelBase) r).getInput(1);
+ } else if (r instanceof Union) {
+ RelNode leftInput = ((Union) r).getInput(0);
+ RelNode rightInput = ((Union) r).getInput(1);
ASTConverter leftConv = new ASTConverter(leftInput, this.derivedTableCount);
ASTConverter rightConv = new ASTConverter(rightInput, this.derivedTableCount);
@@ -264,7 +263,7 @@ private QueryBlockInfo convertSource(RelNode r) {
String sqAlias = nextAlias();
ast = ASTBuilder.subQuery(unionAST, sqAlias);
- s = new Schema((UnionRelBase) r, sqAlias);
+ s = new Schema((Union) r, sqAlias);
} else {
ASTConverter src = new ASTConverter(r, this.derivedTableCount);
ASTNode srcAST = src.convert();
@@ -277,16 +276,16 @@ private QueryBlockInfo convertSource(RelNode r) {
class QBVisitor extends RelVisitor {
- public void handle(FilterRelBase filter) {
- RelNode child = filter.getChild();
- if (child instanceof AggregateRelBase && !((AggregateRelBase) child).getGroupSet().isEmpty()) {
+ public void handle(Filter filter) {
+ RelNode child = filter.getInput();
+ if (child instanceof Aggregate && !((Aggregate) child).getGroupSet().isEmpty()) {
ASTConverter.this.having = filter;
} else {
ASTConverter.this.where = filter;
}
}
- public void handle(ProjectRelBase project) {
+ public void handle(Project project) {
if (ASTConverter.this.select == null) {
ASTConverter.this.select = project;
} else {
@@ -297,23 +296,23 @@ public void handle(ProjectRelBase project) {
@Override
public void visit(RelNode node, int ordinal, RelNode parent) {
- if (node instanceof TableAccessRelBase) {
+ if (node instanceof TableScan) {
ASTConverter.this.from = node;
- } else if (node instanceof FilterRelBase) {
- handle((FilterRelBase) node);
- } else if (node instanceof ProjectRelBase) {
- handle((ProjectRelBase) node);
- } else if (node instanceof JoinRelBase) {
+ } else if (node instanceof Filter) {
+ handle((Filter) node);
+ } else if (node instanceof Project) {
+ handle((Project) node);
+ } else if (node instanceof Join) {
ASTConverter.this.from = node;
- } else if (node instanceof UnionRelBase) {
+ } else if (node instanceof Union) {
ASTConverter.this.from = node;
- } else if (node instanceof AggregateRelBase) {
- ASTConverter.this.groupBy = (AggregateRelBase) node;
- } else if (node instanceof SortRel) {
+ } else if (node instanceof Aggregate) {
+ ASTConverter.this.groupBy = (Aggregate) node;
+ } else if (node instanceof Sort) {
if (ASTConverter.this.select != null) {
ASTConverter.this.from = node;
} else {
- SortRel hiveSortRel = (SortRel) node;
+ Sort hiveSortRel = (Sort) node;
if (hiveSortRel.getCollation().getFieldCollations().isEmpty())
ASTConverter.this.limit = hiveSortRel;
else
@@ -529,33 +528,32 @@ public QueryBlockInfo(Schema schema, ASTNode ast) {
private static final long serialVersionUID = 1L;
- Schema(TableAccessRelBase scan) {
+ Schema(TableScan scan) {
String tabName = ((RelOptHiveTable) scan.getTable()).getTableAlias();
for (RelDataTypeField field : scan.getRowType().getFieldList()) {
add(new ColumnInfo(tabName, field.getName()));
}
}
- Schema(ProjectRelBase select, String alias) {
+ Schema(Project select, String alias) {
for (RelDataTypeField field : select.getRowType().getFieldList()) {
add(new ColumnInfo(alias, field.getName()));
}
}
- Schema(UnionRelBase unionRel, String alias) {
+ Schema(Union unionRel, String alias) {
for (RelDataTypeField field : unionRel.getRowType().getFieldList()) {
add(new ColumnInfo(alias, field.getName()));
}
}
- @SuppressWarnings("unchecked")
Schema(Schema left, Schema right) {
for (ColumnInfo cI : Iterables.concat(left, right)) {
add(cI);
}
}
- Schema(Schema src, AggregateRelBase gBy) {
+ Schema(Schema src, Aggregate gBy) {
for (int i : BitSets.toIter(gBy.getGroupSet())) {
ColumnInfo cI = src.get(i);
add(cI);
@@ -578,16 +576,16 @@ public QueryBlockInfo(Schema schema, ASTNode ast) {
/**
* Assumption:
- * 1. ProjectRel will always be child of SortRel.
- * 2. In Optiq every projection in ProjectRelBase is uniquely named
+ * 1. Project will always be child of Sort.
+ * 2. In Calcite every projection in Project is uniquely named
* (unambigous) without using table qualifier (table name).
*
* @param order
- * Hive Sort Rel Node
+ * Hive Sort Node
* @return Schema
*/
- public Schema(HiveSortRel order) {
- ProjectRelBase select = (ProjectRelBase) order.getChild();
+ public Schema(HiveSort order) {
+ Project select = (Project) order.getInput();
for (String projName : select.getRowType().getFieldNames()) {
add(new ColumnInfo(null, projName));
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
similarity index 94%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index 65c6322d68ef234fbf55a4a36b4cb47e69c30cac..bebceb547e147dfa3a1698425afec28a8f8c293a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.translator;
+package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
import java.sql.Date;
import java.util.ArrayList;
@@ -30,13 +30,13 @@
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeField;
-import org.eigenbase.rex.RexCall;
-import org.eigenbase.rex.RexInputRef;
-import org.eigenbase.rex.RexLiteral;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.rex.RexVisitorImpl;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexVisitorImpl;
/*
* convert a RexNode to an ExprNodeDesc
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinCondTypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/JoinCondTypeCheckProcFactory.java
similarity index 98%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinCondTypeCheckProcFactory.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/JoinCondTypeCheckProcFactory.java
index 89c57b9a636480c16c2c76c92b2eeb6fc4e2eba5..4cd01c936ea4f97db95f6e0795f69d8d4a901505 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinCondTypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/JoinCondTypeCheckProcFactory.java
@@ -15,7 +15,16 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.translator;
+package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.Stack;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -38,17 +47,8 @@
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Stack;
-
/**
- * JoinCondTypeCheckProcFactory is used by Optiq planner(CBO) to generate Join Conditions from Join Condition AST.
+ * JoinCondTypeCheckProcFactory is used by the Calcite planner (CBO) to generate Join Conditions from Join Condition AST.
* Reasons for sub class:
* 1. Additional restrictions on what is supported in Join Conditions
* 2. Column handling is different
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinTypeCheckCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/JoinTypeCheckCtx.java
similarity index 93%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinTypeCheckCtx.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/JoinTypeCheckCtx.java
index fdee66b96f26ca915c1c77400434ca2dd0304033..bbd4723f638b276cf1f5d3cb8b1cc0296bb3b576 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinTypeCheckCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/JoinTypeCheckCtx.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.translator;
+package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
import java.util.List;
@@ -27,7 +27,7 @@
import com.google.common.collect.ImmutableList;
/**
- * JoinTypeCheckCtx is used by Optiq planner(CBO) to generate Join Conditions from Join Condition AST.
+ * JoinTypeCheckCtx is used by the Calcite planner (CBO) to generate Join Conditions from Join Condition AST.
* Reasons for sub class:
* 1. Join Conditions can not handle:
* a. Stateful Functions
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/PlanModifierForASTConv.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
similarity index 67%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/PlanModifierForASTConv.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
index 57f030b2e4f084ee476b1ae41a2710b5aed53594..10eefac29ad6719dbdc9fa1a950d26675b537720 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/PlanModifierForASTConv.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
@@ -15,44 +15,42 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.translator;
+package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.plan.hep.HepRelVertex;
+import org.apache.calcite.plan.volcano.RelSubset;
+import org.apache.calcite.rel.RelCollationImpl;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.SingleRel;
+import org.apache.calcite.rel.core.Aggregate;
+import org.apache.calcite.rel.core.AggregateCall;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.Join;
+import org.apache.calcite.rel.core.Project;
+import org.apache.calcite.rel.core.SetOp;
+import org.apache.calcite.rel.core.Sort;
+import org.apache.calcite.rel.rules.MultiJoin;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlAggFunction;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.util.Pair;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil;
-import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveAggregateRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveSortRel;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.eigenbase.rel.AggregateCall;
-import org.eigenbase.rel.AggregateRelBase;
-import org.eigenbase.rel.Aggregation;
-import org.eigenbase.rel.EmptyRel;
-import org.eigenbase.rel.FilterRelBase;
-import org.eigenbase.rel.JoinRelBase;
-import org.eigenbase.rel.OneRowRelBase;
-import org.eigenbase.rel.ProjectRelBase;
-import org.eigenbase.rel.RelCollationImpl;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.SetOpRel;
-import org.eigenbase.rel.SingleRel;
-import org.eigenbase.rel.SortRel;
-import org.eigenbase.rel.rules.MultiJoinRel;
-import org.eigenbase.relopt.RelOptUtil;
-import org.eigenbase.relopt.hep.HepRelVertex;
-import org.eigenbase.relopt.volcano.RelSubset;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeFactory;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.sql.SqlKind;
-import org.eigenbase.util.Pair;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
@@ -61,13 +59,13 @@
private static final Log LOG = LogFactory.getLog(PlanModifierForASTConv.class);
public static RelNode convertOpTree(RelNode rel, List resultSchema)
- throws OptiqSemanticException {
+ throws CalciteSemanticException {
RelNode newTopNode = rel;
if (LOG.isDebugEnabled()) {
LOG.debug("Original plan for PlanModifier\n " + RelOptUtil.toString(newTopNode));
}
- if (!(newTopNode instanceof ProjectRelBase) && !(newTopNode instanceof SortRel)) {
+ if (!(newTopNode instanceof Project) && !(newTopNode instanceof Sort)) {
newTopNode = introduceDerivedTable(newTopNode);
if (LOG.isDebugEnabled()) {
LOG.debug("Plan after top-level introduceDerivedTable\n "
@@ -80,13 +78,13 @@ public static RelNode convertOpTree(RelNode rel, List resultSchema)
LOG.debug("Plan after nested convertOpTree\n " + RelOptUtil.toString(newTopNode));
}
- Pair topSelparentPair = HiveOptiqUtil.getTopLevelSelect(newTopNode);
+ Pair topSelparentPair = HiveCalciteUtil.getTopLevelSelect(newTopNode);
fixTopOBSchema(newTopNode, topSelparentPair, resultSchema);
if (LOG.isDebugEnabled()) {
LOG.debug("Plan after fixTopOBSchema\n " + RelOptUtil.toString(newTopNode));
}
- topSelparentPair = HiveOptiqUtil.getTopLevelSelect(newTopNode);
+ topSelparentPair = HiveCalciteUtil.getTopLevelSelect(newTopNode);
newTopNode = renameTopLevelSelectInResultSchema(newTopNode, topSelparentPair, resultSchema);
if (LOG.isDebugEnabled()) {
LOG.debug("Final plan after modifier\n " + RelOptUtil.toString(newTopNode));
@@ -96,44 +94,40 @@ public static RelNode convertOpTree(RelNode rel, List resultSchema)
private static void convertOpTree(RelNode rel, RelNode parent) {
- if (rel instanceof EmptyRel) {
- throw new RuntimeException("Found Empty Rel");
- } else if (rel instanceof HepRelVertex) {
+ if (rel instanceof HepRelVertex) {
throw new RuntimeException("Found HepRelVertex");
- } else if (rel instanceof JoinRelBase) {
+ } else if (rel instanceof Join) {
if (!validJoinParent(rel, parent)) {
introduceDerivedTable(rel, parent);
}
- } else if (rel instanceof MultiJoinRel) {
- throw new RuntimeException("Found MultiJoinRel");
- } else if (rel instanceof OneRowRelBase) {
- throw new RuntimeException("Found OneRowRelBase");
+ } else if (rel instanceof MultiJoin) {
+ throw new RuntimeException("Found MultiJoin");
} else if (rel instanceof RelSubset) {
throw new RuntimeException("Found RelSubset");
- } else if (rel instanceof SetOpRel) {
+ } else if (rel instanceof SetOp) {
// TODO: Handle more than 2 inputs for setop
if (!validSetopParent(rel, parent))
introduceDerivedTable(rel, parent);
- SetOpRel setopRel = (SetOpRel) rel;
- for (RelNode inputRel : setopRel.getInputs()) {
+ SetOp setop = (SetOp) rel;
+ for (RelNode inputRel : setop.getInputs()) {
if (!validSetopChild(inputRel)) {
- introduceDerivedTable(inputRel, setopRel);
+ introduceDerivedTable(inputRel, setop);
}
}
} else if (rel instanceof SingleRel) {
- if (rel instanceof FilterRelBase) {
+ if (rel instanceof Filter) {
if (!validFilterParent(rel, parent)) {
introduceDerivedTable(rel, parent);
}
- } else if (rel instanceof HiveSortRel) {
+ } else if (rel instanceof HiveSort) {
if (!validSortParent(rel, parent)) {
introduceDerivedTable(rel, parent);
}
- if (!validSortChild((HiveSortRel) rel)) {
- introduceDerivedTable(((HiveSortRel) rel).getChild(), rel);
+ if (!validSortChild((HiveSort) rel)) {
+ introduceDerivedTable(((HiveSort) rel).getInput(), rel);
}
- } else if (rel instanceof HiveAggregateRel) {
+ } else if (rel instanceof HiveAggregate) {
RelNode newParent = parent;
if (!validGBParent(rel, parent)) {
newParent = introduceDerivedTable(rel, parent);
@@ -156,13 +150,13 @@ private static void convertOpTree(RelNode rel, RelNode parent) {
private static void fixTopOBSchema(final RelNode rootRel,
Pair topSelparentPair, List resultSchema)
- throws OptiqSemanticException {
- if (!(topSelparentPair.getKey() instanceof SortRel)
- || !HiveOptiqUtil.orderRelNode(topSelparentPair.getKey())) {
+ throws CalciteSemanticException {
+ if (!(topSelparentPair.getKey() instanceof Sort)
+ || !HiveCalciteUtil.orderRelNode(topSelparentPair.getKey())) {
return;
}
- HiveSortRel obRel = (HiveSortRel) topSelparentPair.getKey();
- ProjectRelBase obChild = (ProjectRelBase) topSelparentPair.getValue();
+ HiveSort obRel = (HiveSort) topSelparentPair.getKey();
+ Project obChild = (Project) topSelparentPair.getValue();
if (obChild.getRowType().getFieldCount() <= resultSchema.size()) {
return;
}
@@ -181,19 +175,19 @@ private static void fixTopOBSchema(final RelNode rootRel,
if ((obChild.getRowType().getFieldCount() - inputRefToCallMap.size()) != resultSchema.size()) {
LOG.error(generateInvalidSchemaMessage(obChild, resultSchema, inputRefToCallMap.size()));
- throw new OptiqSemanticException("Result Schema didn't match Optimized Op Tree Schema");
+ throw new CalciteSemanticException("Result Schema didn't match Optimized Op Tree Schema");
}
// This removes order-by only expressions from the projections.
- HiveProjectRel replacementProjectRel = HiveProjectRel.create(obChild.getChild(), obChild
+ HiveProject replacementProjectRel = HiveProject.create(obChild.getInput(), obChild
.getChildExps().subList(0, resultSchema.size()), obChild.getRowType().getFieldNames()
.subList(0, resultSchema.size()));
obRel.replaceInput(0, replacementProjectRel);
obRel.setInputRefToCallMap(inputRefToCallMap);
}
- private static String generateInvalidSchemaMessage(ProjectRelBase topLevelProj,
+ private static String generateInvalidSchemaMessage(Project topLevelProj,
List resultSchema, int fieldsForOB) {
- String errorDesc = "Result Schema didn't match Optiq Optimized Op Tree; schema: ";
+ String errorDesc = "Result Schema didn't match Calcite Optimized Op Tree; schema: ";
for (FieldSchema fs : resultSchema) {
errorDesc += "[" + fs.getName() + ":" + fs.getType() + "], ";
}
@@ -209,17 +203,17 @@ private static String generateInvalidSchemaMessage(ProjectRelBase topLevelProj,
private static RelNode renameTopLevelSelectInResultSchema(final RelNode rootRel,
Pair topSelparentPair, List resultSchema)
- throws OptiqSemanticException {
+ throws CalciteSemanticException {
RelNode parentOforiginalProjRel = topSelparentPair.getKey();
- HiveProjectRel originalProjRel = (HiveProjectRel) topSelparentPair.getValue();
+ HiveProject originalProjRel = (HiveProject) topSelparentPair.getValue();
// Assumption: top portion of tree could only be
- // (limit)?(OB)?(ProjectRelBase)....
+ // (limit)?(OB)?(Project)....
List rootChildExps = originalProjRel.getChildExps();
if (resultSchema.size() != rootChildExps.size()) {
// Safeguard against potential issues in CBO RowResolver construction. Disable CBO for now.
LOG.error(generateInvalidSchemaMessage(originalProjRel, resultSchema, 0));
- throw new OptiqSemanticException("Result Schema didn't match Optimized Op Tree Schema");
+ throw new CalciteSemanticException("Result Schema didn't match Optimized Op Tree Schema");
}
List newSelAliases = new ArrayList();
@@ -232,7 +226,7 @@ private static RelNode renameTopLevelSelectInResultSchema(final RelNode rootRel,
newSelAliases.add(colAlias);
}
- HiveProjectRel replacementProjectRel = HiveProjectRel.create(originalProjRel.getChild(),
+ HiveProject replacementProjectRel = HiveProject.create(originalProjRel.getInput(),
originalProjRel.getChildExps(), newSelAliases);
if (rootRel == originalProjRel) {
@@ -244,9 +238,9 @@ private static RelNode renameTopLevelSelectInResultSchema(final RelNode rootRel,
}
private static RelNode introduceDerivedTable(final RelNode rel) {
- List projectList = HiveOptiqUtil.getProjsFromBelowAsInputRef(rel);
+ List projectList = HiveCalciteUtil.getProjsFromBelowAsInputRef(rel);
- HiveProjectRel select = HiveProjectRel.create(rel.getCluster(), rel, projectList,
+ HiveProject select = HiveProject.create(rel.getCluster(), rel, projectList,
rel.getRowType(), rel.getCollationList());
return select;
@@ -279,11 +273,11 @@ private static RelNode introduceDerivedTable(final RelNode rel, RelNode parent)
private static boolean validJoinParent(RelNode joinNode, RelNode parent) {
boolean validParent = true;
- if (parent instanceof JoinRelBase) {
- if (((JoinRelBase) parent).getRight() == joinNode) {
+ if (parent instanceof Join) {
+ if (((Join) parent).getRight() == joinNode) {
validParent = false;
}
- } else if (parent instanceof SetOpRel) {
+ } else if (parent instanceof SetOp) {
validParent = false;
}
@@ -295,8 +289,8 @@ private static boolean validFilterParent(RelNode filterNode, RelNode parent) {
// TOODO: Verify GB having is not a seperate filter (if so we shouldn't
// introduce derived table)
- if (parent instanceof FilterRelBase || parent instanceof JoinRelBase
- || parent instanceof SetOpRel) {
+ if (parent instanceof Filter || parent instanceof Join
+ || parent instanceof SetOp) {
validParent = false;
}
@@ -308,9 +302,9 @@ private static boolean validGBParent(RelNode gbNode, RelNode parent) {
// TOODO: Verify GB having is not a seperate filter (if so we shouldn't
// introduce derived table)
- if (parent instanceof JoinRelBase || parent instanceof SetOpRel
- || parent instanceof AggregateRelBase
- || (parent instanceof FilterRelBase && ((AggregateRelBase) gbNode).getGroupSet().isEmpty())) {
+ if (parent instanceof Join || parent instanceof SetOp
+ || parent instanceof Aggregate
+ || (parent instanceof Filter && ((Aggregate) gbNode).getGroupSet().isEmpty())) {
validParent = false;
}
@@ -320,19 +314,19 @@ private static boolean validGBParent(RelNode gbNode, RelNode parent) {
private static boolean validSortParent(RelNode sortNode, RelNode parent) {
boolean validParent = true;
- if (parent != null && !(parent instanceof ProjectRelBase)
- && !((parent instanceof SortRel) || HiveOptiqUtil.orderRelNode(parent)))
+ if (parent != null && !(parent instanceof Project)
+ && !((parent instanceof Sort) || HiveCalciteUtil.orderRelNode(parent)))
validParent = false;
return validParent;
}
- private static boolean validSortChild(HiveSortRel sortNode) {
+ private static boolean validSortChild(HiveSort sortNode) {
boolean validChild = true;
- RelNode child = sortNode.getChild();
+ RelNode child = sortNode.getInput();
- if (!(HiveOptiqUtil.limitRelNode(sortNode) && HiveOptiqUtil.orderRelNode(child))
- && !(child instanceof ProjectRelBase)) {
+ if (!(HiveCalciteUtil.limitRelNode(sortNode) && HiveCalciteUtil.orderRelNode(child))
+ && !(child instanceof Project)) {
validChild = false;
}
@@ -342,7 +336,7 @@ private static boolean validSortChild(HiveSortRel sortNode) {
private static boolean validSetopParent(RelNode setop, RelNode parent) {
boolean validChild = true;
- if (parent != null && !(parent instanceof ProjectRelBase)) {
+ if (parent != null && !(parent instanceof Project)) {
validChild = false;
}
@@ -352,7 +346,7 @@ private static boolean validSetopParent(RelNode setop, RelNode parent) {
private static boolean validSetopChild(RelNode setopChild) {
boolean validChild = true;
- if (!(setopChild instanceof ProjectRelBase)) {
+ if (!(setopChild instanceof Project)) {
validChild = false;
}
@@ -361,7 +355,7 @@ private static boolean validSetopChild(RelNode setopChild) {
private static boolean isEmptyGrpAggr(RelNode gbNode) {
// Verify if both groupset and aggrfunction are empty)
- AggregateRelBase aggrnode = (AggregateRelBase) gbNode;
+ Aggregate aggrnode = (Aggregate) gbNode;
if (aggrnode.getGroupSet().isEmpty() && aggrnode.getAggCallList().isEmpty()) {
return true;
}
@@ -378,19 +372,20 @@ private static void replaceEmptyGroupAggr(final RelNode rel, RelNode parent) {
+ rexNode.getKind());
}
}
- HiveAggregateRel oldAggRel = (HiveAggregateRel) rel;
+ HiveAggregate oldAggRel = (HiveAggregate) rel;
RelDataTypeFactory typeFactory = oldAggRel.getCluster().getTypeFactory();
RelDataType longType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, typeFactory);
RelDataType intType = TypeConverter.convert(TypeInfoFactory.intTypeInfo, typeFactory);
// Create the dummy aggregation.
- Aggregation countFn = (Aggregation) SqlFunctionConverter.getOptiqAggFn("count",
+ SqlAggFunction countFn = (SqlAggFunction) SqlFunctionConverter.getCalciteAggFn("count",
ImmutableList.of(intType), longType);
// TODO: Using 0 might be wrong; might need to walk down to find the
// proper index of a dummy.
List argList = ImmutableList.of(0);
AggregateCall dummyCall = new AggregateCall(countFn, false, argList, longType, null);
- AggregateRelBase newAggRel = oldAggRel.copy(oldAggRel.getTraitSet(), oldAggRel.getChild(),
- oldAggRel.getGroupSet(), ImmutableList.of(dummyCall));
+ Aggregate newAggRel = oldAggRel.copy(oldAggRel.getTraitSet(), oldAggRel.getInput(),
+ oldAggRel.indicator, oldAggRel.getGroupSet(), oldAggRel.getGroupSets(),
+ ImmutableList.of(dummyCall));
RelNode select = introduceDerivedTable(newAggRel);
parent.replaceInput(0, select);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
similarity index 83%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index e6052e75458988143b11f28692ce335ebef0b700..aff22124e006805d3504dc8729ebc5b25b0b400e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.translator;
+package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
import java.math.BigDecimal;
import java.math.BigInteger;
@@ -28,8 +28,18 @@
import java.util.List;
import java.util.Map;
-import net.hydromatic.avatica.ByteString;
-
+import org.apache.calcite.avatica.ByteString;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.fun.SqlCastFunction;
+import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.common.type.Decimal128;
@@ -37,7 +47,7 @@
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.common.type.HiveVarchar;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
-import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
import org.apache.hadoop.hive.ql.parse.ParseUtils;
import org.apache.hadoop.hive.ql.parse.RowResolver;
import org.apache.hadoop.hive.ql.parse.SemanticException;
@@ -62,17 +72,6 @@
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeFactory;
-import org.eigenbase.rex.RexBuilder;
-import org.eigenbase.rex.RexCall;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.rex.RexUtil;
-import org.eigenbase.sql.SqlOperator;
-import org.eigenbase.sql.fun.SqlCastFunction;
-import org.eigenbase.sql.type.SqlTypeName;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableList.Builder;
@@ -82,17 +81,17 @@
private static final Log LOG = LogFactory.getLog(RexNodeConverter.class);
private static class InputCtx {
- private final RelDataType optiqInpDataType;
+ private final RelDataType calciteInpDataType;
private final ImmutableMap hiveNameToPosMap;
private final RowResolver hiveRR;
- private final int offsetInOptiqSchema;
+ private final int offsetInCalciteSchema;
- private InputCtx(RelDataType optiqInpDataType, ImmutableMap hiveNameToPosMap,
- RowResolver hiveRR, int offsetInOptiqSchema) {
- this.optiqInpDataType = optiqInpDataType;
+ private InputCtx(RelDataType calciteInpDataType, ImmutableMap hiveNameToPosMap,
+ RowResolver hiveRR, int offsetInCalciteSchema) {
+ this.calciteInpDataType = calciteInpDataType;
this.hiveNameToPosMap = hiveNameToPosMap;
this.hiveRR = hiveRR;
- this.offsetInOptiqSchema = offsetInOptiqSchema;
+ this.offsetInCalciteSchema = offsetInCalciteSchema;
}
};
@@ -138,7 +137,7 @@ private RexNode convert(final ExprNodeFieldDesc fieldDesc) throws SemanticExcept
} else {
// This may happen for schema-less tables, where columns are dynamically
// supplied by serdes.
- throw new OptiqSemanticException("Unexpected rexnode : "
+ throw new CalciteSemanticException("Unexpected rexnode : "
+ rexNode.getClass().getCanonicalName());
}
}
@@ -197,14 +196,14 @@ private RexNode convert(final ExprNodeGenericFuncDesc func) throws SemanticExcep
if (expr == null) {
// This is not a cast; process the function.
retType = TypeConverter.convert(func.getTypeInfo(), cluster.getTypeFactory());
- SqlOperator optiqOp = SqlFunctionConverter.getOptiqOperator(func.getFuncText(),
+ SqlOperator calciteOp = SqlFunctionConverter.getCalciteOperator(func.getFuncText(),
func.getGenericUDF(), argTypeBldr.build(), retType);
- expr = cluster.getRexBuilder().makeCall(optiqOp, childRexNodeLst);
+ expr = cluster.getRexBuilder().makeCall(calciteOp, childRexNodeLst);
} else {
retType = expr.getType();
}
- // TODO: Cast Function in Optiq have a bug where it infertype on cast throws
+ // TODO: Cast Function in Calcite has a bug where inferring the type on a cast throws
// an exception
if (flattenExpr && (expr instanceof RexCall)
&& !(((RexCall) expr).getOperator() instanceof SqlCastFunction)) {
@@ -238,7 +237,7 @@ private boolean castExprUsingUDFBridge(GenericUDF gUDF) {
}
private RexNode handleExplicitCast(ExprNodeGenericFuncDesc func, List childRexNodeLst)
- throws OptiqSemanticException {
+ throws CalciteSemanticException {
RexNode castExpr = null;
if (childRexNodeLst != null && childRexNodeLst.size() == 1) {
@@ -284,17 +283,17 @@ protected RexNode convert(ExprNodeColumnDesc col) throws SemanticException {
InputCtx ic = getInputCtx(col);
int pos = ic.hiveNameToPosMap.get(col.getColumn());
return cluster.getRexBuilder().makeInputRef(
- ic.optiqInpDataType.getFieldList().get(pos).getType(), pos + ic.offsetInOptiqSchema);
+ ic.calciteInpDataType.getFieldList().get(pos).getType(), pos + ic.offsetInCalciteSchema);
}
private static final BigInteger MIN_LONG_BI = BigInteger.valueOf(Long.MIN_VALUE),
MAX_LONG_BI = BigInteger.valueOf(Long.MAX_VALUE);
- protected RexNode convert(ExprNodeConstantDesc literal) throws OptiqSemanticException {
+ protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticException {
RexBuilder rexBuilder = cluster.getRexBuilder();
RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory();
PrimitiveTypeInfo hiveType = (PrimitiveTypeInfo) literal.getTypeInfo();
- RelDataType optiqDataType = TypeConverter.convert(hiveType, dtFactory);
+ RelDataType calciteDataType = TypeConverter.convert(hiveType, dtFactory);
PrimitiveCategory hiveTypeCategory = hiveType.getPrimitiveCategory();
@@ -302,25 +301,25 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws OptiqSemanticExce
Object value = ObjectInspectorUtils.copyToStandardJavaObject(coi.getWritableConstantValue(),
coi);
- RexNode optiqLiteral = null;
+ RexNode calciteLiteral = null;
// TODO: Verify if we need to use ConstantObjectInspector to unwrap data
switch (hiveTypeCategory) {
case BOOLEAN:
- optiqLiteral = rexBuilder.makeLiteral(((Boolean) value).booleanValue());
+ calciteLiteral = rexBuilder.makeLiteral(((Boolean) value).booleanValue());
break;
case BYTE:
byte[] byteArray = new byte[] { (Byte) value };
ByteString bs = new ByteString(byteArray);
- optiqLiteral = rexBuilder.makeBinaryLiteral(bs);
+ calciteLiteral = rexBuilder.makeBinaryLiteral(bs);
break;
case SHORT:
- optiqLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Short) value), optiqDataType);
+ calciteLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Short) value), calciteDataType);
break;
case INT:
- optiqLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Integer) value));
+ calciteLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Integer) value));
break;
case LONG:
- optiqLiteral = rexBuilder.makeBigintLiteral(new BigDecimal((Long) value));
+ calciteLiteral = rexBuilder.makeBigintLiteral(new BigDecimal((Long) value));
break;
// TODO: is Decimal an exact numeric or approximate numeric?
case DECIMAL:
@@ -340,14 +339,14 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws OptiqSemanticExce
// state in SA/QB.
// For now, we will not run CBO in the presence of invalid decimal
// literals.
- throw new OptiqSemanticException("Expression " + literal.getExprString()
+ throw new CalciteSemanticException("Expression " + literal.getExprString()
+ " is not a valid decimal");
// TODO: return createNullLiteral(literal);
}
BigDecimal bd = (BigDecimal) value;
BigInteger unscaled = bd.unscaledValue();
if (unscaled.compareTo(MIN_LONG_BI) >= 0 && unscaled.compareTo(MAX_LONG_BI) <= 0) {
- optiqLiteral = rexBuilder.makeExactLiteral(bd);
+ calciteLiteral = rexBuilder.makeExactLiteral(bd);
} else {
// CBO doesn't support unlimited precision decimals. In practice, this
// will work...
@@ -355,35 +354,35 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws OptiqSemanticExce
// to no CBO.
RelDataType relType = cluster.getTypeFactory().createSqlType(SqlTypeName.DECIMAL,
bd.scale(), unscaled.toString().length());
- optiqLiteral = rexBuilder.makeExactLiteral(bd, relType);
+ calciteLiteral = rexBuilder.makeExactLiteral(bd, relType);
}
break;
case FLOAT:
- optiqLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Float) value), optiqDataType);
+ calciteLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Float) value), calciteDataType);
break;
case DOUBLE:
- optiqLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Double) value), optiqDataType);
+ calciteLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Double) value), calciteDataType);
break;
case CHAR:
if (value instanceof HiveChar)
value = ((HiveChar) value).getValue();
- optiqLiteral = rexBuilder.makeLiteral((String) value);
+ calciteLiteral = rexBuilder.makeLiteral((String) value);
break;
case VARCHAR:
if (value instanceof HiveVarchar)
value = ((HiveVarchar) value).getValue();
- optiqLiteral = rexBuilder.makeLiteral((String) value);
+ calciteLiteral = rexBuilder.makeLiteral((String) value);
break;
case STRING:
- optiqLiteral = rexBuilder.makeLiteral((String) value);
+ calciteLiteral = rexBuilder.makeLiteral((String) value);
break;
case DATE:
Calendar cal = new GregorianCalendar();
cal.setTime((Date) value);
- optiqLiteral = rexBuilder.makeDateLiteral(cal);
+ calciteLiteral = rexBuilder.makeDateLiteral(cal);
break;
case TIMESTAMP:
- optiqLiteral = rexBuilder.makeTimestampLiteral((Calendar) value,
+ calciteLiteral = rexBuilder.makeTimestampLiteral((Calendar) value,
RelDataType.PRECISION_NOT_SPECIFIED);
break;
case BINARY:
@@ -393,23 +392,23 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws OptiqSemanticExce
throw new RuntimeException("UnSupported Literal");
}
- return optiqLiteral;
+ return calciteLiteral;
}
- private RexNode createNullLiteral(ExprNodeDesc expr) throws OptiqSemanticException {
+ private RexNode createNullLiteral(ExprNodeDesc expr) throws CalciteSemanticException {
return cluster.getRexBuilder().makeNullLiteral(
TypeConverter.convert(expr.getTypeInfo(), cluster.getTypeFactory()).getSqlTypeName());
}
public static RexNode convert(RelOptCluster cluster, ExprNodeDesc joinCondnExprNode,
List inputRels, LinkedHashMap relToHiveRR,
- Map> relToHiveColNameOptiqPosMap, boolean flattenExpr)
+ Map> relToHiveColNameCalcitePosMap, boolean flattenExpr)
throws SemanticException {
List inputCtxLst = new ArrayList();
int offSet = 0;
for (RelNode r : inputRels) {
- inputCtxLst.add(new InputCtx(r.getRowType(), relToHiveColNameOptiqPosMap.get(r), relToHiveRR
+ inputCtxLst.add(new InputCtx(r.getRowType(), relToHiveColNameCalcitePosMap.get(r), relToHiveRR
.get(r), offSet));
offSet += r.getRowType().getFieldCount();
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
similarity index 77%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index 0994c95b6b0a71c7351466adad73a64c99e202c6..c864eb823178fbad21faf6a7d613ec43525777ce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -15,12 +15,27 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.translator;
+package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
import java.lang.annotation.Annotation;
import java.util.List;
import java.util.Map;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlAggFunction;
+import org.apache.calcite.sql.SqlFunction;
+import org.apache.calcite.sql.SqlFunctionCategory;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.sql.type.InferTypes;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
+import org.apache.calcite.sql.type.SqlOperandTypeChecker;
+import org.apache.calcite.sql.type.SqlOperandTypeInference;
+import org.apache.calcite.sql.type.SqlReturnTypeInference;
+import org.apache.calcite.sql.type.SqlTypeFamily;
+import org.apache.calcite.util.Util;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -28,7 +43,7 @@
import org.apache.hadoop.hive.ql.exec.FunctionInfo;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
-import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
@@ -43,22 +58,6 @@
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeFactory;
-import org.eigenbase.sql.SqlAggFunction;
-import org.eigenbase.sql.SqlFunction;
-import org.eigenbase.sql.SqlFunctionCategory;
-import org.eigenbase.sql.SqlKind;
-import org.eigenbase.sql.SqlOperator;
-import org.eigenbase.sql.fun.SqlStdOperatorTable;
-import org.eigenbase.sql.type.InferTypes;
-import org.eigenbase.sql.type.OperandTypes;
-import org.eigenbase.sql.type.ReturnTypes;
-import org.eigenbase.sql.type.SqlOperandTypeChecker;
-import org.eigenbase.sql.type.SqlOperandTypeInference;
-import org.eigenbase.sql.type.SqlReturnTypeInference;
-import org.eigenbase.sql.type.SqlTypeFamily;
-import org.eigenbase.util.Util;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
@@ -67,19 +66,19 @@
public class SqlFunctionConverter {
private static final Log LOG = LogFactory.getLog(SqlFunctionConverter.class);
- static final Map hiveToOptiq;
- static final Map optiqToHiveToken;
+ static final Map hiveToCalcite;
+ static final Map calciteToHiveToken;
static final Map reverseOperatorMap;
static {
StaticBlockBuilder builder = new StaticBlockBuilder();
- hiveToOptiq = ImmutableMap.copyOf(builder.hiveToOptiq);
- optiqToHiveToken = ImmutableMap.copyOf(builder.optiqToHiveToken);
+ hiveToCalcite = ImmutableMap.copyOf(builder.hiveToCalcite);
+ calciteToHiveToken = ImmutableMap.copyOf(builder.calciteToHiveToken);
reverseOperatorMap = ImmutableMap.copyOf(builder.reverseOperatorMap);
}
- public static SqlOperator getOptiqOperator(String funcTextName, GenericUDF hiveUDF,
- ImmutableList optiqArgTypes, RelDataType retType) throws OptiqSemanticException {
+ public static SqlOperator getCalciteOperator(String funcTextName, GenericUDF hiveUDF,
+ ImmutableList calciteArgTypes, RelDataType retType) throws CalciteSemanticException {
// handle overloaded methods first
if (hiveUDF instanceof GenericUDFOPNegative) {
return SqlStdOperatorTable.UNARY_MINUS;
@@ -96,7 +95,7 @@ public static SqlOperator getOptiqOperator(String funcTextName, GenericUDF hiveU
// proper...
name = FunctionRegistry.getNormalizedFunctionName(funcTextName);
}
- return getOptiqFn(name, optiqArgTypes, retType);
+ return getCalciteFn(name, calciteArgTypes, retType);
}
public static GenericUDF getHiveUDF(SqlOperator op, RelDataType dt, int argsLength) {
@@ -185,7 +184,7 @@ private static FunctionInfo handleCastForParameterizedType(TypeInfo ti, Function
// TODO: 1) handle Agg Func Name translation 2) is it correct to add func args
// as child of func?
public static ASTNode buildAST(SqlOperator op, List children) {
- HiveToken hToken = optiqToHiveToken.get(op);
+ HiveToken hToken = calciteToHiveToken.get(op);
ASTNode node;
if (hToken != null) {
node = (ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text);
@@ -220,7 +219,7 @@ public static ASTNode buildAST(SqlOperator op, List children) {
*/
public static ASTNode buildAST(SqlOperator op, List children, int i) {
if (i + 1 < children.size()) {
- HiveToken hToken = optiqToHiveToken.get(op);
+ HiveToken hToken = calciteToHiveToken.get(op);
ASTNode curNode = ((ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text));
ParseDriver.adaptor.addChild(curNode, children.get(i));
ParseDriver.adaptor.addChild(curNode, buildAST(op, children, i + 1));
@@ -270,8 +269,8 @@ private static String getName(GenericUDF hiveUDF) {
/** This class is used to build immutable hashmaps in the static block above. */
private static class StaticBlockBuilder {
- final Map hiveToOptiq = Maps.newHashMap();
- final Map optiqToHiveToken = Maps.newHashMap();
+ final Map hiveToCalcite = Maps.newHashMap();
+ final Map calciteToHiveToken = Maps.newHashMap();
final Map reverseOperatorMap = Maps.newHashMap();
StaticBlockBuilder() {
@@ -293,8 +292,8 @@ private static String getName(GenericUDF hiveUDF) {
registerFunction("<>", SqlStdOperatorTable.NOT_EQUALS, hToken(HiveParser.NOTEQUAL, "<>"));
}
- private void registerFunction(String name, SqlOperator optiqFn, HiveToken hiveToken) {
- reverseOperatorMap.put(optiqFn, name);
+ private void registerFunction(String name, SqlOperator calciteFn, HiveToken hiveToken) {
+ reverseOperatorMap.put(calciteFn, name);
FunctionInfo hFn;
try {
hFn = FunctionRegistry.getFunctionInfo(name);
@@ -304,10 +303,10 @@ private void registerFunction(String name, SqlOperator optiqFn, HiveToken hiveTo
}
if (hFn != null) {
String hFnName = getName(hFn.getGenericUDF());
- hiveToOptiq.put(hFnName, optiqFn);
+ hiveToCalcite.put(hFnName, calciteFn);
if (hiveToken != null) {
- optiqToHiveToken.put(optiqFn, hiveToken);
+ calciteToHiveToken.put(calciteFn, hiveToken);
}
}
}
@@ -317,31 +316,16 @@ private static HiveToken hToken(int type, String text) {
return new HiveToken(type, text);
}
- public static class OptiqUDAF extends SqlAggFunction {
- final ImmutableList argTypes;
- final RelDataType retType;
-
- public OptiqUDAF(String opName, SqlReturnTypeInference returnTypeInference,
+ public static class CalciteUDAF extends SqlAggFunction {
+ public CalciteUDAF(String opName, SqlReturnTypeInference returnTypeInference,
SqlOperandTypeInference operandTypeInference, SqlOperandTypeChecker operandTypeChecker,
ImmutableList argTypes, RelDataType retType) {
super(opName, SqlKind.OTHER_FUNCTION, returnTypeInference, operandTypeInference,
operandTypeChecker, SqlFunctionCategory.USER_DEFINED_FUNCTION);
- this.argTypes = argTypes;
- this.retType = retType;
- }
-
- @Override
- public List getParameterTypes(final RelDataTypeFactory typeFactory) {
- return this.argTypes;
- }
-
- @Override
- public RelDataType getReturnType(final RelDataTypeFactory typeFactory) {
- return this.retType;
}
}
- private static class OptiqUDFInfo {
+ private static class CalciteUDFInfo {
private String udfName;
private SqlReturnTypeInference returnTypeInference;
private SqlOperandTypeInference operandTypeInference;
@@ -350,57 +334,57 @@ public RelDataType getReturnType(final RelDataTypeFactory typeFactory) {
private RelDataType retType;
}
- private static OptiqUDFInfo getUDFInfo(String hiveUdfName,
- ImmutableList optiqArgTypes, RelDataType optiqRetType) {
- OptiqUDFInfo udfInfo = new OptiqUDFInfo();
+ private static CalciteUDFInfo getUDFInfo(String hiveUdfName,
+ ImmutableList calciteArgTypes, RelDataType calciteRetType) {
+ CalciteUDFInfo udfInfo = new CalciteUDFInfo();
udfInfo.udfName = hiveUdfName;
- udfInfo.returnTypeInference = ReturnTypes.explicit(optiqRetType);
- udfInfo.operandTypeInference = InferTypes.explicit(optiqArgTypes);
+ udfInfo.returnTypeInference = ReturnTypes.explicit(calciteRetType);
+ udfInfo.operandTypeInference = InferTypes.explicit(calciteArgTypes);
ImmutableList.Builder typeFamilyBuilder = new ImmutableList.Builder();
- for (RelDataType at : optiqArgTypes) {
+ for (RelDataType at : calciteArgTypes) {
typeFamilyBuilder.add(Util.first(at.getSqlTypeName().getFamily(), SqlTypeFamily.ANY));
}
udfInfo.operandTypeChecker = OperandTypes.family(typeFamilyBuilder.build());
- udfInfo.argTypes = ImmutableList. copyOf(optiqArgTypes);
- udfInfo.retType = optiqRetType;
+ udfInfo.argTypes = ImmutableList. copyOf(calciteArgTypes);
+ udfInfo.retType = calciteRetType;
return udfInfo;
}
- public static SqlOperator getOptiqFn(String hiveUdfName,
- ImmutableList optiqArgTypes, RelDataType optiqRetType)
- throws OptiqSemanticException {
+ public static SqlOperator getCalciteFn(String hiveUdfName,
+ ImmutableList calciteArgTypes, RelDataType calciteRetType)
+ throws CalciteSemanticException {
if (hiveUdfName != null && hiveUdfName.trim().equals("<=>")) {
- // We can create Optiq IS_DISTINCT_FROM operator for this. But since our
+ // We can create a Calcite IS_DISTINCT_FROM operator for this. But since our
// join reordering algo cant handle this anyway there is no advantage of
// this.
// So, bail out for now.
- throw new OptiqSemanticException("<=> is not yet supported for cbo.");
+ throw new CalciteSemanticException("<=> is not yet supported for cbo.");
}
- SqlOperator optiqOp = hiveToOptiq.get(hiveUdfName);
- if (optiqOp == null) {
- OptiqUDFInfo uInf = getUDFInfo(hiveUdfName, optiqArgTypes, optiqRetType);
- optiqOp = new SqlFunction(uInf.udfName, SqlKind.OTHER_FUNCTION, uInf.returnTypeInference,
+ SqlOperator calciteOp = hiveToCalcite.get(hiveUdfName);
+ if (calciteOp == null) {
+ CalciteUDFInfo uInf = getUDFInfo(hiveUdfName, calciteArgTypes, calciteRetType);
+ calciteOp = new SqlFunction(uInf.udfName, SqlKind.OTHER_FUNCTION, uInf.returnTypeInference,
uInf.operandTypeInference, uInf.operandTypeChecker,
SqlFunctionCategory.USER_DEFINED_FUNCTION);
}
- return optiqOp;
+ return calciteOp;
}
- public static SqlAggFunction getOptiqAggFn(String hiveUdfName,
- ImmutableList optiqArgTypes, RelDataType optiqRetType) {
- SqlAggFunction optiqAggFn = (SqlAggFunction) hiveToOptiq.get(hiveUdfName);
- if (optiqAggFn == null) {
- OptiqUDFInfo uInf = getUDFInfo(hiveUdfName, optiqArgTypes, optiqRetType);
+ public static SqlAggFunction getCalciteAggFn(String hiveUdfName,
+ ImmutableList calciteArgTypes, RelDataType calciteRetType) {
+ SqlAggFunction calciteAggFn = (SqlAggFunction) hiveToCalcite.get(hiveUdfName);
+ if (calciteAggFn == null) {
+ CalciteUDFInfo uInf = getUDFInfo(hiveUdfName, calciteArgTypes, calciteRetType);
- optiqAggFn = new OptiqUDAF(uInf.udfName, uInf.returnTypeInference, uInf.operandTypeInference,
+ calciteAggFn = new CalciteUDAF(uInf.udfName, uInf.returnTypeInference, uInf.operandTypeInference,
uInf.operandTypeChecker, uInf.argTypes, uInf.retType);
}
- return optiqAggFn;
+ return calciteAggFn;
}
static class HiveToken {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
similarity index 86%
rename from ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java
rename to ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
index 68f3be7948df6974fb9412b99742527971207362..88c989fcef53bd14732a6fbb1a92efd708887493 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
@@ -15,17 +15,23 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.ql.optimizer.optiq.translator;
+package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
import org.apache.hadoop.hive.ql.exec.RowSchema;
-import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException;
-import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter.HiveToken;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter.HiveToken;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.RowResolver;
import org.apache.hadoop.hive.serde.serdeConstants;
@@ -38,12 +44,6 @@
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeFactory;
-import org.eigenbase.reltype.RelDataTypeField;
-import org.eigenbase.rex.RexBuilder;
-import org.eigenbase.sql.type.SqlTypeName;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableMap;
@@ -51,7 +51,7 @@
import com.google.common.collect.Lists;
public class TypeConverter {
- private static final Map optiqToHiveTypeNameMap;
+ private static final Map calciteToHiveTypeNameMap;
// TODO: Handling of char[], varchar[], string...
static {
@@ -66,12 +66,12 @@
b.put(SqlTypeName.DATE.getName(), new HiveToken(HiveParser.TOK_DATE, "TOK_DATE"));
b.put(SqlTypeName.TIMESTAMP.getName(), new HiveToken(HiveParser.TOK_TIMESTAMP, "TOK_TIMESTAMP"));
b.put(SqlTypeName.BINARY.getName(), new HiveToken(HiveParser.TOK_BINARY, "TOK_BINARY"));
- optiqToHiveTypeNameMap = b.build();
+ calciteToHiveTypeNameMap = b.build();
};
- /*********************** Convert Hive Types To Optiq Types ***********************/
+ /*********************** Convert Hive Types To Calcite Types ***********************/
public static RelDataType getType(RelOptCluster cluster,
- List cInfoLst) throws OptiqSemanticException {
+ List cInfoLst) throws CalciteSemanticException {
RexBuilder rexBuilder = cluster.getRexBuilder();
RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory();
List fieldTypes = new LinkedList();
@@ -85,7 +85,7 @@ public static RelDataType getType(RelOptCluster cluster,
}
public static RelDataType getType(RelOptCluster cluster, RowResolver rr,
- List neededCols) throws OptiqSemanticException {
+ List neededCols) throws CalciteSemanticException {
RexBuilder rexBuilder = cluster.getRexBuilder();
RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory();
RowSchema rs = rr.getRowSchema();
@@ -102,7 +102,7 @@ public static RelDataType getType(RelOptCluster cluster, RowResolver rr,
}
public static RelDataType convert(TypeInfo type, RelDataTypeFactory dtFactory)
- throws OptiqSemanticException{
+ throws CalciteSemanticException {
RelDataType convertedType = null;
switch (type.getCategory()) {
@@ -191,20 +191,20 @@ public static RelDataType convert(PrimitiveTypeInfo type, RelDataTypeFactory dtF
}
public static RelDataType convert(ListTypeInfo lstType,
- RelDataTypeFactory dtFactory) throws OptiqSemanticException {
+ RelDataTypeFactory dtFactory) throws CalciteSemanticException {
RelDataType elemType = convert(lstType.getListElementTypeInfo(), dtFactory);
return dtFactory.createArrayType(elemType, -1);
}
public static RelDataType convert(MapTypeInfo mapType, RelDataTypeFactory dtFactory)
- throws OptiqSemanticException {
+ throws CalciteSemanticException {
RelDataType keyType = convert(mapType.getMapKeyTypeInfo(), dtFactory);
RelDataType valueType = convert(mapType.getMapValueTypeInfo(), dtFactory);
return dtFactory.createMapType(keyType, valueType);
}
public static RelDataType convert(StructTypeInfo structType,
- final RelDataTypeFactory dtFactory) throws OptiqSemanticException {
+ final RelDataTypeFactory dtFactory) throws CalciteSemanticException {
List fTypes = new ArrayList(structType.getAllStructFieldTypeInfos().size());
for (TypeInfo ti : structType.getAllStructFieldTypeInfos()) {
fTypes.add(convert(ti,dtFactory));
@@ -213,9 +213,9 @@ public static RelDataType convert(StructTypeInfo structType,
}
public static RelDataType convert(UnionTypeInfo unionType, RelDataTypeFactory dtFactory)
- throws OptiqSemanticException{
- // Union type is not supported in Optiq.
- throw new OptiqSemanticException("Union type is not supported");
+ throws CalciteSemanticException {
+ // Union type is not supported in Calcite.
+ throw new CalciteSemanticException("Union type is not supported");
}
public static TypeInfo convert(RelDataType rType) {
@@ -295,31 +295,31 @@ public static TypeInfo convertPrimtiveType(RelDataType rType) {
}
- /*********************** Convert Optiq Types To Hive Types ***********************/
- public static HiveToken hiveToken(RelDataType optiqType) {
+ /*********************** Convert Calcite Types To Hive Types ***********************/
+ public static HiveToken hiveToken(RelDataType calciteType) {
HiveToken ht = null;
- switch (optiqType.getSqlTypeName()) {
+ switch (calciteType.getSqlTypeName()) {
case CHAR: {
- ht = new HiveToken(HiveParser.TOK_CHAR, "TOK_CHAR", String.valueOf(optiqType.getPrecision()));
+ ht = new HiveToken(HiveParser.TOK_CHAR, "TOK_CHAR", String.valueOf(calciteType.getPrecision()));
}
break;
case VARCHAR: {
- if (optiqType.getPrecision() == Integer.MAX_VALUE)
- ht = new HiveToken(HiveParser.TOK_STRING, "TOK_STRING", String.valueOf(optiqType
+ if (calciteType.getPrecision() == Integer.MAX_VALUE)
+ ht = new HiveToken(HiveParser.TOK_STRING, "TOK_STRING", String.valueOf(calciteType
.getPrecision()));
else
- ht = new HiveToken(HiveParser.TOK_VARCHAR, "TOK_VARCHAR", String.valueOf(optiqType
+ ht = new HiveToken(HiveParser.TOK_VARCHAR, "TOK_VARCHAR", String.valueOf(calciteType
.getPrecision()));
}
break;
case DECIMAL: {
- ht = new HiveToken(HiveParser.TOK_DECIMAL, "TOK_DECIMAL", String.valueOf(optiqType
- .getPrecision()), String.valueOf(optiqType.getScale()));
+ ht = new HiveToken(HiveParser.TOK_DECIMAL, "TOK_DECIMAL", String.valueOf(calciteType
+ .getPrecision()), String.valueOf(calciteType.getScale()));
}
break;
default:
- ht = optiqToHiveTypeNameMap.get(optiqType.getSqlTypeName().getName());
+ ht = calciteToHiveTypeNameMap.get(calciteType.getSqlTypeName().getName());
}
return ht;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 95c6d00453d5d5b7dac3ef35b85a1c8a0f59f725..8fee5aa61787d791faa0685b455297820262c074 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -20,11 +20,28 @@
import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS;
-import com.google.common.base.Function;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableList.Builder;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
+import java.io.IOException;
+import java.io.Serializable;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.regex.Pattern;
+import java.util.regex.PatternSyntaxException;
import org.antlr.runtime.ClassicToken;
import org.antlr.runtime.Token;
@@ -33,6 +50,67 @@
import org.antlr.runtime.tree.TreeVisitorAction;
import org.antlr.runtime.tree.TreeWizard;
import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptPlanner;
+import org.apache.calcite.plan.RelOptQuery;
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptSchema;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.plan.hep.HepMatchOrder;
+import org.apache.calcite.plan.hep.HepPlanner;
+import org.apache.calcite.plan.hep.HepProgram;
+import org.apache.calcite.plan.hep.HepProgramBuilder;
+import org.apache.calcite.rel.InvalidRelException;
+import org.apache.calcite.rel.RelCollation;
+import org.apache.calcite.rel.RelCollationImpl;
+import org.apache.calcite.rel.RelFieldCollation;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Aggregate;
+import org.apache.calcite.rel.core.AggregateCall;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.Join;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.core.RelFactories;
+import org.apache.calcite.rel.core.SemiJoin;
+import org.apache.calcite.rel.metadata.CachingRelMetadataProvider;
+import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
+import org.apache.calcite.rel.metadata.RelMetadataProvider;
+import org.apache.calcite.rel.rules.FilterAggregateTransposeRule;
+import org.apache.calcite.rel.rules.FilterMergeRule;
+import org.apache.calcite.rel.rules.FilterProjectTransposeRule;
+import org.apache.calcite.rel.rules.FilterSetOpTransposeRule;
+import org.apache.calcite.rel.rules.JoinPushTransitivePredicatesRule;
+import org.apache.calcite.rel.rules.JoinToMultiJoinRule;
+import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
+import org.apache.calcite.rel.rules.SemiJoinFilterTransposeRule;
+import org.apache.calcite.rel.rules.SemiJoinJoinTransposeRule;
+import org.apache.calcite.rel.rules.SemiJoinProjectTransposeRule;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexFieldCollation;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.rex.RexWindowBound;
+import org.apache.calcite.schema.SchemaPlus;
+import org.apache.calcite.sql.SqlAggFunction;
+import org.apache.calcite.sql.SqlCall;
+import org.apache.calcite.sql.SqlExplainLevel;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlLiteral;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.SqlWindow;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql2rel.RelFieldTrimmer;
+import org.apache.calcite.tools.Frameworks;
+import org.apache.calcite.util.CompositeList;
+import org.apache.calcite.util.ImmutableBitSet;
+import org.apache.calcite.util.ImmutableIntList;
+import org.apache.calcite.util.Pair;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -100,29 +178,29 @@
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
import org.apache.hadoop.hive.ql.optimizer.Optimizer;
-import org.apache.hadoop.hive.ql.optimizer.optiq.HiveDefaultRelMetadataProvider;
-import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil;
-import org.apache.hadoop.hive.ql.optimizer.optiq.HiveTypeSystemImpl;
-import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException;
-import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
-import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
-import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveVolcanoPlanner;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveAggregateRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveSortRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveUnionRel;
-import org.apache.hadoop.hive.ql.optimizer.optiq.rules.HivePartitionPrunerRule;
-import org.apache.hadoop.hive.ql.optimizer.optiq.rules.HivePushFilterPastJoinRule;
-import org.apache.hadoop.hive.ql.optimizer.optiq.translator.ASTConverter;
-import org.apache.hadoop.hive.ql.optimizer.optiq.translator.JoinCondTypeCheckProcFactory;
-import org.apache.hadoop.hive.ql.optimizer.optiq.translator.JoinTypeCheckCtx;
-import org.apache.hadoop.hive.ql.optimizer.optiq.translator.RexNodeConverter;
-import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter;
-import org.apache.hadoop.hive.ql.optimizer.optiq.translator.TypeConverter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveDefaultRelMetadataProvider;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl;
+import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveVolcanoPlanner;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterJoinRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePartitionPruneRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.JoinCondTypeCheckProcFactory;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.JoinTypeCheckCtx;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.RexNodeConverter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec.SpecType;
import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression;
@@ -216,92 +294,12 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.OutputFormat;
-import org.eigenbase.rel.AggregateCall;
-import org.eigenbase.rel.AggregateRelBase;
-import org.eigenbase.rel.Aggregation;
-import org.eigenbase.rel.FilterRelBase;
-import org.eigenbase.rel.InvalidRelException;
-import org.eigenbase.rel.JoinRelBase;
-import org.eigenbase.rel.JoinRelType;
-import org.eigenbase.rel.RelCollation;
-import org.eigenbase.rel.RelCollationImpl;
-import org.eigenbase.rel.RelFactories;
-import org.eigenbase.rel.RelFieldCollation;
-import org.eigenbase.rel.RelNode;
-import org.eigenbase.rel.metadata.CachingRelMetadataProvider;
-import org.eigenbase.rel.metadata.ChainedRelMetadataProvider;
-import org.eigenbase.rel.metadata.RelMetadataProvider;
-import org.eigenbase.rel.rules.ConvertMultiJoinRule;
-import org.eigenbase.rel.rules.FilterAggregateTransposeRule;
-import org.eigenbase.rel.rules.LoptOptimizeJoinRule;
-import org.eigenbase.rel.rules.MergeFilterRule;
-import org.eigenbase.rel.rules.PushFilterPastProjectRule;
-import org.eigenbase.rel.rules.PushFilterPastSetOpRule;
-import org.eigenbase.rel.rules.PushSemiJoinPastFilterRule;
-import org.eigenbase.rel.rules.PushSemiJoinPastJoinRule;
-import org.eigenbase.rel.rules.PushSemiJoinPastProjectRule;
-import org.eigenbase.rel.rules.SemiJoinRel;
-import org.eigenbase.rel.rules.TransitivePredicatesOnJoinRule;
-import org.eigenbase.relopt.RelOptCluster;
-import org.eigenbase.relopt.RelOptPlanner;
-import org.eigenbase.relopt.RelOptQuery;
-import org.eigenbase.relopt.RelOptRule;
-import org.eigenbase.relopt.RelOptSchema;
-import org.eigenbase.relopt.RelOptUtil;
-import org.eigenbase.relopt.RelTraitSet;
-import org.eigenbase.relopt.hep.HepMatchOrder;
-import org.eigenbase.relopt.hep.HepPlanner;
-import org.eigenbase.relopt.hep.HepProgram;
-import org.eigenbase.relopt.hep.HepProgramBuilder;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeFactory;
-import org.eigenbase.reltype.RelDataTypeField;
-import org.eigenbase.rex.RexBuilder;
-import org.eigenbase.rex.RexFieldCollation;
-import org.eigenbase.rex.RexInputRef;
-import org.eigenbase.rex.RexNode;
-import org.eigenbase.rex.RexUtil;
-import org.eigenbase.rex.RexWindowBound;
-import org.eigenbase.sql.SqlAggFunction;
-import org.eigenbase.sql.SqlCall;
-import org.eigenbase.sql.SqlExplainLevel;
-import org.eigenbase.sql.SqlKind;
-import org.eigenbase.sql.SqlLiteral;
-import org.eigenbase.sql.SqlNode;
-import org.eigenbase.sql.SqlWindow;
-import org.eigenbase.sql.parser.SqlParserPos;
-import org.eigenbase.sql.type.SqlTypeName;
-import org.eigenbase.sql2rel.RelFieldTrimmer;
-import org.eigenbase.util.CompositeList;
-import org.eigenbase.util.ImmutableIntList;
-import org.eigenbase.util.Pair;
-import java.io.IOException;
-import java.io.Serializable;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.math.BigDecimal;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.BitSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.regex.Pattern;
-import java.util.regex.PatternSyntaxException;
-
-import net.hydromatic.optiq.SchemaPlus;
-import net.hydromatic.optiq.tools.Frameworks;
+import com.google.common.base.Function;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableList.Builder;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
/**
* Implementation of the semantic analyzer. It generates the query plan.
@@ -2864,7 +2862,7 @@ private Integer genColListRegex(String colRegex, String tabAlias, ASTNode sel,
}
if (ensureUniqueCols) {
if (!output.putWithCheck(tmp[0], tmp[1], null, oColInfo)) {
- throw new OptiqSemanticException("Cannot add column to RR: " + tmp[0] + "." + tmp[1]
+ throw new CalciteSemanticException("Cannot add column to RR: " + tmp[0] + "." + tmp[1]
+ " => " + oColInfo + " due to duplication, see previous warnings");
}
} else {
@@ -10055,12 +10053,12 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
if (runCBO) {
disableJoinMerge = true;
- OptiqBasedPlanner optiqPlanner = new OptiqBasedPlanner();
+ CalciteBasedPlanner calcitePlanner = new CalciteBasedPlanner();
boolean reAnalyzeAST = false;
try {
// 1. Gen Optimized AST
- ASTNode newAST = optiqPlanner.getOptimizedAST(prunedPartitions);
+ ASTNode newAST = calcitePlanner.getOptimizedAST(prunedPartitions);
// 1.1. Fix up the query for insert/ctas
newAST = fixUpCtasAndInsertAfterCbo(ast, newAST, cboCtx);
@@ -10095,14 +10093,14 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
* .getRowResolver(), true);
*/
} catch (Exception e) {
- boolean isMissingStats = optiqPlanner.noColsMissingStats.get() > 0;
+ boolean isMissingStats = calcitePlanner.noColsMissingStats.get() > 0;
if (isMissingStats) {
LOG.error("CBO failed due to missing column stats (see previous errors), skipping CBO");
} else {
LOG.error("CBO failed, skipping CBO. ", e);
}
if (!conf.getBoolVar(ConfVars.HIVE_IN_TEST) || isMissingStats
- || e instanceof OptiqSemanticException) {
+ || e instanceof CalciteSemanticException) {
reAnalyzeAST = true;
} else if (e instanceof SemanticException) {
throw (SemanticException)e;
@@ -10266,7 +10264,7 @@ private boolean canHandleAstForCbo(ASTNode ast, QB qb, PreCboCtx cboCtx) {
// be supported and would require additional checks similar to IsQuery?
boolean isSupportedType =
qb.getIsQuery() || qb.isCTAS() || cboCtx.type == PreCboCtx.Type.INSERT;
- boolean noBadTokens = HiveOptiqUtil.validateASTForUnsupportedTokens(ast);
+ boolean noBadTokens = HiveCalciteUtil.validateASTForUnsupportedTokens(ast);
boolean result = isSupportedRoot && isSupportedType && createVwDesc == null && noBadTokens;
if (!result) {
if (needToLogMessage) {
@@ -12489,10 +12487,10 @@ protected boolean deleting() {
return false;
}
- /**** Temporary Place Holder For Optiq plan Gen, Optimizer ****/
+ /**** Temporary Place Holder For Calcite plan Gen, Optimizer ****/
/**
- * Entry point to Optimizations using Optiq. Checks whether Optiq can handle the query.
+ * Entry point to Optimizations using Calcite. Checks whether Calcite can handle the query.
* @param qbToChk Query block to check.
* @param verbose Whether return value should be verbose in case of failure.
* @return null if the query can be handled; non-null reason string if it cannot be.
@@ -12530,35 +12528,35 @@ private String canHandleQbForCbo(QB qbToChk, boolean topLevelQB, boolean verbose
return msg;
}
- private class OptiqBasedPlanner implements Frameworks.PlannerAction {
+ private class CalciteBasedPlanner implements Frameworks.PlannerAction {
private RelOptCluster cluster;
private RelOptSchema relOptSchema;
private SemanticException semanticException;
private Map partitionCache;
- private final AtomicInteger noColsMissingStats = new AtomicInteger(0);
+ private final AtomicInteger noColsMissingStats = new AtomicInteger(0);
List topLevelFieldSchema;
// TODO: Do we need to keep track of RR, ColNameToPosMap for every op or
// just last one.
LinkedHashMap relToHiveRR = new LinkedHashMap();
- LinkedHashMap> relToHiveColNameOptiqPosMap = new LinkedHashMap>();
+ LinkedHashMap> relToHiveColNameCalcitePosMap = new LinkedHashMap>();
private ASTNode getOptimizedAST(Map partitionCache)
throws SemanticException {
- ASTNode optiqOptimizedAST = null;
- RelNode optimizedOptiqPlan = null;
+ ASTNode calciteOptimizedAST = null;
+ RelNode optimizedCalcitePlan = null;
this.partitionCache = partitionCache;
try {
- optimizedOptiqPlan = Frameworks.withPlanner(this,
+ optimizedCalcitePlan = Frameworks.withPlanner(this,
Frameworks.newConfigBuilder().typeSystem(new HiveTypeSystemImpl()).build());
} catch (Exception e) {
rethrowCalciteException(e);
throw new AssertionError("rethrowCalciteException didn't throw for " + e.getMessage());
}
- optiqOptimizedAST = ASTConverter.convert(optimizedOptiqPlan, topLevelFieldSchema);
+ calciteOptimizedAST = ASTConverter.convert(optimizedCalcitePlan, topLevelFieldSchema);
- return optiqOptimizedAST;
+ return calciteOptimizedAST;
}
/*
@@ -12599,9 +12597,9 @@ private boolean isUselessCause(Throwable t) {
@Override
public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlus rootSchema) {
- RelNode optiqGenPlan = null;
- RelNode optiqPreCboPlan = null;
- RelNode optiqOptimizedPlan = null;
+ RelNode calciteGenPlan = null;
+ RelNode calcitePreCboPlan = null;
+ RelNode calciteOptimizedPlan = null;
/*
* recreate cluster, so that it picks up the additional traitDef
@@ -12615,24 +12613,24 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu
this.relOptSchema = relOptSchema;
try {
- optiqGenPlan = genLogicalPlan(qb, true);
- topLevelFieldSchema = convertRowSchemaToResultSetSchema(relToHiveRR.get(optiqGenPlan),
+ calciteGenPlan = genLogicalPlan(qb, true);
+ topLevelFieldSchema = convertRowSchemaToResultSetSchema(relToHiveRR.get(calciteGenPlan),
HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
} catch (SemanticException e) {
semanticException = e;
throw new RuntimeException(e);
}
- optiqPreCboPlan = applyPreCBOTransforms(optiqGenPlan, HiveDefaultRelMetadataProvider.INSTANCE);
+ calcitePreCboPlan = applyPreCBOTransforms(calciteGenPlan, HiveDefaultRelMetadataProvider.INSTANCE);
List list = Lists.newArrayList();
list.add(HiveDefaultRelMetadataProvider.INSTANCE);
- RelTraitSet desiredTraits = cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY);
+ RelTraitSet desiredTraits = cluster.traitSetOf(HiveRelNode.CONVENTION, RelCollationImpl.EMPTY);
HepProgram hepPgm = null;
HepProgramBuilder hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP)
- .addRuleInstance(new ConvertMultiJoinRule(HiveJoinRel.class));
- hepPgmBldr.addRuleInstance(new LoptOptimizeJoinRule(HiveJoinRel.HIVE_JOIN_FACTORY,
- HiveProjectRel.DEFAULT_PROJECT_FACTORY, HiveFilterRel.DEFAULT_FILTER_FACTORY));
+ .addRuleInstance(new JoinToMultiJoinRule(HiveJoin.class));
+ hepPgmBldr.addRuleInstance(new LoptOptimizeJoinRule(HiveJoin.HIVE_JOIN_FACTORY,
+ HiveProject.DEFAULT_PROJECT_FACTORY, HiveFilter.DEFAULT_FILTER_FACTORY));
hepPgm = hepPgmBldr.build();
HepPlanner hepPlanner = new HepPlanner(hepPgm);
@@ -12641,25 +12639,25 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu
RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner));
- RelNode rootRel = optiqPreCboPlan;
+ RelNode rootRel = calcitePreCboPlan;
hepPlanner.setRoot(rootRel);
- if (!optiqPreCboPlan.getTraitSet().equals(desiredTraits)) {
- rootRel = hepPlanner.changeTraits(optiqPreCboPlan, desiredTraits);
+ if (!calcitePreCboPlan.getTraitSet().equals(desiredTraits)) {
+ rootRel = hepPlanner.changeTraits(calcitePreCboPlan, desiredTraits);
}
hepPlanner.setRoot(rootRel);
- optiqOptimizedPlan = hepPlanner.findBestExp();
+ calciteOptimizedPlan = hepPlanner.findBestExp();
if (LOG.isDebugEnabled() && !conf.getBoolVar(ConfVars.HIVE_IN_TEST)) {
LOG.debug("CBO Planning details:\n");
- LOG.debug("Original Plan:\n" + RelOptUtil.toString(optiqGenPlan));
+ LOG.debug("Original Plan:\n" + RelOptUtil.toString(calciteGenPlan));
LOG.debug("Plan After PPD, PartPruning, ColumnPruning:\n"
- + RelOptUtil.toString(optiqPreCboPlan));
+ + RelOptUtil.toString(calcitePreCboPlan));
LOG.debug("Plan After Join Reordering:\n"
- + RelOptUtil.toString(optiqOptimizedPlan, SqlExplainLevel.ALL_ATTRIBUTES));
+ + RelOptUtil.toString(calciteOptimizedPlan, SqlExplainLevel.ALL_ATTRIBUTES));
}
- return optiqOptimizedPlan;
+ return calciteOptimizedPlan;
}
public RelNode applyPreCBOTransforms(RelNode basePlan, RelMetadataProvider mdProvider) {
@@ -12670,37 +12668,37 @@ public RelNode applyPreCBOTransforms(RelNode basePlan, RelMetadataProvider mdPro
// Push Down Semi Joins
basePlan = hepPlan(basePlan, true, mdProvider,
- PushSemiJoinPastJoinRule.INSTANCE,
- new PushSemiJoinPastFilterRule(HiveFilterRel.DEFAULT_FILTER_FACTORY),
- new PushSemiJoinPastProjectRule(HiveProjectRel.DEFAULT_PROJECT_FACTORY));
+ SemiJoinJoinTransposeRule.INSTANCE,
+ SemiJoinFilterTransposeRule.INSTANCE,
+ SemiJoinProjectTransposeRule.INSTANCE);
basePlan = hepPlan(basePlan, true, mdProvider,
- new PushFilterPastProjectRule(
- FilterRelBase.class, HiveFilterRel.DEFAULT_FILTER_FACTORY, HiveProjectRel.class,
- HiveProjectRel.DEFAULT_PROJECT_FACTORY), new PushFilterPastSetOpRule(
- HiveFilterRel.DEFAULT_FILTER_FACTORY), new MergeFilterRule(
- HiveFilterRel.DEFAULT_FILTER_FACTORY), HivePushFilterPastJoinRule.JOIN,
- HivePushFilterPastJoinRule.FILTER_ON_JOIN,
+ new FilterProjectTransposeRule(
+ Filter.class, HiveFilter.DEFAULT_FILTER_FACTORY, HiveProject.class,
+ HiveProject.DEFAULT_PROJECT_FACTORY), new FilterSetOpTransposeRule(
+ HiveFilter.DEFAULT_FILTER_FACTORY), new FilterMergeRule(
+ HiveFilter.DEFAULT_FILTER_FACTORY), HiveFilterJoinRule.JOIN,
+ HiveFilterJoinRule.FILTER_ON_JOIN,
new FilterAggregateTransposeRule(
- FilterRelBase.class,
- HiveFilterRel.DEFAULT_FILTER_FACTORY,
- AggregateRelBase.class));
+ Filter.class,
+ HiveFilter.DEFAULT_FILTER_FACTORY,
+ Aggregate.class));
- basePlan = hepPlan(basePlan, false, mdProvider, new TransitivePredicatesOnJoinRule(
- JoinRelBase.class, HiveFilterRel.DEFAULT_FILTER_FACTORY),
- // TODO: Enable it after OPTIQ-407 is fixed
+ basePlan = hepPlan(basePlan, false, mdProvider, new JoinPushTransitivePredicatesRule(
+ Join.class, HiveFilter.DEFAULT_FILTER_FACTORY),
+ // TODO: Enable it after CALCITE-407 is fixed
//RemoveTrivialProjectRule.INSTANCE,
- new HivePartitionPrunerRule(SemanticAnalyzer.this.conf));
+ new HivePartitionPruneRule(SemanticAnalyzer.this.conf));
- RelFieldTrimmer fieldTrimmer = new RelFieldTrimmer(null, HiveProjectRel.DEFAULT_PROJECT_FACTORY,
- HiveFilterRel.DEFAULT_FILTER_FACTORY, HiveJoinRel.HIVE_JOIN_FACTORY, RelFactories.DEFAULT_SEMI_JOIN_FACTORY,
- HiveSortRel.HIVE_SORT_REL_FACTORY, HiveAggregateRel.HIVE_AGGR_REL_FACTORY, HiveUnionRel.UNION_REL_FACTORY);
+ RelFieldTrimmer fieldTrimmer = new RelFieldTrimmer(null, HiveProject.DEFAULT_PROJECT_FACTORY,
+ HiveFilter.DEFAULT_FILTER_FACTORY, HiveJoin.HIVE_JOIN_FACTORY, RelFactories.DEFAULT_SEMI_JOIN_FACTORY,
+ HiveSort.HIVE_SORT_REL_FACTORY, HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY);
basePlan = fieldTrimmer.trim(basePlan);
basePlan = hepPlan(basePlan, true, mdProvider,
- new PushFilterPastProjectRule(FilterRelBase.class,
- HiveFilterRel.DEFAULT_FILTER_FACTORY, HiveProjectRel.class,
- HiveProjectRel.DEFAULT_PROJECT_FACTORY));
+ new FilterProjectTransposeRule(Filter.class,
+ HiveFilter.DEFAULT_FILTER_FACTORY, HiveProject.class,
+ HiveProject.DEFAULT_PROJECT_FACTORY));
return basePlan;
}
@@ -12736,7 +12734,7 @@ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges,
@SuppressWarnings("nls")
private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode leftRel,
String rightalias, RelNode rightRel) throws SemanticException {
- HiveUnionRel unionRel = null;
+ HiveUnion unionRel = null;
// 1. Get Row Resolvers, Column map for original left and right input of
// Union Rel
@@ -12768,7 +12766,7 @@ private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode
+ " does not have the field " + field));
}
if (!lInfo.getInternalName().equals(rInfo.getInternalName())) {
- throw new OptiqSemanticException(generateErrorMessage(tabref,
+ throw new CalciteSemanticException(generateErrorMessage(tabref,
"Schema of both sides of union should match: field " + field + ":"
+ " appears on the left side of the UNION at column position: "
+ getPositionFromInternalName(lInfo.getInternalName())
@@ -12780,7 +12778,7 @@ private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode
TypeInfo commonTypeInfo = FunctionRegistry.getCommonClassForUnionAll(lInfo.getType(),
rInfo.getType());
if (commonTypeInfo == null) {
- throw new OptiqSemanticException(generateErrorMessage(tabref,
+ throw new CalciteSemanticException(generateErrorMessage(tabref,
"Schema of both sides of union should match: Column " + field + " is of type "
+ lInfo.getType().getTypeName() + " on first table and type "
+ rInfo.getType().getTypeName() + " on second table"));
@@ -12800,7 +12798,7 @@ private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode
unionoutRR.put(unionalias, field, unionColInfo);
}
- // 4. Determine which columns requires cast on left/right input (Optiq
+ // 4. Determine which columns requires cast on left/right input (Calcite
// requires exact types on both sides of union)
boolean leftNeedsTypeCast = false;
boolean rightNeedsTypeCast = false;
@@ -12842,11 +12840,11 @@ private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode
RelNode unionLeftInput = leftRel;
RelNode unionRightInput = rightRel;
if (leftNeedsTypeCast) {
- unionLeftInput = HiveProjectRel.create(leftRel, leftProjs, leftRel.getRowType()
+ unionLeftInput = HiveProject.create(leftRel, leftProjs, leftRel.getRowType()
.getFieldNames());
}
if (rightNeedsTypeCast) {
- unionRightInput = HiveProjectRel.create(rightRel, rightProjs, rightRel.getRowType()
+ unionRightInput = HiveProject.create(rightRel, rightProjs, rightRel.getRowType()
.getFieldNames());
}
@@ -12854,12 +12852,12 @@ private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode
ImmutableList.Builder bldr = new ImmutableList.Builder();
bldr.add(unionLeftInput);
bldr.add(unionRightInput);
- unionRel = new HiveUnionRel(cluster, TraitsUtil.getDefaultTraitSet(cluster),
+ unionRel = new HiveUnion(cluster, TraitsUtil.getDefaultTraitSet(cluster),
bldr.build());
relToHiveRR.put(unionRel, unionoutRR);
- relToHiveColNameOptiqPosMap.put(unionRel,
- this.buildHiveToOptiqColumnMap(unionoutRR, unionRel));
+ relToHiveColNameCalcitePosMap.put(unionRel,
+ this.buildHiveToCalciteColumnMap(unionoutRR, unionRel));
return unionRel;
}
@@ -12884,7 +12882,7 @@ private RelNode genJoinRelNode(RelNode leftRel, RelNode rightRel, JoinType hiveJ
}
// 2. Construct ExpressionNodeDesc representing Join Condition
- RexNode optiqJoinCond = null;
+ RexNode calciteJoinCond = null;
if (joinCond != null) {
JoinTypeCheckCtx jCtx = new JoinTypeCheckCtx(leftRR, rightRR, hiveJoinType);
Map exprNodes = JoinCondTypeCheckProcFactory.genExprNode(joinCond,
@@ -12898,10 +12896,10 @@ private RelNode genJoinRelNode(RelNode leftRel, RelNode rightRel, JoinType hiveJ
List inputRels = new ArrayList();
inputRels.add(leftRel);
inputRels.add(rightRel);
- optiqJoinCond = RexNodeConverter.convert(cluster, joinCondnExprNode, inputRels,
- relToHiveRR, relToHiveColNameOptiqPosMap, false);
+ calciteJoinCond = RexNodeConverter.convert(cluster, joinCondnExprNode, inputRels,
+ relToHiveRR, relToHiveColNameCalcitePosMap, false);
} else {
- optiqJoinCond = cluster.getRexBuilder().makeLiteral(true);
+ calciteJoinCond = cluster.getRexBuilder().makeLiteral(true);
}
// 3. Validate that join condition is legal (i.e no function refering to
@@ -12911,24 +12909,24 @@ private RelNode genJoinRelNode(RelNode leftRel, RelNode rightRel, JoinType hiveJ
// 4. Construct Join Rel Node
boolean leftSemiJoin = false;
- JoinRelType optiqJoinType;
+ JoinRelType calciteJoinType;
switch (hiveJoinType) {
case LEFTOUTER:
- optiqJoinType = JoinRelType.LEFT;
+ calciteJoinType = JoinRelType.LEFT;
break;
case RIGHTOUTER:
- optiqJoinType = JoinRelType.RIGHT;
+ calciteJoinType = JoinRelType.RIGHT;
break;
case FULLOUTER:
- optiqJoinType = JoinRelType.FULL;
+ calciteJoinType = JoinRelType.FULL;
break;
case LEFTSEMI:
- optiqJoinType = JoinRelType.INNER;
+ calciteJoinType = JoinRelType.INNER;
leftSemiJoin = true;
break;
case INNER:
default:
- optiqJoinType = JoinRelType.INNER;
+ calciteJoinType = JoinRelType.INNER;
break;
}
@@ -12938,7 +12936,7 @@ private RelNode genJoinRelNode(RelNode leftRel, RelNode rightRel, JoinType hiveJ
List rightJoinKeys = new ArrayList();
RexNode nonEquiConds = RelOptUtil.splitJoinCondition(sysFieldList, leftRel, rightRel,
- optiqJoinCond, leftJoinKeys, rightJoinKeys, null, null);
+ calciteJoinCond, leftJoinKeys, rightJoinKeys, null, null);
if (!nonEquiConds.isAlwaysTrue()) {
throw new SemanticException("Non equality condition not supported in Semi-Join"
@@ -12948,19 +12946,19 @@ private RelNode genJoinRelNode(RelNode leftRel, RelNode rightRel, JoinType hiveJ
RelNode[] inputRels = new RelNode[] { leftRel, rightRel };
final List leftKeys = new ArrayList();
final List rightKeys = new ArrayList();
- optiqJoinCond = HiveOptiqUtil.projectNonColumnEquiConditions(
- HiveProjectRel.DEFAULT_PROJECT_FACTORY, inputRels, leftJoinKeys, rightJoinKeys, 0,
+ calciteJoinCond = HiveCalciteUtil.projectNonColumnEquiConditions(
+ HiveProject.DEFAULT_PROJECT_FACTORY, inputRels, leftJoinKeys, rightJoinKeys, 0,
leftKeys, rightKeys);
- joinRel = new SemiJoinRel(cluster, cluster.traitSetOf(HiveRel.CONVENTION),
- inputRels[0], inputRels[1], optiqJoinCond, ImmutableIntList.copyOf(leftKeys),
+ joinRel = new SemiJoin(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
+ inputRels[0], inputRels[1], calciteJoinCond, ImmutableIntList.copyOf(leftKeys),
ImmutableIntList.copyOf(rightKeys));
} else {
- joinRel = HiveJoinRel.getJoin(cluster, leftRel, rightRel, optiqJoinCond, optiqJoinType,
+ joinRel = HiveJoin.getJoin(cluster, leftRel, rightRel, calciteJoinCond, calciteJoinType,
leftSemiJoin);
}
// 5. Add new JoinRel & its RR to the maps
- relToHiveColNameOptiqPosMap.put(joinRel, this.buildHiveToOptiqColumnMap(joinRR, joinRel));
+ relToHiveColNameCalcitePosMap.put(joinRel, this.buildHiveToCalciteColumnMap(joinRR, joinRel));
relToHiveRR.put(joinRel, joinRR);
return joinRel;
@@ -12986,7 +12984,7 @@ private RelNode genJoinLogicalPlan(ASTNode joinParseTree, Map a
String msg = String.format("UNIQUE JOIN is currently not supported in CBO,"
+ " turn off cbo to use UNIQUE JOIN.");
LOG.debug(msg);
- throw new OptiqSemanticException(msg);
+ throw new CalciteSemanticException(msg);
}
// 1. Determine Join Type
@@ -13060,18 +13058,18 @@ private RelNode genJoinLogicalPlan(ASTNode joinParseTree, Map a
private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticException {
RowResolver rr = new RowResolver();
- HiveTableScanRel tableRel = null;
+ HiveTableScan tableRel = null;
try {
- // 1. If the table has a Sample specified, bail from Optiq path.
+ // 1. If the table has a Sample specified, bail from Calcite path.
if ( qb.getParseInfo().getTabSample(tableAlias) != null ||
SemanticAnalyzer.this.nameToSplitSample.containsKey(tableAlias)) {
String msg = String.format("Table Sample specified for %s." +
" Currently we don't support Table Sample clauses in CBO," +
" turn off cbo for queries on tableSamples.", tableAlias);
LOG.debug(msg);
- throw new OptiqSemanticException(msg);
+ throw new CalciteSemanticException(msg);
}
// 2. Get Table Metadata
@@ -13136,13 +13134,13 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticExc
noColsMissingStats);
// 5. Build Hive Table Scan Rel
- tableRel = new HiveTableScanRel(cluster, cluster.traitSetOf(HiveRel.CONVENTION), optTable,
+ tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
rowType);
// 6. Add Schema(RR) to RelNode-Schema map
- ImmutableMap hiveToOptiqColMap = buildHiveToOptiqColumnMap(rr, tableRel);
+ ImmutableMap hiveToCalciteColMap = buildHiveToCalciteColumnMap(rr, tableRel);
relToHiveRR.put(tableRel, rr);
- relToHiveColNameOptiqPosMap.put(tableRel, hiveToOptiqColMap);
+ relToHiveColNameCalcitePosMap.put(tableRel, hiveToCalciteColMap);
} catch (Exception e) {
if (e instanceof SemanticException) {
throw (SemanticException) e;
@@ -13159,21 +13157,21 @@ private RelNode genFilterRelNode(ASTNode filterExpr, RelNode srcRel) throws Sema
if (filterCondn instanceof ExprNodeConstantDesc &&
!filterCondn.getTypeString().equals(serdeConstants.BOOLEAN_TYPE_NAME)) {
// queries like select * from t1 where 'foo';
- // Optiq's rule PushFilterThroughProject chokes on it. Arguably, we can insert a cast to
+ // Calcite's rule PushFilterThroughProject chokes on it. Arguably, we can insert a cast to
// boolean in such cases, but since Postgres, Oracle and MS SQL server fail on compile time
// for such queries, its an arcane corner case, not worth of adding that complexity.
- throw new OptiqSemanticException("Filter expression with non-boolean return type.");
+ throw new CalciteSemanticException("Filter expression with non-boolean return type.");
}
- ImmutableMap hiveColNameOptiqPosMap = this.relToHiveColNameOptiqPosMap
+ ImmutableMap hiveColNameCalcitePosMap = this.relToHiveColNameCalcitePosMap
.get(srcRel);
RexNode convertedFilterExpr = new RexNodeConverter(cluster, srcRel.getRowType(),
- hiveColNameOptiqPosMap, 0, true).convert(filterCondn);
+ hiveColNameCalcitePosMap, 0, true).convert(filterCondn);
RexNode factoredFilterExpr = RexUtil.pullFactors(cluster.getRexBuilder(), convertedFilterExpr);
- RelNode filterRel = new HiveFilterRel(cluster, cluster.traitSetOf(HiveRel.CONVENTION),
+ RelNode filterRel = new HiveFilter(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
srcRel, factoredFilterExpr);
- this.relToHiveColNameOptiqPosMap.put(filterRel, hiveColNameOptiqPosMap);
+ this.relToHiveColNameCalcitePosMap.put(filterRel, hiveColNameCalcitePosMap);
relToHiveRR.put(filterRel, relToHiveRR.get(srcRel));
- relToHiveColNameOptiqPosMap.put(filterRel, hiveColNameOptiqPosMap);
+ relToHiveColNameCalcitePosMap.put(filterRel, hiveColNameCalcitePosMap);
return filterRel;
}
@@ -13187,8 +13185,8 @@ private RelNode genFilterRelNode(QB qb, ASTNode searchCond, RelNode srcRel,
* #genFilterPlan} - for now we will support the same behavior as non CBO
* route. - but plan to allow nested SubQueries(Restriction.9.m) and
* multiple SubQuery expressions(Restriction.8.m). This requires use to
- * utilize Optiq's Decorrelation mechanics, and for Optiq to fix/flush out
- * Null semantics(OPTIQ-373) - besides only the driving code has been
+ * utilize Calcite's Decorrelation mechanics, and for Calcite to fix/flush out
+ * Null semantics(CALCITE-373) - besides only the driving code has been
* copied. Most of the code which is SubQueryUtils and QBSubQuery is
* reused.
*/
@@ -13222,7 +13220,7 @@ private RelNode genFilterRelNode(QB qb, ASTNode searchCond, RelNode srcRel,
RowResolver inputRR = relToHiveRR.get(srcRel);
RowResolver outerQBRR = inputRR;
ImmutableMap outerQBPosMap =
- relToHiveColNameOptiqPosMap.get(srcRel);
+ relToHiveColNameCalcitePosMap.get(srcRel);
for (int i = 0; i < subQueries.size(); i++) {
ASTNode subQueryAST = subQueries.get(i);
@@ -13314,7 +13312,7 @@ private RelNode genFilterRelNode(QB qb, ASTNode searchCond, RelNode srcRel,
}
}
relToHiveRR.put(srcRel, outerQBRR);
- relToHiveColNameOptiqPosMap.put(srcRel, outerQBPosMap);
+ relToHiveColNameCalcitePosMap.put(srcRel, outerQBPosMap);
return srcRel;
}
@@ -13326,20 +13324,20 @@ private RelNode projectLeftOuterSide(RelNode srcRel, int numColumns) throws Sema
RowResolver oRR = new RowResolver();
RowResolver.add(oRR, iRR, numColumns);
- List optiqColLst = new ArrayList();
+ List calciteColLst = new ArrayList();
List oFieldNames = new ArrayList();
RelDataType iType = srcRel.getRowType();
for (int i = 0; i < iType.getFieldCount(); i++) {
RelDataTypeField fType = iType.getFieldList().get(i);
String fName = iType.getFieldNames().get(i);
- optiqColLst.add(cluster.getRexBuilder().makeInputRef(fType.getType(), i));
+ calciteColLst.add(cluster.getRexBuilder().makeInputRef(fType.getType(), i));
oFieldNames.add(fName);
}
- HiveRel selRel = HiveProjectRel.create(srcRel, optiqColLst, oFieldNames);
+ HiveRelNode selRel = HiveProject.create(srcRel, calciteColLst, oFieldNames);
- this.relToHiveColNameOptiqPosMap.put(selRel, buildHiveToOptiqColumnMap(oRR, selRel));
+ this.relToHiveColNameCalcitePosMap.put(selRel, buildHiveToCalciteColumnMap(oRR, selRel));
this.relToHiveRR.put(selRel, oRR);
return selRel;
}
@@ -13380,11 +13378,11 @@ private AggregateCall convertGBAgg(AggInfo agg, RelNode input, List gbC
RexNodeConverter converter, HashMap rexNodeToPosMap,
Integer childProjLstIndx) throws SemanticException {
- // 1. Get agg fn ret type in Optiq
+ // 1. Get agg fn ret type in Calcite
RelDataType aggFnRetType = TypeConverter.convert(agg.m_returnType,
this.cluster.getTypeFactory());
- // 2. Convert Agg Fn args and type of args to Optiq
+ // 2. Convert Agg Fn args and type of args to Calcite
// TODO: Does HQL allows expressions as aggregate args or can it only be
// projections from child?
Integer inputIndx;
@@ -13407,9 +13405,9 @@ private AggregateCall convertGBAgg(AggInfo agg, RelNode input, List gbC
aggArgRelDTBldr.add(TypeConverter.convert(expr.getTypeInfo(), dtFactory));
}
- // 3. Get Aggregation FN from Optiq given name, ret type and input arg
+ // 3. Get Aggregation FN from Calcite given name, ret type and input arg
// type
- final Aggregation aggregation = SqlFunctionConverter.getOptiqAggFn(agg.m_udfName,
+ final SqlAggFunction aggregation = SqlFunctionConverter.getCalciteAggFn(agg.m_udfName,
aggArgRelDTBldr.build(), aggFnRetType);
return new AggregateCall(aggregation, agg.m_distinct, argList, aggFnRetType, null);
@@ -13418,22 +13416,23 @@ private AggregateCall convertGBAgg(AggInfo agg, RelNode input, List gbC
private RelNode genGBRelNode(List gbExprs, List aggInfoLst,
RelNode srcRel) throws SemanticException {
RowResolver gbInputRR = this.relToHiveRR.get(srcRel);
- ImmutableMap posMap = this.relToHiveColNameOptiqPosMap.get(srcRel);
+ ImmutableMap posMap = this.relToHiveColNameCalcitePosMap.get(srcRel);
RexNodeConverter converter = new RexNodeConverter(this.cluster, srcRel.getRowType(),
posMap, 0, false);
final List gbChildProjLst = Lists.newArrayList();
final HashMap rexNodeToPosMap = new HashMap();
- final BitSet groupSet = new BitSet();
+ final List groupSetPositions = Lists.newArrayList();
Integer gbIndx = 0;
RexNode rnd;
for (ExprNodeDesc key : gbExprs) {
rnd = converter.convert(key);
gbChildProjLst.add(rnd);
- groupSet.set(gbIndx);
+ groupSetPositions.add(gbIndx);
rexNodeToPosMap.put(rnd.toString(), gbIndx);
gbIndx++;
}
+ final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
List aggregateCalls = Lists.newArrayList();
int i = aggInfoLst.size();
@@ -13447,12 +13446,12 @@ private RelNode genGBRelNode(List gbExprs, List aggInfoLs
// first element from srcRel
gbChildProjLst.add(this.cluster.getRexBuilder().makeInputRef(srcRel, 0));
}
- RelNode gbInputRel = HiveProjectRel.create(srcRel, gbChildProjLst, null);
+ RelNode gbInputRel = HiveProject.create(srcRel, gbChildProjLst, null);
- HiveRel aggregateRel = null;
+ HiveRelNode aggregateRel = null;
try {
- aggregateRel = new HiveAggregateRel(cluster, cluster.traitSetOf(HiveRel.CONVENTION),
- gbInputRel, groupSet, aggregateCalls);
+ aggregateRel = new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
+ gbInputRel, false, groupSet, null, aggregateCalls);
} catch (InvalidRelException e) {
throw new SemanticException(e);
}
@@ -13592,7 +13591,7 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
RelNode gbRel = null;
QBParseInfo qbp = getQBParseInfo(qb);
- // 0. for GSets, Cube, Rollup, bail from Optiq path.
+ // 0. for GSets, Cube, Rollup, bail from Calcite path.
if (!qbp.getDestRollups().isEmpty()
|| !qbp.getDestGroupingSets().isEmpty()
|| !qbp.getDestCubes().isEmpty()) {
@@ -13612,7 +13611,7 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
+ " clauses in CBO," + " turn off cbo for these queries.",
gbyClause);
LOG.debug(msg);
- throw new OptiqSemanticException(msg);
+ throw new CalciteSemanticException(msg);
}
// 1. Gather GB Expressions (AST) (GB + Aggregations)
@@ -13641,7 +13640,7 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
grpbyExpr, new TypeCheckCtx(groupByInputRowResolver));
ExprNodeDesc grpbyExprNDesc = astToExprNDescMap.get(grpbyExpr);
if (grpbyExprNDesc == null)
- throw new OptiqSemanticException("Invalid Column Reference: " + grpbyExpr.dump());
+ throw new CalciteSemanticException("Invalid Column Reference: " + grpbyExpr.dump());
addToGBExpr(groupByOutputRowResolver, groupByInputRowResolver, grpbyExpr,
grpbyExprNDesc, gbExprNDescLst, outputColumnNames);
@@ -13682,8 +13681,8 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
}
gbRel = genGBRelNode(gbExprNDescLst, aggregations, srcRel);
- relToHiveColNameOptiqPosMap.put(gbRel,
- buildHiveToOptiqColumnMap(groupByOutputRowResolver, gbRel));
+ relToHiveColNameCalcitePosMap.put(gbRel,
+ buildHiveToCalciteColumnMap(groupByOutputRowResolver, gbRel));
this.relToHiveRR.put(gbRel, groupByOutputRowResolver);
}
@@ -13735,7 +13734,7 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
RexNode rnd;
RexNodeConverter converter = new RexNodeConverter(cluster, srcRel.getRowType(),
- relToHiveColNameOptiqPosMap.get(srcRel), 0, false);
+ relToHiveColNameCalcitePosMap.get(srcRel), 0, false);
int srcRelRecordSz = srcRel.getRowType().getFieldCount();
for (int i = 0; i < obASTExprLst.size(); i++) {
@@ -13751,7 +13750,7 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
rnd = converter.convert(obExprNDesc);
// 2.3 Determine the index of ob expr in child schema
- // NOTE: Optiq can not take compound exprs in OB without it being
+ // NOTE: Calcite can not take compound exprs in OB without it being
// present in the child (& hence we add a child Project Rel)
if (rnd instanceof RexInputRef) {
fieldIndex = ((RexInputRef) rnd).getIndex();
@@ -13763,7 +13762,7 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
}
// 2.4 Determine the Direction of order by
- org.eigenbase.rel.RelFieldCollation.Direction order = RelFieldCollation.Direction.DESCENDING;
+ org.apache.calcite.rel.RelFieldCollation.Direction order = RelFieldCollation.Direction.DESCENDING;
if (obASTExpr.getType() == HiveParser.TOK_TABSORTCOLNAMEASC) {
order = RelFieldCollation.Direction.ASCENDING;
}
@@ -13785,7 +13784,7 @@ public RexNode apply(RelDataTypeField input) {
});
RowResolver obSyntheticProjectRR = new RowResolver();
if (!RowResolver.add(obSyntheticProjectRR, inputRR)) {
- throw new OptiqSemanticException(
+ throw new CalciteSemanticException(
"Duplicates detected when adding columns to RR: see previous message");
}
int vcolPos = inputRR.getRowSchema().getSignature().size();
@@ -13799,28 +13798,28 @@ public RexNode apply(RelDataTypeField input) {
if (outermostOB) {
if (!RowResolver.add(outputRR, inputRR)) {
- throw new OptiqSemanticException(
+ throw new CalciteSemanticException(
"Duplicates detected when adding columns to RR: see previous message");
}
} else {
if (!RowResolver.add(outputRR, obSyntheticProjectRR)) {
- throw new OptiqSemanticException(
+ throw new CalciteSemanticException(
"Duplicates detected when adding columns to RR: see previous message");
}
originalOBChild = srcRel;
}
} else {
if (!RowResolver.add(outputRR, inputRR)) {
- throw new OptiqSemanticException(
+ throw new CalciteSemanticException(
"Duplicates detected when adding columns to RR: see previous message");
}
}
// 4. Construct SortRel
- RelTraitSet traitSet = cluster.traitSetOf(HiveRel.CONVENTION);
+ RelTraitSet traitSet = cluster.traitSetOf(HiveRelNode.CONVENTION);
RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.of(fieldCollations));
- sortRel = new HiveSortRel(cluster, traitSet, obInputRel, canonizedCollation, null, null);
+ sortRel = new HiveSort(cluster, traitSet, obInputRel, canonizedCollation, null, null);
// 5. Update the maps
// NOTE: Output RR for SortRel is considered same as its input; we may
@@ -13828,35 +13827,35 @@ public RexNode apply(RelDataTypeField input) {
// rowtype of sortrel is the type of it child; if child happens to be
// synthetic project that we introduced then that projectrel would
// contain the vc.
- ImmutableMap hiveColNameOptiqPosMap = buildHiveToOptiqColumnMap(outputRR,
+ ImmutableMap hiveColNameCalcitePosMap = buildHiveToCalciteColumnMap(outputRR,
sortRel);
relToHiveRR.put(sortRel, outputRR);
- relToHiveColNameOptiqPosMap.put(sortRel, hiveColNameOptiqPosMap);
+ relToHiveColNameCalcitePosMap.put(sortRel, hiveColNameCalcitePosMap);
}
return (new Pair(sortRel, originalOBChild));
}
private RelNode genLimitLogicalPlan(QB qb, RelNode srcRel) throws SemanticException {
- HiveRel sortRel = null;
+ HiveRelNode sortRel = null;
QBParseInfo qbp = getQBParseInfo(qb);
Integer limit = qbp.getDestToLimit().get(qbp.getClauseNames().iterator().next());
if (limit != null) {
RexNode fetch = cluster.getRexBuilder().makeExactLiteral(BigDecimal.valueOf(limit));
- RelTraitSet traitSet = cluster.traitSetOf(HiveRel.CONVENTION);
+ RelTraitSet traitSet = cluster.traitSetOf(HiveRelNode.CONVENTION);
RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.EMPTY);
- sortRel = new HiveSortRel(cluster, traitSet, srcRel, canonizedCollation, null, fetch);
+ sortRel = new HiveSort(cluster, traitSet, srcRel, canonizedCollation, null, fetch);
RowResolver outputRR = new RowResolver();
if (!RowResolver.add(outputRR, relToHiveRR.get(srcRel))) {
- throw new OptiqSemanticException(
+ throw new CalciteSemanticException(
"Duplicates detected when adding columns to RR: see previous message");
}
- ImmutableMap hiveColNameOptiqPosMap = buildHiveToOptiqColumnMap(outputRR,
+ ImmutableMap hiveColNameCalcitePosMap = buildHiveToCalciteColumnMap(outputRR,
sortRel);
relToHiveRR.put(sortRel, outputRR);
- relToHiveColNameOptiqPosMap.put(sortRel, hiveColNameOptiqPosMap);
+ relToHiveColNameCalcitePosMap.put(sortRel, hiveColNameCalcitePosMap);
}
return sortRel;
@@ -13969,29 +13968,29 @@ int getWindowSpecIndx(ASTNode wndAST) {
AggInfo hiveAggInfo = getHiveAggInfo(windowProjAst, wndSpecASTIndx - 1,
this.relToHiveRR.get(srcRel));
- // 3. Get Optiq Return type for Agg Fn
+ // 3. Get Calcite Return type for Agg Fn
wHiveRetType = hiveAggInfo.m_returnType;
- RelDataType optiqAggFnRetType = TypeConverter.convert(hiveAggInfo.m_returnType,
+ RelDataType calciteAggFnRetType = TypeConverter.convert(hiveAggInfo.m_returnType,
this.cluster.getTypeFactory());
- // 4. Convert Agg Fn args to Optiq
- ImmutableMap posMap = this.relToHiveColNameOptiqPosMap.get(srcRel);
+ // 4. Convert Agg Fn args to Calcite
+ ImmutableMap posMap = this.relToHiveColNameCalcitePosMap.get(srcRel);
RexNodeConverter converter = new RexNodeConverter(this.cluster, srcRel.getRowType(),
posMap, 0, false);
- Builder optiqAggFnArgsBldr = ImmutableList. builder();
- Builder optiqAggFnArgsTypeBldr = ImmutableList. builder();
+ Builder calciteAggFnArgsBldr = ImmutableList. builder();
+ Builder calciteAggFnArgsTypeBldr = ImmutableList. builder();
RexNode rexNd = null;
for (int i = 0; i < hiveAggInfo.m_aggParams.size(); i++) {
- optiqAggFnArgsBldr.add(converter.convert(hiveAggInfo.m_aggParams.get(i)));
- optiqAggFnArgsTypeBldr.add(TypeConverter.convert(hiveAggInfo.m_aggParams.get(i)
+ calciteAggFnArgsBldr.add(converter.convert(hiveAggInfo.m_aggParams.get(i)));
+ calciteAggFnArgsTypeBldr.add(TypeConverter.convert(hiveAggInfo.m_aggParams.get(i)
.getTypeInfo(), this.cluster.getTypeFactory()));
}
- ImmutableList optiqAggFnArgs = optiqAggFnArgsBldr.build();
- ImmutableList optiqAggFnArgsType = optiqAggFnArgsTypeBldr.build();
+ ImmutableList calciteAggFnArgs = calciteAggFnArgsBldr.build();
+ ImmutableList calciteAggFnArgsType = calciteAggFnArgsTypeBldr.build();
- // 5. Get Optiq Agg Fn
- final SqlAggFunction optiqAggFn = SqlFunctionConverter.getOptiqAggFn(hiveAggInfo.m_udfName,
- optiqAggFnArgsType, optiqAggFnRetType);
+ // 5. Get Calcite Agg Fn
+ final SqlAggFunction calciteAggFn = SqlFunctionConverter.getCalciteAggFn(hiveAggInfo.m_udfName,
+ calciteAggFnArgsType, calciteAggFnRetType);
// 6. Translate Window spec
RowResolver inputRR = relToHiveRR.get(srcRel);
@@ -14003,7 +14002,7 @@ int getWindowSpecIndx(ASTNode wndAST) {
boolean isRows = ((wndSpec.windowFrame.start instanceof RangeBoundarySpec) || (wndSpec.windowFrame.end instanceof RangeBoundarySpec)) ? true
: false;
- w = cluster.getRexBuilder().makeOver(optiqAggFnRetType, optiqAggFn, optiqAggFnArgs,
+ w = cluster.getRexBuilder().makeOver(calciteAggFnRetType, calciteAggFn, calciteAggFnArgs,
partitionKeys, ImmutableList. copyOf(orderKeys), lowerBound,
upperBound, isRows, true, false);
} else {
@@ -14028,7 +14027,7 @@ private RelNode genSelectForWindowing(
RowResolver inputRR = this.relToHiveRR.get(srcRel);
// 2. Get RexNodes for original Projections from below
List projsForWindowSelOp = new ArrayList(
- HiveOptiqUtil.getProjsFromBelowAsInputRef(srcRel));
+ HiveCalciteUtil.getProjsFromBelowAsInputRef(srcRel));
// 3. Construct new Row Resolver with everything from below.
RowResolver out_rwsch = new RowResolver();
@@ -14058,15 +14057,15 @@ private RelNode genSelectForWindowing(
return genSelectRelNode(projsForWindowSelOp, out_rwsch, srcRel);
}
- private RelNode genSelectRelNode(List optiqColLst, RowResolver out_rwsch,
- RelNode srcRel) throws OptiqSemanticException {
+ private RelNode genSelectRelNode(List calciteColLst, RowResolver out_rwsch,
+ RelNode srcRel) throws CalciteSemanticException {
// 1. Build Column Names
Set colNamesSet = new HashSet();
List cInfoLst = out_rwsch.getRowSchema().getSignature();
ArrayList columnNames = new ArrayList();
String[] qualifiedColNames;
String tmpColAlias;
- for (int i = 0; i < optiqColLst.size(); i++) {
+ for (int i = 0; i < calciteColLst.size(); i++) {
ColumnInfo cInfo = cInfoLst.get(i);
qualifiedColNames = out_rwsch.reverseLookup(cInfo.getInternalName());
/*
@@ -14094,23 +14093,23 @@ private RelNode genSelectRelNode(List optiqColLst, RowResolver out_rwsc
columnNames.add(tmpColAlias);
}
- // 3 Build Optiq Rel Node for project using converted projections & col
+ // 3 Build Calcite Rel Node for project using converted projections & col
// names
- HiveRel selRel = HiveProjectRel.create(srcRel, optiqColLst, columnNames);
+ HiveRelNode selRel = HiveProject.create(srcRel, calciteColLst, columnNames);
// 4. Keep track of colname-to-posmap && RR for new select
- this.relToHiveColNameOptiqPosMap.put(selRel, buildHiveToOptiqColumnMap(out_rwsch, selRel));
+ this.relToHiveColNameCalcitePosMap.put(selRel, buildHiveToCalciteColumnMap(out_rwsch, selRel));
this.relToHiveRR.put(selRel, out_rwsch);
return selRel;
}
- private RelNode genSelectRelNode(List optiqColLst, RowResolver out_rwsch,
- RelNode srcRel, boolean removethismethod) throws OptiqSemanticException {
+ private RelNode genSelectRelNode(List calciteColLst, RowResolver out_rwsch,
+ RelNode srcRel, boolean removethismethod) throws CalciteSemanticException {
// 1. Build Column Names
// TODO: Should this be external names
ArrayList columnNames = new ArrayList();
- for (int i = 0; i < optiqColLst.size(); i++) {
+ for (int i = 0; i < calciteColLst.size(); i++) {
columnNames.add(getColumnInternalName(i));
}
@@ -14126,12 +14125,12 @@ public String apply(String hName) {
}
});
- // 3 Build Optiq Rel Node for project using converted projections & col
+ // 3 Build Calcite Rel Node for project using converted projections & col
// names
- HiveRel selRel = HiveProjectRel.create(srcRel, optiqColLst, oFieldNames);
+ HiveRelNode selRel = HiveProject.create(srcRel, calciteColLst, oFieldNames);
// 4. Keep track of colname-to-posmap && RR for new select
- this.relToHiveColNameOptiqPosMap.put(selRel, buildHiveToOptiqColumnMap(out_rwsch, selRel));
+ this.relToHiveColNameCalcitePosMap.put(selRel, buildHiveToCalciteColumnMap(out_rwsch, selRel));
this.relToHiveRR.put(selRel, out_rwsch);
return selRel;
@@ -14178,7 +14177,7 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticExcep
String msg = String.format("Hint specified for %s."
+ " Currently we don't support hints in CBO, turn off cbo to use hints.", hint);
LOG.debug(msg);
- throw new OptiqSemanticException(msg);
+ throw new CalciteSemanticException(msg);
}
// 4. Bailout if select involves Transform
@@ -14187,7 +14186,7 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticExcep
String msg = String.format("SELECT TRANSFORM is currently not supported in CBO,"
+ " turn off cbo to use TRANSFORM.");
LOG.debug(msg);
- throw new OptiqSemanticException(msg);
+ throw new CalciteSemanticException(msg);
}
// 5. Bailout if select involves UDTF
@@ -14200,7 +14199,7 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticExcep
String msg = String.format("UDTF " + funcName + " is currently not supported in CBO,"
+ " turn off cbo to use UDTF " + funcName);
LOG.debug(msg);
- throw new OptiqSemanticException(msg);
+ throw new CalciteSemanticException(msg);
}
}
@@ -14262,9 +14261,9 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticExcep
unescapeIdentifier(expr.getChild(0).getChild(0).getText().toLowerCase()), expr,
col_list, excludedColumns, inputRR, null, pos, out_rwsch, tabAliasesForAllProjs,
true);
- } else if (expr.toStringTree().contains("TOK_FUNCTIONDI") && !(srcRel instanceof HiveAggregateRel)) {
+ } else if (expr.toStringTree().contains("TOK_FUNCTIONDI") && !(srcRel instanceof HiveAggregate)) {
// Likely a malformed query eg, select hash(distinct c1) from t1;
- throw new OptiqSemanticException("Distinct without an aggreggation.");
+ throw new CalciteSemanticException("Distinct without an aggregation.");
} else {
// Case when this is an expression
TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR);
@@ -14282,7 +14281,7 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticExcep
colInfo.setSkewedCol((exp instanceof ExprNodeColumnDesc) ? ((ExprNodeColumnDesc) exp)
.isSkewedCol() : false);
if (!out_rwsch.putWithCheck(tabAlias, colAlias, null, colInfo)) {
- throw new OptiqSemanticException("Cannot add column to RR: " + tabAlias + "."
+ throw new CalciteSemanticException("Cannot add column to RR: " + tabAlias + "."
+ colAlias + " => " + colInfo + " due to duplication, see previous warnings");
}
@@ -14299,16 +14298,16 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticExcep
}
selectStar = selectStar && exprList.getChildCount() == posn + 1;
- // 7. Convert Hive projections to Optiq
- List optiqColLst = new ArrayList();
+ // 7. Convert Hive projections to Calcite
+ List calciteColLst = new ArrayList();
RexNodeConverter rexNodeConv = new RexNodeConverter(cluster, srcRel.getRowType(),
buildHiveColNameToInputPosMap(col_list, inputRR), 0, false);
for (ExprNodeDesc colExpr : col_list) {
- optiqColLst.add(rexNodeConv.convert(colExpr));
+ calciteColLst.add(rexNodeConv.convert(colExpr));
}
- // 8. Build Optiq Rel
- RelNode selRel = genSelectRelNode(optiqColLst, out_rwsch, srcRel);
+ // 8. Build Calcite Rel
+ RelNode selRel = genSelectRelNode(calciteColLst, out_rwsch, srcRel);
return selRel;
}
@@ -14349,7 +14348,7 @@ private RelNode genLogicalPlan(QB qb, boolean outerMostQB) throws SemanticExcept
if (LOG.isDebugEnabled()) {
LOG.debug(msg + " because it: " + reason);
}
- throw new OptiqSemanticException(msg);
+ throw new CalciteSemanticException(msg);
}
// 1. Build Rel For Src (SubQuery, TS, Join)
@@ -14368,7 +14367,7 @@ private RelNode genLogicalPlan(QB qb, boolean outerMostQB) throws SemanticExcept
if (aliasToRel.isEmpty()) {
// // This may happen for queries like select 1; (no source table)
// We can do following which is same, as what Hive does.
- // With this, we will be able to generate Optiq plan.
+ // With this, we will be able to generate Calcite plan.
// qb.getMetaData().setSrcForAlias(DUMMY_TABLE, getDummyTable());
// RelNode op = genTableLogicalPlan(DUMMY_TABLE, qb);
// qb.addAlias(DUMMY_TABLE);
@@ -14378,7 +14377,7 @@ private RelNode genLogicalPlan(QB qb, boolean outerMostQB) throws SemanticExcept
// table
// So, for now lets just disable this. Anyway there is nothing much to
// optimize in such cases.
- throw new OptiqSemanticException("Unsupported");
+ throw new CalciteSemanticException("Unsupported");
}
// 1.3 process join
@@ -14417,7 +14416,7 @@ private RelNode genLogicalPlan(QB qb, boolean outerMostQB) throws SemanticExcept
// 8. Introduce top constraining select if needed.
// NOTES:
- // 1. Optiq can not take an expr in OB; hence it needs to be added as VC
+ // 1. Calcite can not take an expr in OB; hence it needs to be added as VC
// in the input select; In such cases we need to introduce a select on top
// to ensure VC is not visible beyond Limit, OB.
// 2. Hive can not preserve order across select. In subqueries OB is used
@@ -14428,8 +14427,8 @@ private RelNode genLogicalPlan(QB qb, boolean outerMostQB) throws SemanticExcept
// limitation(#2) stated above. The RR for OB will not include VC. Thus
// Result Schema will not include exprs used by top OB. During AST Conv,
// in the PlanModifierForASTConv we would modify the top level OB to
- // migrate exprs from input sel to SortRel (Note that Optiq doesn't
- // support this; but since we are done with Optiq at this point its OK).
+ // migrate exprs from input sel to SortRel (Note that Calcite doesn't
+ // support this; but since we are done with Calcite at this point its OK).
if (topConstrainingProjArgsRel != null) {
List originalInputRefs = Lists.transform(topConstrainingProjArgsRel.getRowType()
.getFieldList(), new Function() {
@@ -14465,7 +14464,7 @@ public RexNode apply(RelDataTypeField input) {
newRR.put(alias, tmp[1], newCi);
}
relToHiveRR.put(srcRel, newRR);
- relToHiveColNameOptiqPosMap.put(srcRel, buildHiveToOptiqColumnMap(newRR, srcRel));
+ relToHiveColNameCalcitePosMap.put(srcRel, buildHiveToCalciteColumnMap(newRR, srcRel));
}
if (LOG.isDebugEnabled()) {
@@ -14482,9 +14481,9 @@ private RelNode genGBHavingLogicalPlan(QB qb, RelNode srcRel, Map 0;
- throw new OptiqSemanticException("Having clause without any group-by.");
+ throw new CalciteSemanticException("Having clause without any group-by.");
}
validateNoHavingReferenceToAlias(qb, (ASTNode) havingClause.getChild(0));
gbFilter = genFilterRelNode(qb, (ASTNode) havingClause.getChild(0), srcRel, aliasToRel,
@@ -14498,10 +14497,10 @@ private RelNode genGBHavingLogicalPlan(QB qb, RelNode srcRel, Map exprToAlias = qbPI.getAllExprToColumnAlias();
@@ -14553,13 +14552,13 @@ public Object post(Object t) {
+ " Turn off cbo for these queries.", aliasToCheck,
havingClause);
LOG.debug(msg);
- throw new OptiqSemanticException(msg);
+ throw new CalciteSemanticException(msg);
}
}
}
- private ImmutableMap buildHiveToOptiqColumnMap(RowResolver rr, RelNode rNode) {
+ private ImmutableMap buildHiveToCalciteColumnMap(RowResolver rr, RelNode rNode) {
ImmutableMap.Builder b = new ImmutableMap.Builder();
int i = 0;
for (ColumnInfo ci : rr.getRowSchema().getSignature()) {
@@ -14584,13 +14583,13 @@ public Object post(Object t) {
return hiveColNameToInputPosMapBuilder.build();
}
- private QBParseInfo getQBParseInfo(QB qb) throws OptiqSemanticException {
+ private QBParseInfo getQBParseInfo(QB qb) throws CalciteSemanticException {
QBParseInfo qbp = qb.getParseInfo();
if (qbp.getClauseNames().size() > 1) {
String msg = String.format("Multi Insert is currently not supported in CBO,"
+ " turn off cbo to use Multi Insert.");
LOG.debug(msg);
- throw new OptiqSemanticException(msg);
+ throw new CalciteSemanticException(msg);
}
return qbp;
}