diff --git a/druid-handler/pom.xml b/druid-handler/pom.xml
index a7a0f8f..959704c 100644
--- a/druid-handler/pom.xml
+++ b/druid-handler/pom.xml
@@ -198,6 +198,17 @@
       <groupId>org.apache.calcite</groupId>
       <artifactId>calcite-druid</artifactId>
       <version>${calcite.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.calcite.avatica</groupId>
+          <artifactId>avatica-core</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.calcite.avatica</groupId>
+      <artifactId>avatica</artifactId>
+      <version>${avatica.version}</version>
     </dependency>
     <dependency>
       <groupId>joda-time</groupId>
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
index d1b2a72..a229104 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
@@ -147,7 +147,11 @@ private static String createSelectStarQuery(String dataSource) throws IOException {
     // Create Select query
     SelectQueryBuilder builder = new Druids.SelectQueryBuilder();
     builder.dataSource(dataSource);
-    builder.intervals(Arrays.asList(DruidTable.DEFAULT_INTERVAL));
+    final List<Interval> intervals = Arrays.asList(
+        new Interval(DruidTable.DEFAULT_INTERVAL.getStartMillis(),
+            DruidTable.DEFAULT_INTERVAL.getEndMillis(),
+            ISOChronology.getInstanceUTC()));
+    builder.intervals(intervals);
     builder.pagingSpec(PagingSpec.newSpec(1));
     Map<String, Object> context = new HashMap<>();
     context.put(Constants.DRUID_QUERY_FETCH, false);
@@ -322,11 +326,15 @@ private static String createSelectStarQuery(String dataSource) throws IOException {
   private static List<List<Interval>> createSplitsIntervals(List<Interval> intervals,
       int numSplits
   ) {
-    final long totalTime = DruidDateTimeUtils.extractTotalTime(intervals);
+    long startTime = intervals.get(0).getStartMillis();
     long endTime = startTime;
     long currTime = 0;
     List<List<Interval>> newIntervals = new ArrayList<>();
+    long totalTime = 0;
+    for (Interval interval: intervals) {
+      totalTime += interval.getEndMillis() - interval.getStartMillis();
+    }
     for (int i = 0, posIntervals = 0; i < numSplits; i++) {
       final long rangeSize = Math.round((double) (totalTime * (i + 1)) / numSplits) -
           Math.round((double) (totalTime * i) / numSplits);
diff --git a/pom.xml b/pom.xml
index 5121770..45e2887 100644
--- a/pom.xml
+++ b/pom.xml
@@ -112,10 +112,10 @@
     <antlr.version>3.5.2</antlr.version>
     <apache-directory-server.version>1.5.6</apache-directory-server.version>
     <apache-directory-clientapi.version>0.1</apache-directory-clientapi.version>
-    <avatica.version>1.8.0</avatica.version>
+    <avatica.version>1.10.0-SNAPSHOT</avatica.version>
     <avro.version>1.7.7</avro.version>
    <bonecp.version>0.8.0.RELEASE</bonecp.version>
-    <calcite.version>1.10.0</calcite.version>
+    <calcite.version>1.12.0-SNAPSHOT</calcite.version>
     <datanucleus-api-jdo.version>4.2.1</datanucleus-api-jdo.version>
     <datanucleus-core.version>4.1.6</datanucleus-core.version>
     <datanucleus-rdbms.version>4.1.7</datanucleus-rdbms.version>
diff --git a/ql/pom.xml b/ql/pom.xml
index 1e6ba9a..ef14584 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -379,12 +379,22 @@
           <groupId>com.fasterxml.jackson.core</groupId>
           <artifactId>jackson-core</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.apache.calcite.avatica</groupId>
+          <artifactId>avatica-core</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.calcite</groupId>
       <artifactId>calcite-druid</artifactId>
       <version>${calcite.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.calcite.avatica</groupId>
+          <artifactId>avatica-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.calcite.avatica</groupId>
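Note on the two DruidQueryBasedInputFormat changes above: with this Calcite version, DruidTable.DEFAULT_INTERVAL is a LocalInterval rather than a Joda Interval, so the select-star query rebuilds a UTC Joda Interval from its millis, and the split logic sums interval durations inline rather than calling DruidDateTimeUtils.extractTotalTime(). A minimal standalone sketch of both pieces, assuming calcite-druid and joda-time on the classpath (the class name IntervalDemo is illustrative, not part of the patch):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.calcite.adapter.druid.DruidTable;
    import org.joda.time.Interval;
    import org.joda.time.chrono.ISOChronology;

    public class IntervalDemo {
      public static void main(String[] args) {
        // Rebuild a Joda Interval, pinned to UTC, from the adapter's default interval.
        List<Interval> intervals = Arrays.asList(
            new Interval(DruidTable.DEFAULT_INTERVAL.getStartMillis(),
                DruidTable.DEFAULT_INTERVAL.getEndMillis(),
                ISOChronology.getInstanceUTC()));
        // Total covered time, summed per interval, as createSplitsIntervals() now does.
        long totalTime = 0;
        for (Interval interval : intervals) {
          totalTime += interval.getEndMillis() - interval.getStartMillis();
        }
        System.out.println(totalTime + " ms across " + intervals.size() + " interval(s)");
      }
    }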
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
index 89c87cd..02a9b4a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
@@ -34,6 +34,7 @@
 import org.apache.calcite.adapter.druid.DruidQuery;
 import org.apache.calcite.adapter.druid.DruidSchema;
 import org.apache.calcite.adapter.druid.DruidTable;
+import org.apache.calcite.adapter.druid.LocalInterval;
 import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptMaterialization;
@@ -310,7 +311,7 @@ private static RelNode createTableScan(Table viewTable) {
       }
       metrics.add(field.getName());
     }
-    List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
+    List<LocalInterval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
     DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
         dataSource, RelDataTypeImpl.proto(rowType), metrics,
         DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
index 4edc4df..0b94b8a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
@@ -24,6 +24,7 @@
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.calcite.sql.type.ReturnTypes;
+import org.apache.calcite.sql.type.SqlTypeTransforms;

 import com.google.common.collect.Sets;

@@ -42,9 +43,10 @@
       Sets.newHashSet(YEAR, QUARTER, MONTH, WEEK, DAY, HOUR, MINUTE, SECOND);

   private HiveExtractDate(String name) {
-    super(name, SqlKind.EXTRACT, ReturnTypes.INTEGER_NULLABLE, null,
-        OperandTypes.INTERVALINTERVAL_INTERVALDATETIME,
-        SqlFunctionCategory.SYSTEM);
+    super(name, SqlKind.EXTRACT,
+        ReturnTypes.cascade(ReturnTypes.INTEGER, SqlTypeTransforms.FORCE_NULLABLE), null,
+        OperandTypes.INTERVALINTERVAL_INTERVALDATETIME,
+        SqlFunctionCategory.SYSTEM);
   }

 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
index 38d7906..81de33f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
@@ -21,6 +21,7 @@
 import java.util.List;

 import org.apache.calcite.plan.RelOptMaterialization;
+import org.apache.calcite.plan.RelOptMaterializations;
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptRuleCall;
@@ -77,7 +78,7 @@ protected void apply(RelOptRuleCall call, Project project, Filter filter, TableScan scan) {
     // Costing is done in transformTo(), so we call it repeatedly with all applicable
     // materialized views and cheapest one will be picked
     List<RelOptMaterialization> applicableMaterializations =
-        VolcanoPlanner.getApplicableMaterializations(root, materializations);
+        RelOptMaterializations.getApplicableMaterializations(root, materializations);
     for (RelOptMaterialization materialization : applicableMaterializations) {
       List<RelNode> subs = new MaterializedViewSubstitutionVisitor(
           materialization.queryRel, root, relBuilderFactory).go(materialization.tableRel);
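Note on the rule change above: the static helper moved off VolcanoPlanner onto the RelOptMaterializations utility class, with the same arguments and return type. Relatedly, HiveExtractDate now forces an unconditionally nullable INTEGER return type via cascade(INTEGER, FORCE_NULLABLE), where the old INTEGER_NULLABLE derived nullability from the operands. A compile-ready sketch of the new lookup call shape, assuming only calcite-core (class and method names are illustrative):

    import java.util.List;
    import org.apache.calcite.plan.RelOptMaterialization;
    import org.apache.calcite.plan.RelOptMaterializations;
    import org.apache.calcite.rel.RelNode;

    public class MaterializationLookup {
      // Same signature as the VolcanoPlanner variant it replaces: filter the
      // registered materializations down to those applicable to this plan root.
      static List<RelOptMaterialization> applicable(RelNode root,
          List<RelOptMaterialization> materializations) {
        return RelOptMaterializations.getApplicableMaterializations(root, materializations);
      }
    }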
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
index 69e157e..9bcdd0c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
@@ -165,7 +165,7 @@ public RelOptPredicateList getPredicates(Project project, RelMetadataQuery mq) {
           rexBuilder.makeInputRef(project, expr.i), expr.e));
       }
     }
-    return RelOptPredicateList.of(projectPullUpPredicates);
+    return RelOptPredicateList.of(rexBuilder, projectPullUpPredicates);
   }

   /** Infers predicates for a {@link org.apache.calcite.rel.core.Join}. */
@@ -202,6 +202,7 @@ public RelOptPredicateList getPredicates(Aggregate agg, RelMetadataQuery mq) {
     final RelNode input = agg.getInput();
     final RelOptPredicateList inputInfo = mq.getPulledUpPredicates(input);
     final List<RexNode> aggPullUpPredicates = new ArrayList<>();
+    final RexBuilder rexBuilder = agg.getCluster().getRexBuilder();

     ImmutableBitSet groupKeys = agg.getGroupSet();
     Mapping m = Mappings.create(MappingType.PARTIAL_FUNCTION,
@@ -219,7 +220,7 @@ public RelOptPredicateList getPredicates(Aggregate agg, RelMetadataQuery mq) {
         aggPullUpPredicates.add(r);
       }
     }
-    return RelOptPredicateList.of(aggPullUpPredicates);
+    return RelOptPredicateList.of(rexBuilder, aggPullUpPredicates);
   }

   /**
@@ -271,7 +272,7 @@ public RelOptPredicateList getPredicates(Union union, RelMetadataQuery mq) {
     if (!disjPred.isAlwaysTrue()) {
       preds.add(disjPred);
     }
-    return RelOptPredicateList.of(preds);
+    return RelOptPredicateList.of(rB, preds);
   }

   /**
@@ -411,6 +412,7 @@ public RelOptPredicateList inferPredicates(
       final JoinRelType joinType = joinRel.getJoinType();
       final List<RexNode> leftPreds = ImmutableList.copyOf(RelOptUtil.conjunctions(leftChildPredicates));
       final List<RexNode> rightPreds = ImmutableList.copyOf(RelOptUtil.conjunctions(rightChildPredicates));
+      final RexBuilder rexBuilder = joinRel.getCluster().getRexBuilder();
       switch (joinType) {
       case INNER:
       case LEFT:
@@ -476,13 +478,13 @@ public RelOptPredicateList inferPredicates(
           pulledUpPredicates = Iterables.concat(leftPreds, rightPreds,
               RelOptUtil.conjunctions(joinRel.getCondition()), inferredPredicates);
         }
-        return RelOptPredicateList.of(
+        return RelOptPredicateList.of(rexBuilder,
             pulledUpPredicates, leftInferredPredicates, rightInferredPredicates);
       case LEFT:
-        return RelOptPredicateList.of(
+        return RelOptPredicateList.of(rexBuilder,
             leftPreds, EMPTY_LIST, rightInferredPredicates);
       case RIGHT:
-        return RelOptPredicateList.of(
+        return RelOptPredicateList.of(rexBuilder,
             rightPreds, leftInferredPredicates, EMPTY_LIST);
       default:
         assert inferredPredicates.size() == 0;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 96ff5df..cca03c8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -50,6 +50,7 @@
 import org.apache.calcite.adapter.druid.DruidRules;
 import org.apache.calcite.adapter.druid.DruidSchema;
 import org.apache.calcite.adapter.druid.DruidTable;
+import org.apache.calcite.adapter.druid.LocalInterval;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptMaterialization;
 import org.apache.calcite.plan.RelOptPlanner;
@@ -2175,7 +2176,7 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticException {
       }
       metrics.add(field.getName());
     }
-    List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
+    List<LocalInterval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
     DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
         dataSource, RelDataTypeImpl.proto(rowType), metrics,
         DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
@@ -4062,5 +4063,4 @@ private QBParseInfo getQBParseInfo(QB qb) throws CalciteSemanticException {
     DRUID,
     NATIVE
   }
-
 }
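Note on the HiveRelMdPredicates changes above: every RelOptPredicateList.of() overload used here now takes the RexBuilder as its first argument, which is why each getPredicates()/inferPredicates() method pulls a builder from its operator's cluster. A minimal sketch of the new call shape, assuming calcite-core; it builds a standalone type factory only because there is no RelNode at hand (the class name PredicateListDemo is illustrative):

    import java.util.Collections;
    import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
    import org.apache.calcite.plan.RelOptPredicateList;
    import org.apache.calcite.rex.RexBuilder;
    import org.apache.calcite.rex.RexNode;

    public class PredicateListDemo {
      public static void main(String[] args) {
        // In the metadata provider the builder comes from rel.getCluster().getRexBuilder().
        RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
        RelOptPredicateList preds =
            RelOptPredicateList.of(rexBuilder, Collections.<RexNode>emptyList());
        System.out.println(preds.pulledUpPredicates);
      }
    }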