diff --git a/druid-handler/pom.xml b/druid-handler/pom.xml
index ca5028d..9fe829d 100644
--- a/druid-handler/pom.xml
+++ b/druid-handler/pom.xml
@@ -212,6 +212,17 @@
     <dependency>
       <groupId>org.apache.calcite</groupId>
       <artifactId>calcite-druid</artifactId>
       <version>${calcite.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.calcite.avatica</groupId>
+          <artifactId>avatica-core</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.calcite.avatica</groupId>
+      <artifactId>avatica</artifactId>
+      <version>${avatica.version}</version>
     </dependency>
     <dependency>
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
index 8b37840..d370777 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
@@ -158,7 +158,11 @@ private static String createSelectStarQuery(String dataSource) throws IOException
// Create Select query
SelectQueryBuilder builder = new Druids.SelectQueryBuilder();
builder.dataSource(dataSource);
-    builder.intervals(Arrays.asList(DruidTable.DEFAULT_INTERVAL));
+    final List<Interval> intervals = Arrays.asList(
+        new Interval(DruidTable.DEFAULT_INTERVAL.getStartMillis(),
+            DruidTable.DEFAULT_INTERVAL.getEndMillis(),
+            ISOChronology.getInstanceUTC()));
+    builder.intervals(intervals);
builder.pagingSpec(PagingSpec.newSpec(1));
     Map<String, Object> context = new HashMap<>();
context.put(Constants.DRUID_QUERY_FETCH, false);
@@ -327,11 +331,15 @@ private static String createSelectStarQuery(String dataSource) throws IOException
   private static List<List<Interval>> createSplitsIntervals(List<Interval> intervals, int numSplits
) {
- final long totalTime = DruidDateTimeUtils.extractTotalTime(intervals);
+
long startTime = intervals.get(0).getStartMillis();
long endTime = startTime;
long currTime = 0;
     List<List<Interval>> newIntervals = new ArrayList<>();
+ long totalTime = 0;
+ for (Interval interval: intervals) {
+ totalTime += interval.getEndMillis() - interval.getStartMillis();
+ }
for (int i = 0, posIntervals = 0; i < numSplits; i++) {
final long rangeSize = Math.round((double) (totalTime * (i + 1)) / numSplits) -
Math.round((double) (totalTime * i) / numSplits);
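
Note: with Calcite 1.12 the Druid adapter moved to its own LocalInterval representation, so the helper previously called here (DruidDateTimeUtils.extractTotalTime) apparently no longer applies to Joda intervals, and the patch inlines the total-time computation. A self-contained sketch of the proportional slicing that createSplitsIntervals performs (illustrative names, not the Hive code itself):

```java
import java.util.ArrayList;
import java.util.List;

import org.joda.time.Interval;

public class SplitSizeSketch {
  // Sum the time covered by all intervals, then cut the total into
  // numSplits ranges of (nearly) equal duration.
  static List<Long> splitSizes(List<Interval> intervals, int numSplits) {
    long totalTime = 0;
    for (Interval interval : intervals) {
      totalTime += interval.getEndMillis() - interval.getStartMillis();
    }
    List<Long> sizes = new ArrayList<>();
    for (int i = 0; i < numSplits; i++) {
      // Rounding both edges makes the sizes sum exactly to totalTime.
      sizes.add(Math.round((double) (totalTime * (i + 1)) / numSplits)
          - Math.round((double) (totalTime * i) / numSplits));
    }
    return sizes;
  }
}
```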
diff --git a/pom.xml b/pom.xml
index 3c46ad1..d6e6789 100644
--- a/pom.xml
+++ b/pom.xml
@@ -112,10 +112,10 @@
     <antlr.version>3.5.2</antlr.version>
     <apache-directory-server.version>1.5.6</apache-directory-server.version>
     <apache-directory-clientapi.version>0.1</apache-directory-clientapi.version>
-    <avatica.version>1.8.0</avatica.version>
+    <avatica.version>1.10.0-SNAPSHOT</avatica.version>
     <avro.version>1.7.7</avro.version>
     <bonecp.version>0.8.0.RELEASE</bonecp.version>
-    <calcite.version>1.10.0</calcite.version>
+    <calcite.version>1.12.0-SNAPSHOT</calcite.version>
     <datanucleus-api-jdo.version>4.2.1</datanucleus-api-jdo.version>
     <datanucleus-core.version>4.1.6</datanucleus-core.version>
     <datanucleus-rdbms.version>4.1.7</datanucleus-rdbms.version>
diff --git a/ql/pom.xml b/ql/pom.xml
index 7db0ede..c541538 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -379,12 +379,22 @@
           <groupId>com.fasterxml.jackson.core</groupId>
           <artifactId>jackson-core</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.apache.calcite.avatica</groupId>
+          <artifactId>avatica-core</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.calcite</groupId>
       <artifactId>calcite-druid</artifactId>
       <version>${calcite.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.calcite.avatica</groupId>
+          <artifactId>avatica-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.calcite.avatica</groupId>
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
index 89c87cd..02a9b4a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
@@ -34,6 +34,7 @@
import org.apache.calcite.adapter.druid.DruidQuery;
import org.apache.calcite.adapter.druid.DruidSchema;
import org.apache.calcite.adapter.druid.DruidTable;
+import org.apache.calcite.adapter.druid.LocalInterval;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptMaterialization;
@@ -310,7 +311,7 @@ private static RelNode createTableScan(Table viewTable) {
}
metrics.add(field.getName());
}
-    List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
+    List<LocalInterval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
index 9a65de3..d0b1757 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HivePlannerContext.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hive.ql.optimizer.calcite;
+import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.plan.Context;
import org.apache.calcite.rel.RelNode;
import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveAlgorithmsConf;
@@ -27,11 +28,14 @@
public class HivePlannerContext implements Context {
private HiveAlgorithmsConf algoConfig;
private HiveRulesRegistry registry;
+ private CalciteConnectionConfig calciteConfig;
   private Set<RelNode> corrScalarRexSQWithAgg;
 
-  public HivePlannerContext(HiveAlgorithmsConf algoConfig, HiveRulesRegistry registry, Set<RelNode> corrScalarRexSQWithAgg) {
+  public HivePlannerContext(HiveAlgorithmsConf algoConfig, HiveRulesRegistry registry,
+      CalciteConnectionConfig calciteConfig, Set<RelNode> corrScalarRexSQWithAgg) {
this.algoConfig = algoConfig;
this.registry = registry;
+ this.calciteConfig = calciteConfig;
// this is to keep track if a subquery is correlated and contains aggregate
// this is computed in CalcitePlanner while planning and is later required by subuery remove rule
// hence this is passed using HivePlannerContext
@@ -45,6 +49,9 @@ public HivePlannerContext(HiveAlgorithmsConf algoConfig, HiveRulesRegistry regis
if (clazz.isInstance(registry)) {
return clazz.cast(registry);
}
+ if (clazz.isInstance(calciteConfig)) {
+ return clazz.cast(calciteConfig);
+ }
if(clazz.isInstance(corrScalarRexSQWithAgg)) {
return clazz.cast(corrScalarRexSQWithAgg);
}
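
Note: Calcite rules recover typed objects from the planner through Context.unwrap, so the new branch lets the Druid rules fetch the CalciteConnectionConfig that CalcitePlanner now registers (a default built from empty Properties, see below). A minimal sketch of the unwrap contract, using a stand-in Context rather than the real HivePlannerContext:

```java
import java.util.Properties;

import org.apache.calcite.config.CalciteConnectionConfig;
import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.plan.Context;

public class UnwrapSketch {
  public static void main(String[] args) {
    // A default config, built the same way CalcitePlanner does in this patch.
    final CalciteConnectionConfig config =
        new CalciteConnectionConfigImpl(new Properties());
    // Stand-in for HivePlannerContext: answer unwrap() for the config type.
    final Context ctx = new Context() {
      @Override public <T> T unwrap(Class<T> clazz) {
        return clazz.isInstance(config) ? clazz.cast(config) : null;
      }
    };
    // How a planner rule retrieves the config without knowing the
    // concrete Context implementation:
    CalciteConnectionConfig found = ctx.unwrap(CalciteConnectionConfig.class);
    System.out.println(found != null);  // true
  }
}
```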
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
index 4edc4df..0b94b8a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveExtractDate.java
@@ -24,6 +24,7 @@
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.ReturnTypes;
+import org.apache.calcite.sql.type.SqlTypeTransforms;
import com.google.common.collect.Sets;
@@ -42,9 +43,10 @@
Sets.newHashSet(YEAR, QUARTER, MONTH, WEEK, DAY, HOUR, MINUTE, SECOND);
private HiveExtractDate(String name) {
- super(name, SqlKind.EXTRACT, ReturnTypes.INTEGER_NULLABLE, null,
- OperandTypes.INTERVALINTERVAL_INTERVALDATETIME,
- SqlFunctionCategory.SYSTEM);
+ super(name, SqlKind.EXTRACT,
+ ReturnTypes.cascade(ReturnTypes.INTEGER, SqlTypeTransforms.FORCE_NULLABLE), null,
+ OperandTypes.INTERVALINTERVAL_INTERVALDATETIME,
+ SqlFunctionCategory.SYSTEM);
}
}
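
Note: ReturnTypes.INTEGER_NULLABLE infers an INTEGER that is nullable only when an operand is nullable; the cascade above instead forces nullability unconditionally, presumably because EXTRACT in Hive can yield null regardless of operand nullability. A minimal sketch of the composed inference (my reading of the Calcite API, not code from this patch):

```java
import org.apache.calcite.sql.type.ReturnTypes;
import org.apache.calcite.sql.type.SqlReturnTypeInference;
import org.apache.calcite.sql.type.SqlTypeTransforms;

public class ExtractReturnTypeSketch {
  // Always-nullable INTEGER: FORCE_NULLABLE is applied on top of the base
  // INTEGER rule, independent of the nullability of the EXTRACT operands.
  static final SqlReturnTypeInference NULLABLE_INTEGER =
      ReturnTypes.cascade(ReturnTypes.INTEGER, SqlTypeTransforms.FORCE_NULLABLE);
}
```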
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
index 38d7906..81de33f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
@@ -21,6 +21,7 @@
import java.util.List;
import org.apache.calcite.plan.RelOptMaterialization;
+import org.apache.calcite.plan.RelOptMaterializations;
import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
@@ -77,7 +78,7 @@ protected void apply(RelOptRuleCall call, Project project, Filter filter, TableS
// Costing is done in transformTo(), so we call it repeatedly with all applicable
// materialized views and cheapest one will be picked
     List<RelOptMaterialization> applicableMaterializations =
- VolcanoPlanner.getApplicableMaterializations(root, materializations);
+ RelOptMaterializations.getApplicableMaterializations(root, materializations);
for (RelOptMaterialization materialization : applicableMaterializations) {
       List<RelNode> subs = new MaterializedViewSubstitutionVisitor(
materialization.queryRel, root, relBuilderFactory).go(materialization.tableRel);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
index 69e157e..9bcdd0c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
@@ -165,7 +165,7 @@ public RelOptPredicateList getPredicates(Project project, RelMetadataQuery mq) {
rexBuilder.makeInputRef(project, expr.i), expr.e));
}
}
- return RelOptPredicateList.of(projectPullUpPredicates);
+ return RelOptPredicateList.of(rexBuilder, projectPullUpPredicates);
}
/** Infers predicates for a {@link org.apache.calcite.rel.core.Join}. */
@@ -202,6 +202,7 @@ public RelOptPredicateList getPredicates(Aggregate agg, RelMetadataQuery mq) {
final RelNode input = agg.getInput();
final RelOptPredicateList inputInfo = mq.getPulledUpPredicates(input);
     final List<RexNode> aggPullUpPredicates = new ArrayList<>();
+ final RexBuilder rexBuilder = agg.getCluster().getRexBuilder();
ImmutableBitSet groupKeys = agg.getGroupSet();
Mapping m = Mappings.create(MappingType.PARTIAL_FUNCTION,
@@ -219,7 +220,7 @@ public RelOptPredicateList getPredicates(Aggregate agg, RelMetadataQuery mq) {
aggPullUpPredicates.add(r);
}
}
- return RelOptPredicateList.of(aggPullUpPredicates);
+ return RelOptPredicateList.of(rexBuilder, aggPullUpPredicates);
}
/**
@@ -271,7 +272,7 @@ public RelOptPredicateList getPredicates(Union union, RelMetadataQuery mq) {
if (!disjPred.isAlwaysTrue()) {
preds.add(disjPred);
}
- return RelOptPredicateList.of(preds);
+ return RelOptPredicateList.of(rB, preds);
}
/**
@@ -411,6 +412,7 @@ public RelOptPredicateList inferPredicates(
final JoinRelType joinType = joinRel.getJoinType();
     final List<RexNode> leftPreds = ImmutableList.copyOf(RelOptUtil.conjunctions(leftChildPredicates));
     final List<RexNode> rightPreds = ImmutableList.copyOf(RelOptUtil.conjunctions(rightChildPredicates));
+ final RexBuilder rexBuilder = joinRel.getCluster().getRexBuilder();
switch (joinType) {
case INNER:
case LEFT:
@@ -476,13 +478,13 @@ public RelOptPredicateList inferPredicates(
pulledUpPredicates = Iterables.concat(leftPreds, rightPreds,
RelOptUtil.conjunctions(joinRel.getCondition()), inferredPredicates);
}
- return RelOptPredicateList.of(
+ return RelOptPredicateList.of(rexBuilder,
pulledUpPredicates, leftInferredPredicates, rightInferredPredicates);
case LEFT:
- return RelOptPredicateList.of(
+ return RelOptPredicateList.of(rexBuilder,
leftPreds, EMPTY_LIST, rightInferredPredicates);
case RIGHT:
- return RelOptPredicateList.of(
+ return RelOptPredicateList.of(rexBuilder,
rightPreds, leftInferredPredicates, EMPTY_LIST);
default:
assert inferredPredicates.size() == 0;
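
Note: all of these call sites track the same Calcite 1.12 signature change: RelOptPredicateList.of now takes the RexBuilder it uses when combining predicates. A small usage sketch (illustrative; `rel` stands in for whatever RelNode is at hand):

```java
import java.util.Collections;

import org.apache.calcite.plan.RelOptPredicateList;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;

public class PredicateListSketch {
  static RelOptPredicateList emptyPredicates(RelNode rel) {
    // The builder comes from the node's cluster, as in the handlers above.
    final RexBuilder rexBuilder = rel.getCluster().getRexBuilder();
    return RelOptPredicateList.of(rexBuilder, Collections.<RexNode>emptyList());
  }
}
```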
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
index e36e1bd..5f5e860 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
@@ -37,6 +37,8 @@
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
class ASTBuilder {
@@ -269,19 +271,21 @@ static ASTNode literal(RexLiteral literal, boolean useTypeQualInLiteral) {
type = ((Boolean) val).booleanValue() ? HiveParser.KW_TRUE : HiveParser.KW_FALSE;
break;
case DATE: {
- val = literal.getValue();
+ final Calendar c = (Calendar) literal.getValue();
+ final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
type = HiveParser.TOK_DATELITERAL;
DateFormat df = new SimpleDateFormat("yyyy-MM-dd");
- val = df.format(((Calendar) val).getTime());
+ val = df.format(dt.withZoneRetainFields(DateTimeZone.getDefault()).toDate());
val = "'" + val + "'";
}
break;
case TIME:
case TIMESTAMP: {
- val = literal.getValue();
+ final Calendar c = (Calendar) literal.getValue();
+ final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
type = HiveParser.TOK_TIMESTAMPLITERAL;
DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
- val = df.format(((Calendar) val).getTime());
+ val = df.format(dt.withZoneRetainFields(DateTimeZone.getDefault()).toDate());
val = "'" + val + "'";
}
break;
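
Note: the Calendar inside a RexLiteral carries its own time zone (GMT by Calcite convention), while SimpleDateFormat prints in the JVM default zone; withZoneRetainFields re-stamps the literal's wall-clock fields in the default zone so formatting leaves them intact. An illustrative round trip (not the Hive code):

```java
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class LiteralFormatSketch {
  public static void main(String[] args) {
    // A literal as Calcite stores it: wall-clock fields anchored in GMT.
    Calendar c = Calendar.getInstance(TimeZone.getTimeZone("GMT"), Locale.US);
    c.clear();
    c.set(2014, Calendar.JANUARY, 1, 12, 30, 0);
    DateTime dt = new DateTime(c.getTimeInMillis(),
        DateTimeZone.forTimeZone(c.getTimeZone()));
    // Same yyyy-MM-dd HH:mm:ss fields, now anchored in the default zone,
    // so a default-zone SimpleDateFormat prints them unchanged.
    Date local = dt.withZoneRetainFields(DateTimeZone.getDefault()).toDate();
    System.out.println(
        new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS").format(local));
  }
}
```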
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
index 27990a2..165f8c4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
@@ -24,7 +24,6 @@
import java.util.Map;
import org.apache.calcite.adapter.druid.DruidQuery;
-import org.apache.calcite.avatica.util.TimeUnitRange;
import org.apache.calcite.rel.RelFieldCollation;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.RelVisitor;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index e840938..b1efbbd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
import java.math.BigDecimal;
-import java.sql.Date;
import java.sql.Timestamp;
import java.util.ArrayList;
import java.util.Calendar;
@@ -75,6 +74,8 @@
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -291,16 +292,17 @@ public ExprNodeDesc visitLiteral(RexLiteral literal) {
case DOUBLE:
return new ExprNodeConstantDesc(TypeInfoFactory.doubleTypeInfo,
Double.valueOf(((Number) literal.getValue3()).doubleValue()));
- case DATE:
+ case DATE: {
+ final Calendar c = (Calendar) literal.getValue();
return new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo,
- new Date(((Calendar)literal.getValue()).getTimeInMillis()));
+ new java.sql.Date(c.getTimeInMillis()));
+ }
case TIME:
case TIMESTAMP: {
- Object value = literal.getValue3();
- if (value instanceof Long) {
- value = new Timestamp((Long)value);
- }
- return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo, value);
+ final Calendar c = (Calendar) literal.getValue();
+ final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
+ return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo,
+ new Timestamp(dt.getMillis()));
}
case BINARY:
return new ExprNodeConstantDesc(TypeInfoFactory.binaryTypeInfo, literal.getValue3());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index a05b89c..26c3bc8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -23,11 +23,12 @@
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
-import java.util.GregorianCalendar;
import java.util.LinkedHashMap;
import java.util.List;
+import java.util.Locale;
import java.util.Map;
+import org.apache.calcite.avatica.util.DateTimeUtils;
import org.apache.calcite.avatica.util.TimeUnit;
import org.apache.calcite.avatica.util.TimeUnitRange;
import org.apache.calcite.plan.RelOptCluster;
@@ -38,8 +39,8 @@
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexCall;
import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexUtil;
import org.apache.calcite.rex.RexSubQuery;
+import org.apache.calcite.rex.RexUtil;
import org.apache.calcite.sql.SqlCollation;
import org.apache.calcite.sql.SqlIntervalQualifier;
import org.apache.calcite.sql.SqlKind;
@@ -96,6 +97,8 @@
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableList.Builder;
@@ -634,20 +637,22 @@ protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticException
calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
break;
case DATE:
- Calendar cal = new GregorianCalendar();
- cal.setTime((Date) value);
- calciteLiteral = rexBuilder.makeDateLiteral(cal);
- break;
- case TIMESTAMP:
- Calendar c = null;
- if (value instanceof Calendar) {
- c = (Calendar)value;
- } else {
- c = Calendar.getInstance();
- c.setTimeInMillis(((Timestamp)value).getTime());
- }
- calciteLiteral = rexBuilder.makeTimestampLiteral(c, RelDataType.PRECISION_NOT_SPECIFIED);
- break;
+ final Calendar cal = Calendar.getInstance(DateTimeUtils.GMT_ZONE, Locale.getDefault());
+ cal.setTime((Date) value);
+ calciteLiteral = rexBuilder.makeDateLiteral(cal);
+ break;
+ case TIMESTAMP:
+ final Calendar calt = Calendar.getInstance(DateTimeUtils.GMT_ZONE, Locale.getDefault());
+ if (value instanceof Calendar) {
+ final Calendar c = (Calendar) value;
+ long timeMs = c.getTimeInMillis();
+ calt.setTimeInMillis(timeMs - c.getTimeZone().getOffset(timeMs));
+ } else {
+ final Timestamp ts = (Timestamp) value;
+ calt.setTimeInMillis(ts.getTime() - (ts.getTimezoneOffset() * 60 * 1000));
+ }
+ calciteLiteral = rexBuilder.makeTimestampLiteral(calt, RelDataType.PRECISION_NOT_SPECIFIED);
+ break;
case INTERVAL_YEAR_MONTH:
// Calcite year-month literal value is months as BigDecimal
BigDecimal totalMonths = BigDecimal.valueOf(((HiveIntervalYearMonth) value).getTotalMonths());
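
Note: Calcite interprets date and timestamp literal calendars in GMT, so the new code shifts the local instant by its zone offset to make the GMT calendar show the same wall-clock fields. A hedged sketch of the Timestamp branch, mirroring the arithmetic above (not a general-purpose conversion):

```java
import java.sql.Timestamp;
import java.util.Calendar;
import java.util.Locale;

import org.apache.calcite.avatica.util.DateTimeUtils;

public class GmtCalendarSketch {
  // Shift a local Timestamp into a GMT calendar whose fields match the
  // local wall clock, ready for RexBuilder.makeTimestampLiteral.
  @SuppressWarnings("deprecation")  // getTimezoneOffset, as used in the patch
  static Calendar toGmtCalendar(Timestamp ts) {
    final Calendar calt =
        Calendar.getInstance(DateTimeUtils.GMT_ZONE, Locale.getDefault());
    calt.setTimeInMillis(ts.getTime() - (ts.getTimezoneOffset() * 60 * 1000));
    return calt;
  }
}
```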
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index e7687be..c543956 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -38,6 +38,7 @@
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+import java.util.Properties;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -50,6 +51,9 @@
import org.apache.calcite.adapter.druid.DruidRules;
import org.apache.calcite.adapter.druid.DruidSchema;
import org.apache.calcite.adapter.druid.DruidTable;
+import org.apache.calcite.adapter.druid.LocalInterval;
+import org.apache.calcite.config.CalciteConnectionConfig;
+import org.apache.calcite.config.CalciteConnectionConfigImpl;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptMaterialization;
import org.apache.calcite.plan.RelOptPlanner;
@@ -1263,7 +1267,9 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlus
conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD);
HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory);
HiveRulesRegistry registry = new HiveRulesRegistry();
- HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry, corrScalarRexSQWithAgg);
+ CalciteConnectionConfig calciteConfig = new CalciteConnectionConfigImpl(new Properties());
+ HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry, calciteConfig,
+ corrScalarRexSQWithAgg);
RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext);
final RexBuilder rexBuilder = cluster.getRexBuilder();
final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder);
@@ -2213,7 +2219,7 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticException
}
metrics.add(field.getName());
}
-    List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
+    List<LocalInterval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
@@ -4108,5 +4114,4 @@ private QBParseInfo getQBParseInfo(QB qb) throws CalciteSemanticException {
DRUID,
NATIVE
}
-
}
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
index 7229cc7..4823950 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/TestCBORuleFiredOnlyOnce.java
@@ -61,7 +61,7 @@ public void testRuleFiredOnlyOnce() {
// Create rules registry to not trigger a rule more than once
HiveRulesRegistry registry = new HiveRulesRegistry();
- HivePlannerContext context = new HivePlannerContext(null, registry, null);
+ HivePlannerContext context = new HivePlannerContext(null, registry, null, null);
HepPlanner planner = new HepPlanner(programBuilder.build(), context);
// Cluster
diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out b/ql/src/test/results/clientpositive/druid_basic2.q.out
index bc9410b..d54d0a5 100644
--- a/ql/src/test/results/clientpositive/druid_basic2.q.out
+++ b/ql/src/test/results/clientpositive/druid_basic2.q.out
@@ -77,7 +77,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
@@ -104,7 +104,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":[],"metrics":["delta"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":[],"metrics":["delta"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
@@ -135,7 +135,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
@@ -166,7 +166,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":["robot"],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":["robot"],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
@@ -212,7 +212,7 @@ STAGE PLANS:
alias: druid_table_1
filterExpr: language is not null (type: boolean)
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
@@ -237,7 +237,7 @@ STAGE PLANS:
alias: druid_table_1
filterExpr: language is not null (type: boolean)
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
@@ -274,7 +274,7 @@ STAGE PLANS:
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
#### A masked pattern was here ####
name default.druid_table_1
@@ -300,7 +300,7 @@ STAGE PLANS:
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
#### A masked pattern was here ####
name default.druid_table_1
@@ -399,7 +399,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
GatherStats: false
@@ -414,7 +414,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
@@ -442,7 +442,7 @@ STAGE PLANS:
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
#### A masked pattern was here ####
name default.druid_table_1
@@ -468,7 +468,7 @@ STAGE PLANS:
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"filter":{"type":"selector","dimension":"language","value":"en"},"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
#### A masked pattern was here ####
name default.druid_table_1
@@ -545,8 +545,7 @@ LIMIT 10
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
- Stage-2 depends on stages: Stage-1
- Stage-0 depends on stages: Stage-2
+ Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
@@ -554,27 +553,25 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: druid_table_1
+ properties:
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"day","dimensions":["robot","language"],"limitSpec":{"type":"default"},"aggregations":[{"type":"longMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
Select Operator
- expressions: robot (type: string), language (type: string), __time (type: timestamp), added (type: float), delta (type: float)
- outputColumnNames: robot, language, __time, added, delta
+ expressions: robot (type: string), __time (type: timestamp), $f3 (type: bigint), $f4 (type: float)
+ outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Group By Operator
- aggregations: max(added), sum(delta)
- keys: robot (type: string), language (type: string), floor_day(__time) (type: timestamp)
- mode: hash
- outputColumnNames: _col0, _col1, _col2, _col3, _col4
+ Reduce Output Operator
+ key expressions: UDFToInteger(_col0) (type: int), _col2 (type: bigint)
+ null sort order: az
+ sort order: +-
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp)
- null sort order: aaa
- sort order: +++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: timestamp)
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- tag: -1
- value expressions: _col3 (type: float), _col4 (type: double)
- auto parallelism: false
+ tag: -1
+ TopN: 10
+ TopN Hash Memory Usage: 0.1
+ value expressions: _col0 (type: string), _col1 (type: timestamp), _col3 (type: float)
+ auto parallelism: false
Path -> Alias:
#### A masked pattern was here ####
Path -> Partition:
@@ -592,6 +589,8 @@ STAGE PLANS:
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"day","dimensions":["robot","language"],"limitSpec":{"type":"default"},"aggregations":[{"type":"longMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.type groupBy
#### A masked pattern was here ####
name default.druid_table_1
numFiles 0
@@ -616,6 +615,8 @@ STAGE PLANS:
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"day","dimensions":["robot","language"],"limitSpec":{"type":"default"},"aggregations":[{"type":"longMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.type groupBy
#### A masked pattern was here ####
name default.druid_table_1
numFiles 0
@@ -634,81 +635,8 @@ STAGE PLANS:
/druid_table_1 [druid_table_1]
Needs Tagging: false
Reduce Operator Tree:
- Group By Operator
- aggregations: max(VALUE._col0), sum(VALUE._col1)
- keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: timestamp)
- mode: mergepartial
- outputColumnNames: _col0, _col1, _col2, _col3, _col4
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- Select Operator
- expressions: _col0 (type: string), _col2 (type: timestamp), _col3 (type: float), _col4 (type: double)
- outputColumnNames: _col0, _col1, _col2, _col3
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- File Output Operator
- compressed: false
- GlobalTableId: 0
-#### A masked pattern was here ####
- NumFilesPerFileSink: 1
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- properties:
- column.name.delimiter ,
- columns _col0,_col1,_col2,_col3
- columns.types string,timestamp,float,double
- escape.delim \
- serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
- serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
- TotalFiles: 1
- GatherStats: false
- MultiFileSpray: false
-
- Stage: Stage-2
- Map Reduce
- Map Operator Tree:
- TableScan
- GatherStats: false
- Reduce Output Operator
- key expressions: UDFToInteger(_col0) (type: int), _col2 (type: float)
- null sort order: az
- sort order: +-
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
- tag: -1
- TopN: 10
- TopN Hash Memory Usage: 0.1
- value expressions: _col0 (type: string), _col1 (type: timestamp), _col3 (type: double)
- auto parallelism: false
- Path -> Alias:
-#### A masked pattern was here ####
- Path -> Partition:
-#### A masked pattern was here ####
- Partition
- base file name: -mr-10004
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- properties:
- column.name.delimiter ,
- columns _col0,_col1,_col2,_col3
- columns.types string,timestamp,float,double
- escape.delim \
- serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
- serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- properties:
- column.name.delimiter ,
- columns _col0,_col1,_col2,_col3
- columns.types string,timestamp,float,double
- escape.delim \
- serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
- serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
- Truncated Path -> Alias:
-#### A masked pattern was here ####
- Needs Tagging: false
- Reduce Operator Tree:
Select Operator
- expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp), KEY.reducesinkkey1 (type: float), VALUE._col2 (type: double)
+ expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp), KEY.reducesinkkey1 (type: bigint), VALUE._col2 (type: float)
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Limit
@@ -726,7 +654,7 @@ STAGE PLANS:
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
properties:
columns _col0,_col1,_col2,_col3
- columns.types string:timestamp:float:double
+ columns.types string:timestamp:bigint:float
escape.delim \
hive.serialization.extend.additional.nesting.levels true
serialization.escape.crlf true
diff --git a/ql/src/test/results/clientpositive/druid_timeseries.q.out b/ql/src/test/results/clientpositive/druid_timeseries.q.out
index 6b2ffe9..372927b 100644
--- a/ql/src/test/results/clientpositive/druid_timeseries.q.out
+++ b/ql/src/test/results/clientpositive/druid_timeseries.q.out
@@ -79,7 +79,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"all","aggregations":[{"type":"longMax","name":"$f0","fieldName":"added"},{"type":"doubleSum","name":"$f1","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"all","aggregations":[{"type":"longMax","name":"$f0","fieldName":"added"},{"type":"doubleSum","name":"$f1","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -109,7 +109,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"NONE","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"none","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -139,7 +139,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"YEAR","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"year","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -169,7 +169,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"QUARTER","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"quarter","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -199,7 +199,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"MONTH","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"month","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -229,7 +229,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"WEEK","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"week","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -259,7 +259,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"DAY","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"day","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -289,7 +289,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"HOUR","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"hour","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -319,7 +319,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"MINUTE","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"minute","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -349,7 +349,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"SECOND","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"second","aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -381,7 +381,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"HOUR","filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"]}
+ druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"hour","filter":{"type":"selector","dimension":"robot","value":"1"},"aggregations":[{"type":"longMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
druid.query.type timeseries
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -418,7 +418,7 @@ STAGE PLANS:
alias: druid_table_1
filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
@@ -495,7 +495,7 @@ STAGE PLANS:
alias: druid_table_1
filterExpr: floor_hour(__time) BETWEEN 2010-01-01 00:00:00.0 AND 2014-01-01 00:00:00.0 (type: boolean)
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot","namespace","anonymous","unpatrolled","page","language","newpage","user"],"metrics":["count","added","delta","variation","deleted"],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator