aggCalls) {
- return new HiveAggregate(getCluster(), traitSet, input, indicator, groupSet,
- groupSets, aggCalls);
+ if (indicator) {
+ throw new IllegalStateException("Hive does not support indicator columns but tried "
+ + "to create an Aggregate operator containing them");
+ }
+ return new HiveAggregate(getCluster(), traitSet, input,
+ groupSet, groupSets, aggCalls);
}
@Override
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveGroupingID.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveGroupingID.java
index 2fb9508..adcda26 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveGroupingID.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveGroupingID.java
@@ -17,17 +17,17 @@
*/
package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
-import org.apache.calcite.sql.SqlAggFunction;
import org.apache.calcite.sql.SqlFunctionCategory;
import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.fun.SqlAbstractGroupFunction;
import org.apache.calcite.sql.type.InferTypes;
import org.apache.calcite.sql.type.OperandTypes;
import org.apache.calcite.sql.type.ReturnTypes;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-public class HiveGroupingID extends SqlAggFunction {
+public class HiveGroupingID extends SqlAbstractGroupFunction {
- public static final SqlAggFunction INSTANCE =
+ public static final HiveGroupingID INSTANCE =
new HiveGroupingID();
private HiveGroupingID() {
@@ -36,7 +36,7 @@ private HiveGroupingID() {
ReturnTypes.INTEGER,
InferTypes.BOOLEAN,
OperandTypes.NILADIC,
- SqlFunctionCategory.USER_DEFINED_FUNCTION);
+ SqlFunctionCategory.SYSTEM);
}
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java
index 7c3b4b0..ec182cd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java
@@ -16,27 +16,15 @@
*/
package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
-import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.Aggregate;
import org.apache.calcite.rel.core.AggregateCall;
-import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.util.ImmutableBitSet;
+import org.apache.calcite.rel.rules.AggregateProjectMergeRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelOptUtil;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveGroupingID;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
/**
* Planner rule that recognizes a {@link HiveAggregate}
* on top of a {@link HiveProject} and if possible
@@ -48,15 +36,13 @@
* In some cases, this rule has the effect of trimming: the aggregate will
* use fewer columns than the project did.
*/
-public class HiveAggregateProjectMergeRule extends RelOptRule {
+public class HiveAggregateProjectMergeRule extends AggregateProjectMergeRule {
public static final HiveAggregateProjectMergeRule INSTANCE =
new HiveAggregateProjectMergeRule();
/** Private constructor. */
private HiveAggregateProjectMergeRule() {
- super(
- operand(HiveAggregate.class,
- operand(HiveProject.class, any())));
+ super(HiveAggregate.class, HiveProject.class, HiveRelFactories.HIVE_BUILDER);
}
@Override
@@ -72,97 +58,6 @@ public boolean matches(RelOptRuleCall call) {
return super.matches(call);
}
- @Override
- public void onMatch(RelOptRuleCall call) {
- final HiveAggregate aggregate = call.rel(0);
- final HiveProject project = call.rel(1);
- RelNode x = apply(aggregate, project);
- if (x != null) {
- call.transformTo(x);
- }
- }
-
- public static RelNode apply(HiveAggregate aggregate,
- HiveProject project) {
- final List&lt;Integer&gt; newKeys = Lists.newArrayList();
- final Map&lt;Integer, Integer&gt; map = new HashMap&lt;&gt;();
- for (int key : aggregate.getGroupSet()) {
- final RexNode rex = project.getProjects().get(key);
- if (rex instanceof RexInputRef) {
- final int newKey = ((RexInputRef) rex).getIndex();
- newKeys.add(newKey);
- map.put(key, newKey);
- } else {
- // Cannot handle "GROUP BY expression"
- return null;
- }
- }
-
- final ImmutableBitSet newGroupSet = aggregate.getGroupSet().permute(map);
- ImmutableList&lt;ImmutableBitSet&gt; newGroupingSets = null;
- if (aggregate.indicator) {
- newGroupingSets =
- ImmutableBitSet.ORDERING.immutableSortedCopy(
- ImmutableBitSet.permute(aggregate.getGroupSets(), map));
- }
-
- final ImmutableList.Builder&lt;AggregateCall&gt; aggCalls =
- ImmutableList.builder();
- for (AggregateCall aggregateCall : aggregate.getAggCallList()) {
- final ImmutableList.Builder&lt;Integer&gt; newArgs = ImmutableList.builder();
- for (int arg : aggregateCall.getArgList()) {
- final RexNode rex = project.getProjects().get(arg);
- if (rex instanceof RexInputRef) {
- newArgs.add(((RexInputRef) rex).getIndex());
- } else {
- // Cannot handle "AGG(expression)"
- return null;
- }
- }
- final int newFilterArg;
- if (aggregateCall.filterArg >= 0) {
- final RexNode rex = project.getProjects().get(aggregateCall.filterArg);
- if (!(rex instanceof RexInputRef)) {
- return null;
- }
- newFilterArg = ((RexInputRef) rex).getIndex();
- } else {
- newFilterArg = -1;
- }
- aggCalls.add(aggregateCall.copy(newArgs.build(), newFilterArg));
- }
-
- final Aggregate newAggregate =
- aggregate.copy(aggregate.getTraitSet(), project.getInput(),
- aggregate.indicator, newGroupSet, newGroupingSets,
- aggCalls.build());
-
- // Add a project if the group set is not in the same order or
- // contains duplicates.
- RelNode rel = newAggregate;
- if (!newKeys.equals(newGroupSet.asList())) {
- final List&lt;Integer&gt; posList = Lists.newArrayList();
- for (int newKey : newKeys) {
- posList.add(newGroupSet.indexOf(newKey));
- }
- if (aggregate.indicator) {
- for (int newKey : newKeys) {
- posList.add(aggregate.getGroupCount() + newGroupSet.indexOf(newKey));
- }
- }
- for (int i = newAggregate.getGroupCount()
- + newAggregate.getIndicatorCount();
- i < newAggregate.getRowType().getFieldCount(); i++) {
- posList.add(i);
- }
- rel = HiveRelOptUtil.createProject(
- HiveRelFactories.HIVE_BUILDER.create(aggregate.getCluster(), null),
- rel, posList);
-
- }
-
- return rel;
- }
}
-// End AggregateProjectMergeRule.java
+// End HiveAggregateProjectMergeRule.java
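
The bulk of the deleted apply() above is the key-remapping bookkeeping that Calcite's AggregateProjectMergeRule now performs on Hive's behalf. A minimal standalone sketch of that remapping step (my own illustration, not patch code; the class name and the int[] standing in for a Project of plain RexInputRefs are hypothetical; assumes a Calcite jar on the classpath):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.calcite.util.ImmutableBitSet;

    public class MergeKeyDemo {
      public static void main(String[] args) {
        // Project emits $0 := input $3 and $1 := input $7 (both RexInputRefs),
        // so group keys {0, 1} of the Aggregate above it remap to {3, 7}.
        int[] projectSource = {3, 7};
        ImmutableBitSet groupSet = ImmutableBitSet.of(0, 1);
        Map<Integer, Integer> map = new HashMap<>();
        for (int key : groupSet) {
          map.put(key, projectSource[key]);
        }
        System.out.println(groupSet.permute(map)); // {3, 7}
      }
    }

Any grouping key or aggregate argument that is not a plain input reference aborts the merge, exactly as the removed "Cannot handle" branches did.
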
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
index b63ea02..5b72dbd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
@@ -27,7 +27,6 @@
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Aggregate;
import org.apache.calcite.rel.core.AggregateCall;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeField;
@@ -35,17 +34,13 @@
import org.apache.calcite.rex.RexInputRef;
import org.apache.calcite.rex.RexLiteral;
import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.sql.SqlAggFunction;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.SqlOperator;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.calcite.util.ImmutableBitSet;
import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveExcept;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIntersect;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableFunctionScan;
@@ -53,12 +48,7 @@
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter;
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.apache.calcite.tools.RelBuilder;
-import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.Util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -169,7 +159,7 @@ public void onMatch(RelOptRuleCall call) {
final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
HiveRelNode aggregateRel = new HiveAggregate(cluster,
- cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel, false, groupSet, null,
+ cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel, groupSet, null,
aggregateCalls);
// the schema after GB is like this
@@ -270,7 +260,7 @@ private RelNode createFirstGB(RelNode input, boolean left, RelOptCluster cluster
TypeInfoFactory.longTypeInfo, input.getRowType().getFieldList().size(), aggFnRetType);
aggregateCalls.add(aggregateCall);
return new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel,
- false, groupSet, null, aggregateCalls);
+ groupSet, null, aggregateCalls);
}
private RexNode multiply(RexNode r1, RexNode r2, RelOptCluster cluster, RexBuilder rexBuilder)
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java
index 81bb730..89c5c23 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java
@@ -268,7 +268,7 @@ public RexNode apply(RelDataTypeField input) {
aggregateCalls.add(aggregateCall);
}
Aggregate aggregate = new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel,
- false, ImmutableBitSet.of(), null, aggregateCalls);
+ ImmutableBitSet.of(), null, aggregateCalls);
// create the project after GB. For those repeated values, e.g., select
// count(distinct x, y), count(distinct y, x), we find the correct mapping.
@@ -330,7 +330,7 @@ private Aggregate createGroupingSets(Aggregate aggregate, List&lt;List&lt;Integer&gt;&gt; ar
.createSqlType(SqlTypeName.INTEGER), HiveGroupingID.INSTANCE.getName());
aggregateCalls.add(aggCall);
return new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
- aggregate.getInput(), true, groupSet, origGroupSets, aggregateCalls);
+ aggregate.getInput(), groupSet, origGroupSets, aggregateCalls);
}
/**
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveIntersectRewriteRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveIntersectRewriteRule.java
index 5b0a7d7..a5d950a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveIntersectRewriteRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveIntersectRewriteRule.java
@@ -27,18 +27,14 @@
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptRuleCall;
import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Aggregate;
import org.apache.calcite.rel.core.AggregateCall;
-import org.apache.calcite.rel.core.Intersect;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexInputRef;
import org.apache.calcite.rex.RexLiteral;
import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.sql.SqlAggFunction;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.util.ImmutableBitSet;
import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
@@ -52,16 +48,10 @@
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter;
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.apache.calcite.tools.RelBuilder;
-import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.Util;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.esotericsoftware.minlog.Log;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableList.Builder;
@@ -140,7 +130,7 @@ public void onMatch(RelOptRuleCall call) {
aggregateCalls.add(aggregateCall);
HiveRelNode aggregateRel = new HiveAggregate(cluster,
- cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel, false, groupSet, null,
+ cluster.traitSetOf(HiveRelNode.CONVENTION), gbInputRel, groupSet, null,
aggregateCalls);
bldr.add(aggregateRel);
}
@@ -173,7 +163,7 @@ public void onMatch(RelOptRuleCall call) {
final ImmutableBitSet groupSet = ImmutableBitSet.of(groupSetPositions);
HiveRelNode aggregateRel = new HiveAggregate(cluster,
- cluster.traitSetOf(HiveRelNode.CONVENTION), union, false, groupSet, null, aggregateCalls);
+ cluster.traitSetOf(HiveRelNode.CONVENTION), union, groupSet, null, aggregateCalls);
// add a filter count(c) = #branches
int countInd = cInd;
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java
index 0fd3217..c94520a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java
@@ -17,6 +17,24 @@
*/
package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Objects;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.Stack;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import javax.annotation.Nonnull;
+
import org.apache.calcite.linq4j.Ord;
import org.apache.calcite.linq4j.function.Function2;
import org.apache.calcite.plan.Context;
@@ -85,6 +103,7 @@
import org.apache.calcite.util.Stacks;
import org.apache.calcite.util.Util;
import org.apache.calcite.util.mapping.Mappings;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelShuttleImpl;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIntersect;
@@ -92,6 +111,7 @@
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -107,25 +127,6 @@
import com.google.common.collect.Multimaps;
import com.google.common.collect.Sets;
import com.google.common.collect.SortedSetMultimap;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelShuttleImpl;
-
-import java.math.BigDecimal;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Objects;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.Stack;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import javax.annotation.Nonnull;
/**
* NOTE: this whole logic is replicated from Calcite's RelDecorrelator
@@ -838,7 +839,7 @@ public Frame decorrelateRel(HiveAggregate rel) throws SemanticException{
}
relBuilder.push(
- new HiveAggregate(rel.getCluster(), rel.getTraitSet(), newProject, false, newGroupSet, null, newAggCalls) );
+ new HiveAggregate(rel.getCluster(), rel.getTraitSet(), newProject, newGroupSet, null, newAggCalls) );
if (!omittedConstants.isEmpty()) {
final List&lt;RexNode&gt; postProjects = new ArrayList&lt;&gt;(relBuilder.fields());
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
index 165f8c4..162b3e3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
@@ -59,8 +59,6 @@
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveExtractDate;
-import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFloorDate;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveGroupingID;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableFunctionScan;
@@ -130,22 +128,23 @@ private ASTNode convert() throws CalciteSemanticException {
if (groupBy != null) {
ASTBuilder b;
boolean groupingSetsExpression = false;
- if (groupBy.indicator) {
- Group aggregateType = Aggregate.Group.induce(groupBy.getGroupSet(),
- groupBy.getGroupSets());
- if (aggregateType == Group.ROLLUP) {
+ Group aggregateType = groupBy.getGroupType();
+ switch (aggregateType) {
+ case SIMPLE:
+ b = ASTBuilder.construct(HiveParser.TOK_GROUPBY, "TOK_GROUPBY");
+ break;
+ case ROLLUP:
b = ASTBuilder.construct(HiveParser.TOK_ROLLUP_GROUPBY, "TOK_ROLLUP_GROUPBY");
- }
- else if (aggregateType == Group.CUBE) {
+ break;
+ case CUBE:
b = ASTBuilder.construct(HiveParser.TOK_CUBE_GROUPBY, "TOK_CUBE_GROUPBY");
- }
- else {
+ break;
+ case OTHER:
b = ASTBuilder.construct(HiveParser.TOK_GROUPING_SETS, "TOK_GROUPING_SETS");
groupingSetsExpression = true;
- }
- }
- else {
- b = ASTBuilder.construct(HiveParser.TOK_GROUPBY, "TOK_GROUPBY");
+ break;
+ default:
+ throw new CalciteSemanticException("Group type not recognized");
}
HiveAggregate hiveAgg = (HiveAggregate) groupBy;
@@ -197,14 +196,15 @@ else if (aggregateType == Group.CUBE) {
ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_SELECT, "TOK_SELECT");
if (select instanceof Project) {
- if (select.getChildExps().isEmpty()) {
+ List&lt;RexNode&gt; childExps = ((Project) select).getChildExps();
+ if (childExps.isEmpty()) {
RexLiteral r = select.getCluster().getRexBuilder().makeExactLiteral(new BigDecimal(1));
ASTNode selectExpr = ASTBuilder.selectExpr(ASTBuilder.literal(r), "1");
b.add(selectExpr);
} else {
int i = 0;
- for (RexNode r : select.getChildExps()) {
+ for (RexNode r : childExps) {
if (RexUtil.isNull(r) && r.getType().getSqlTypeName() != SqlTypeName.NULL) {
// It is NULL value with different type, we need to introduce a CAST
// to keep it
@@ -739,15 +739,6 @@ public QueryBlockInfo(Schema schema, ASTNode ast) {
ColumnInfo cI = src.get(i);
add(cI);
}
- // If we are using grouping sets, we add the
- // fields again, these correspond to the boolean
- // grouping in Calcite. They are not used by Hive.
- if(gBy.indicator) {
- for (int i : gBy.getGroupSet()) {
- ColumnInfo cI = src.get(i);
- add(cI);
- }
- }
List&lt;AggregateCall&gt; aggs = gBy.getAggCallList();
for (AggregateCall agg : aggs) {
if (agg.getAggregation() == HiveGroupingID.INSTANCE) {
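
The switch introduced above replaces the indicator-based branching with Aggregate.getGroupType(), which classifies the group sets directly. A small runnable sketch of the underlying classification (illustrative; class name is mine; Group.induce is the same helper the deleted code called explicitly, so this assumes only a Calcite jar on the classpath):

    import java.util.Arrays;
    import org.apache.calcite.rel.core.Aggregate.Group;
    import org.apache.calcite.util.ImmutableBitSet;

    public class GroupTypeDemo {
      public static void main(String[] args) {
        ImmutableBitSet keys = ImmutableBitSet.of(0, 1);
        // ROLLUP(a, b) yields the group sets {a,b}, {a}, {} in sorted order.
        Group type = Group.induce(keys, Arrays.asList(
            ImmutableBitSet.of(0, 1), ImmutableBitSet.of(0), ImmutableBitSet.of()));
        System.out.println(type); // ROLLUP
      }
    }
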
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index 3dcceab..b0c8504 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -45,13 +45,12 @@
import org.apache.calcite.util.DateString;
import org.apache.calcite.util.TimeString;
import org.apache.calcite.util.TimestampString;
+import org.apache.calcite.util.TimestampWithTimeZoneString;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
-import org.apache.hadoop.hive.common.type.TimestampTZ;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveType;
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.RexVisitor;
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.Schema;
import org.apache.hadoop.hive.ql.parse.ASTNode;
@@ -244,6 +243,8 @@ public ExprNodeDesc visitLiteral(RexLiteral literal) {
case TIME:
case TIMESTAMP:
return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo, null);
+ case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
+ return new ExprNodeConstantDesc(TypeInfoFactory.timestampLocalTZTypeInfo, null);
case BINARY:
return new ExprNodeConstantDesc(TypeInfoFactory.binaryTypeInfo, null);
case DECIMAL:
@@ -267,12 +268,7 @@ public ExprNodeDesc visitLiteral(RexLiteral literal) {
case INTERVAL_MINUTE_SECOND:
case INTERVAL_SECOND:
return new ExprNodeConstantDesc(TypeInfoFactory.intervalDayTimeTypeInfo, null);
- case NULL:
- case OTHER:
default:
- if (lType instanceof HiveType && ((HiveType) lType).getTypeClass() == TimestampTZ.class) {
- return new ExprNodeConstantDesc(TypeInfoFactory.timestampLocalTZTypeInfo, null);
- }
return new ExprNodeConstantDesc(TypeInfoFactory.voidTypeInfo, null);
}
} else {
@@ -308,6 +304,9 @@ public ExprNodeDesc visitLiteral(RexLiteral literal) {
case TIMESTAMP:
return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo,
Timestamp.valueOf(literal.getValueAs(TimestampString.class).toString()));
+ case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
+ return new ExprNodeConstantDesc(TypeInfoFactory.timestampLocalTZTypeInfo,
+ literal.getValueAs(TimestampWithTimeZoneString.class).toString());
case BINARY:
return new ExprNodeConstantDesc(TypeInfoFactory.binaryTypeInfo, literal.getValue3());
case DECIMAL:
@@ -340,12 +339,7 @@ public ExprNodeDesc visitLiteral(RexLiteral literal) {
return new ExprNodeConstantDesc(TypeInfoFactory.intervalDayTimeTypeInfo,
new HiveIntervalDayTime(secsBd));
}
- case NULL:
- case OTHER:
default:
- if (lType instanceof HiveType && ((HiveType) lType).getTypeClass() == TimestampTZ.class) {
- return new ExprNodeConstantDesc(TypeInfoFactory.timestampLocalTZTypeInfo, literal.getValue3());
- }
return new ExprNodeConstantDesc(TypeInfoFactory.voidTypeInfo, literal.getValue3());
}
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
index 0f6c5b5..6f4188c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
@@ -29,6 +29,7 @@
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.AggregateCall;
+import org.apache.calcite.rel.core.Aggregate.Group;
import org.apache.calcite.rex.RexInputRef;
import org.apache.calcite.util.ImmutableBitSet;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -173,7 +174,7 @@ private static GBInfo getGBInfo(HiveAggregate aggRel, OpAttr inputOpAf, HiveConf
}
// 2. Collect Grouping Set info
- if (aggRel.indicator) {
+ if (aggRel.getGroupType() != Group.SIMPLE) {
// 2.1 Translate Grouping set col bitset
ImmutableList&lt;ImmutableBitSet&gt; lstGrpSet = aggRel.getGroupSets();
int bitmap = 0;
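
For reference, the translation this hunk's trailing context begins turns each grouping set into an integer mask over the group-by key positions. A self-contained sketch of that idea (my own, not patch code; the exact bit order Hive assigns is defined by the surrounding HiveGBOpConvUtil logic, so treat the mapping here as illustrative):

    import org.apache.calcite.util.ImmutableBitSet;

    public class GroupingSetMaskDemo {
      public static void main(String[] args) {
        // Grouping set {0, 2} over keys (a, b, c): set one bit per key position.
        int bitmap = 0;
        for (int pos : ImmutableBitSet.of(0, 2)) {
          bitmap |= 1 << pos;
        }
        System.out.println(Integer.toBinaryString(bitmap)); // 101
      }
    }
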
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index 471675b..1885869 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -19,8 +19,6 @@
package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
-import org.apache.hadoop.hive.ql.parse.*;
-
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -40,7 +38,6 @@
import org.apache.calcite.rel.core.Join;
import org.apache.calcite.rel.core.JoinRelType;
import org.apache.calcite.rel.core.SemiJoin;
-import org.apache.calcite.rel.type.RelDataTypeField;
import org.apache.calcite.rex.RexInputRef;
import org.apache.calcite.rex.RexLiteral;
import org.apache.calcite.rex.RexNode;
@@ -62,7 +59,6 @@
import org.apache.hadoop.hive.ql.io.AcidUtils.Operation;
import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelOptUtil;
import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
@@ -74,8 +70,19 @@
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
+import org.apache.hadoop.hive.ql.parse.JoinCond;
+import org.apache.hadoop.hive.ql.parse.JoinType;
+import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec;
import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression;
import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionExpression;
+import org.apache.hadoop.hive.ql.parse.PTFTranslator;
+import org.apache.hadoop.hive.ql.parse.ParseUtils;
+import org.apache.hadoop.hive.ql.parse.RowResolver;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.UnparseTranslator;
+import org.apache.hadoop.hive.ql.parse.WindowingComponentizer;
+import org.apache.hadoop.hive.ql.parse.WindowingSpec;
import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFunctionSpec;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index abbffb0..8c36832 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -436,7 +436,7 @@ private RexNode handleExplicitCast(ExprNodeGenericFuncDesc func, List&lt;RexNode&gt; c
// Calcite always needs the else clause to be defined explicitly
if (newChildRexNodeLst.size() % 2 == 0) {
newChildRexNodeLst.add(cluster.getRexBuilder().makeNullLiteral(
- newChildRexNodeLst.get(newChildRexNodeLst.size()-1).getType().getSqlTypeName()));
+ newChildRexNodeLst.get(newChildRexNodeLst.size()-1).getType()));
}
return newChildRexNodeLst;
}
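
The makeNullLiteral change tracks a Calcite API migration: the SqlTypeName overload was deprecated in favor of one that takes the full RelDataType, which keeps precision and scale on the typed NULL used as the implicit CASE else-branch. A standalone sketch (illustrative; class name is mine; assumes a Calcite jar on the classpath):

    import org.apache.calcite.rel.type.RelDataType;
    import org.apache.calcite.rel.type.RelDataTypeSystem;
    import org.apache.calcite.rex.RexBuilder;
    import org.apache.calcite.rex.RexNode;
    import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
    import org.apache.calcite.sql.type.SqlTypeName;

    public class NullLiteralDemo {
      public static void main(String[] args) {
        SqlTypeFactoryImpl factory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
        RexBuilder rexBuilder = new RexBuilder(factory);
        RelDataType varchar10 = factory.createSqlType(SqlTypeName.VARCHAR, 10);
        // The RelDataType overload preserves the length; the deprecated
        // SqlTypeName overload would have dropped it.
        RexNode nullLit = rexBuilder.makeNullLiteral(varchar10);
        System.out.println(nullLit.getType()); // VARCHAR(10)
      }
    }
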
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index 31a088b..51714cf 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -335,7 +335,7 @@ private static String getName(GenericUDF hiveUDF) {
registerFunction("-", SqlStdOperatorTable.MINUS, hToken(HiveParser.MINUS, "-"));
registerFunction("*", SqlStdOperatorTable.MULTIPLY, hToken(HiveParser.STAR, "*"));
registerFunction("/", SqlStdOperatorTable.DIVIDE, hToken(HiveParser.DIVIDE, "/"));
- registerFunction("%", SqlStdOperatorTable.MOD, hToken(HiveParser.Identifier, "%"));
+ registerFunction("%", SqlStdOperatorTable.MOD, hToken(HiveParser.MOD, "%"));
registerFunction("and", SqlStdOperatorTable.AND, hToken(HiveParser.KW_AND, "and"));
registerFunction("or", SqlStdOperatorTable.OR, hToken(HiveParser.KW_OR, "or"));
registerFunction("=", SqlStdOperatorTable.EQUALS, hToken(HiveParser.EQUAL, "="));
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
index 34886f3..90e0377 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
@@ -36,12 +36,10 @@
import org.apache.calcite.util.ConversionUtil;
import org.apache.hadoop.hive.common.type.HiveChar;
import org.apache.hadoop.hive.common.type.HiveVarchar;
-import org.apache.hadoop.hive.common.type.TimestampTZ;
import org.apache.hadoop.hive.ql.exec.ColumnInfo;
import org.apache.hadoop.hive.ql.exec.RowSchema;
import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException.UnsupportedFeature;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveType;
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter.HiveToken;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.RowResolver;
@@ -65,7 +63,6 @@
private static final Map&lt;String, HiveToken&gt; calciteToHiveTypeNameMap;
- // TODO: Handling of char[], varchar[], string...
static {
Builder&lt;String, HiveToken&gt; b = ImmutableMap.&lt;String, HiveToken&gt; builder();
b.put(SqlTypeName.BOOLEAN.getName(), new HiveToken(HiveParser.TOK_BOOLEAN, "TOK_BOOLEAN"));
@@ -203,7 +200,7 @@ public static RelDataType convert(PrimitiveTypeInfo type, RelDataTypeFactory dtF
convertedType = dtFactory.createSqlType(SqlTypeName.TIMESTAMP);
break;
case TIMESTAMPLOCALTZ:
- convertedType = new HiveType(TimestampTZ.class);
+ convertedType = dtFactory.createSqlType(SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
break;
case INTERVAL_YEAR_MONTH:
convertedType = dtFactory.createSqlIntervalType(
@@ -330,6 +327,8 @@ public static TypeInfo convertPrimitiveType(RelDataType rType) {
return TypeInfoFactory.dateTypeInfo;
case TIMESTAMP:
return TypeInfoFactory.timestampTypeInfo;
+ case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
+ return TypeInfoFactory.timestampLocalTZTypeInfo;
case INTERVAL_YEAR:
case INTERVAL_MONTH:
case INTERVAL_YEAR_MONTH:
@@ -361,13 +360,7 @@ public static TypeInfo convertPrimitiveType(RelDataType rType) {
return TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME);
else
return TypeInfoFactory.getCharTypeInfo(charLength);
- case NULL:
- case OTHER:
default:
- if (rType instanceof HiveType && ((HiveType) rType).getTypeClass() == TimestampTZ.class) {
- // TODO: This block should be removed when we upgrade Calcite to use local time-zone
- return TypeInfoFactory.timestampLocalTZTypeInfo;
- }
return TypeInfoFactory.voidTypeInfo;
}
@@ -396,12 +389,10 @@ public static HiveToken hiveToken(RelDataType calciteType) {
.getPrecision()), String.valueOf(calciteType.getScale()));
}
break;
- case NULL:
- if (calciteType instanceof HiveType && ((HiveType) calciteType).getTypeClass() == TimestampTZ.class) {
- ht = new HiveToken(HiveParser.TOK_TIMESTAMPLOCALTZ, "TOK_TIMESTAMPLOCALTZ");
- break;
- }
- // fall-through
+ case TIMESTAMP_WITH_LOCAL_TIME_ZONE: {
+ ht = new HiveToken(HiveParser.TOK_TIMESTAMPLOCALTZ, "TOK_TIMESTAMPLOCALTZ");
+ }
+ break;
default:
ht = calciteToHiveTypeNameMap.get(calciteType.getSqlTypeName().getName());
}
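
With the HiveType wrapper gone, TIMESTAMPLOCALTZ rides on Calcite's built-in type in both directions of the conversion. A minimal sketch of the new mapping target (illustrative; class name is mine; assumes a Calcite version that ships SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE, i.e. 1.14+):

    import org.apache.calcite.rel.type.RelDataType;
    import org.apache.calcite.rel.type.RelDataTypeSystem;
    import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
    import org.apache.calcite.sql.type.SqlTypeName;

    public class TzTypeDemo {
      public static void main(String[] args) {
        RelDataType t = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT)
            .createSqlType(SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
        System.out.println(t.getSqlTypeName()); // TIMESTAMP_WITH_LOCAL_TIME_ZONE
      }
    }
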
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 6555269..b311bb5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -231,8 +231,6 @@
import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionExpression;
import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionSpec;
import org.apache.hadoop.hive.ql.parse.QBExpr.Opcode;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.PlannerContext;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.PlannerContextFactory;
import org.apache.hadoop.hive.ql.parse.WindowingSpec.BoundarySpec;
import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowExpressionSpec;
import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFunctionSpec;
@@ -1357,6 +1355,9 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu
HiveRulesRegistry registry = new HiveRulesRegistry();
Properties calciteConfigProperties = new Properties();
calciteConfigProperties.setProperty(
+ CalciteConnectionProperty.TIME_ZONE.camelName(),
+ conf.getLocalTimeZone().getId());
+ calciteConfigProperties.setProperty(
CalciteConnectionProperty.MATERIALIZATIONS_ENABLED.camelName(),
Boolean.FALSE.toString());
CalciteConnectionConfig calciteConfig = new CalciteConnectionConfigImpl(calciteConfigProperties);
@@ -1500,7 +1501,8 @@ public RelOptMaterialization apply(RelOptMaterialization materialization) {
} else {
newViewScan = copyNodeScan(viewScan);
}
- return new RelOptMaterialization(newViewScan, materialization.queryRel, null);
+ return new RelOptMaterialization(newViewScan, materialization.queryRel, null,
+ materialization.qualifiedTableName);
}
private RelNode copyNodeScan(RelNode scan) {
@@ -2402,7 +2404,8 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticExc
List&lt;Interval&gt; intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
- dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
+ dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN,
+ intervals, null, null);
final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
optTable, null == tableAlias ? tabMetaData.getTableName() : tableAlias,
getAliasId(tableAlias, qb), HiveConf.getBoolVar(conf,
@@ -2821,8 +2824,8 @@ private RelNode genGBRelNode(List&lt;ExprNodeDesc&gt; gbExprs, List&lt;AggInfo&gt; aggInfoLs
}
if (hasGroupSets) {
// Create GroupingID column
- AggregateCall aggCall = new AggregateCall(HiveGroupingID.INSTANCE,
- false, new ImmutableList.Builder&lt;Integer&gt;().build(),
+ AggregateCall aggCall = AggregateCall.create(HiveGroupingID.INSTANCE,
+ false, new ImmutableList.Builder&lt;Integer&gt;().build(), -1,
this.cluster.getTypeFactory().createSqlType(SqlTypeName.INTEGER),
HiveGroupingID.INSTANCE.getName());
aggregateCalls.add(aggCall);
@@ -2836,8 +2839,7 @@ private RelNode genGBRelNode(List&lt;ExprNodeDesc&gt; gbExprs, List&lt;AggInfo&gt; aggInfoLs
RelNode gbInputRel = HiveProject.create(srcRel, gbChildProjLst, null);
HiveRelNode aggregateRel = new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
- gbInputRel, (transformedGroupSets!=null ? true:false), groupSet,
- transformedGroupSets, aggregateCalls);
+ gbInputRel, groupSet, transformedGroupSets, aggregateCalls);
return aggregateRel;
}
@@ -3095,19 +3097,6 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException
} else if (qbp.getDestGroupingSets().contains(detsClauseName)) {
groupingSets = getGroupingSets(grpByAstExprs, qbp, detsClauseName);
}
-
- final int limit = groupingColsSize * 2;
- while (groupingColsSize < limit) {
- String field = getColumnInternalName(groupingColsSize);
- outputColumnNames.add(field);
- groupByOutputRowResolver.put(null, field,
- new ColumnInfo(
- field,
- TypeInfoFactory.booleanTypeInfo,
- null,
- false));
- groupingColsSize++;
- }
}
// 6. Construct aggregation function Info
@@ -4061,7 +4050,7 @@ public RexNode apply(RelDataTypeField input) {
if (selForWindow != null && selExprList.getToken().getType() == HiveParser.TOK_SELECTDI) {
ImmutableBitSet groupSet = ImmutableBitSet.range(outputRel.getRowType().getFieldList().size());
outputRel = new HiveAggregate(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
- outputRel, false, groupSet, null, new ArrayList&lt;AggregateCall&gt;());
+ outputRel, groupSet, null, new ArrayList&lt;AggregateCall&gt;());
RowResolver groupByOutputRowResolver = new RowResolver();
for (int i = 0; i < out_rwsch.getColumnInfos().size(); i++) {
ColumnInfo colInfo = out_rwsch.getColumnInfos().get(i);
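
Registering the session zone through CalciteConnectionProperty.TIME_ZONE is what shifts the expected Druid intervals in the .q.out updates below: with the test zone set to US/Pacific, a local date literal no longer coincides with UTC midnight. A quick arithmetic check (standalone java.time sketch, not patch code; class name is mine):

    import java.time.LocalDate;
    import java.time.ZoneId;
    import java.time.ZonedDateTime;

    public class ZoneShiftDemo {
      public static void main(String[] args) {
        ZonedDateTime localMidnight =
            LocalDate.of(2010, 1, 1).atStartOfDay(ZoneId.of("US/Pacific"));
        // PST is UTC-8 in January, hence the 08:00 boundaries in the plans.
        System.out.println(localMidnight.toInstant()); // 2010-01-01T08:00:00Z
      }
    }
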
diff --git ql/src/test/results/clientpositive/druid_basic2.q.out ql/src/test/results/clientpositive/druid_basic2.q.out
index 2e9c340..b55e9ec 100644
--- ql/src/test/results/clientpositive/druid_basic2.q.out
+++ ql/src/test/results/clientpositive/druid_basic2.q.out
@@ -554,7 +554,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
GatherStats: false
@@ -589,7 +589,7 @@ STAGE PLANS:
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
#### A masked pattern was here ####
name default.druid_table_1
@@ -615,7 +615,7 @@ STAGE PLANS:
columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
druid.datasource wikipedia
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
#### A masked pattern was here ####
name default.druid_table_1
@@ -834,7 +834,7 @@ STAGE PLANS:
alias: druid_table_1
filterExpr: floor_day(extract) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"US/Pacific"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Filter Operator
@@ -899,7 +899,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"}]},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1999-11-01T00:00:00.000/1999-11-10T00:00:00.001"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"}]},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1999-11-01T08:00:00.000/1999-11-10T08:00:00.001"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
diff --git ql/src/test/results/clientpositive/druid_intervals.q.out ql/src/test/results/clientpositive/druid_intervals.q.out
index 433f15e..4524242 100644
--- ql/src/test/results/clientpositive/druid_intervals.q.out
+++ ql/src/test/results/clientpositive/druid_intervals.q.out
@@ -109,7 +109,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/2012-03-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/2012-03-01T08:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -139,7 +139,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2012-03-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2012-03-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -171,7 +171,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2011-01-01T08:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -201,7 +201,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2011-01-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -233,7 +233,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.001","2012-01-01T00:00:00.000/2013-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2011-01-01T08:00:00.001","2012-01-01T08:00:00.000/2013-01-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -265,7 +265,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2012-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2012-01-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -295,7 +295,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2010-01-01T00:00:00.001","2011-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2010-01-01T08:00:00.001","2011-01-01T08:00:00.000/2011-01-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -325,7 +325,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2010-01-01T00:00:00.001","2011-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+ druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2010-01-01T08:00:00.001","2011-01-01T08:00:00.000/2011-01-01T08:00:00.001"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
druid.query.type select
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
diff --git ql/src/test/results/clientpositive/druid_timeseries.q.out ql/src/test/results/clientpositive/druid_timeseries.q.out
index 0ce1abe..9f02741 100644
--- ql/src/test/results/clientpositive/druid_timeseries.q.out
+++ ql/src/test/results/clientpositive/druid_timeseries.q.out
@@ -109,7 +109,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"US/Pacific"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
diff --git ql/src/test/results/clientpositive/druid_topn.q.out ql/src/test/results/clientpositive/druid_topn.q.out
index 052be15..8cb5213 100644
--- ql/src/test/results/clientpositive/druid_topn.q.out
+++ ql/src/test/results/clientpositive/druid_topn.q.out
@@ -119,7 +119,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default","limit":100,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"US/Pacific"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default","limit":100,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -153,7 +153,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_year","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"year","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_year","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"year","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -187,7 +187,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"ascending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"ascending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -221,7 +221,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"namespace"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f4","direction":"descending","dimensionOrder":"numeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"namespace"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f4","direction":"descending","dimensionOrder":"numeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
@@ -255,7 +255,7 @@ STAGE PLANS:
TableScan
alias: druid_table_1
properties:
- druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"namespace"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+ druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"namespace"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"US/Pacific","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
druid.query.type groupBy
Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
Select Operator
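[Editor's note on the Druid deltas above: every changed line makes the same substitution — the timeZone field of the Druid timeFormat extraction function in druid.query.json now carries the configured local zone (US/Pacific in these expected outputs) instead of a hard-coded UTC. A minimal illustrative sketch of how a serializer could thread that zone through; the class and method names here are hypothetical, not the actual Hive/Calcite Druid code:

    import java.util.TimeZone;

    // Illustrative sketch only: builds the "timeFormat" extraction-function
    // JSON fragment seen in the .q.out files above, with the time zone taken
    // from configuration rather than pinned to "UTC".
    public class TimeFormatExtractionSketch {
      static String timeFormatExtractionJson(String format, String granularity, TimeZone tz) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\"type\":\"timeFormat\",\"format\":\"").append(format).append('"');
        if (granularity != null) {
          sb.append(",\"granularity\":\"").append(granularity).append('"');
        }
        // Previously always "UTC"; the expected outputs above now show the
        // configured zone (US/Pacific in these test runs).
        sb.append(",\"timeZone\":\"").append(tz.getID()).append('"');
        return sb.append('}').toString();
      }

      public static void main(String[] args) {
        System.out.println(timeFormatExtractionJson(
            "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", "year", TimeZone.getTimeZone("US/Pacific")));
      }
    }
]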
diff --git ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
index 13eac20..fcf203e 100644
--- ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
+++ ql/src/test/results/clientpositive/groupby_grouping_sets1.q.out
@@ -464,22 +464,21 @@ STAGE PLANS:
outputColumnNames: a
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Group By Operator
- keys: a (type: string), 0 (type: int)
+ keys: a (type: string)
mode: hash
- outputColumnNames: _col0, _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: int)
- sort order: ++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Group By Operator
- keys: KEY._col0 (type: string), KEY._col1 (type: int)
+ keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- pruneGroupingSetId: true
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
@@ -532,35 +531,30 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
- keys: _col0 (type: double), 0 (type: int)
+ keys: _col0 (type: double)
mode: hash
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: double), _col1 (type: int)
- sort order: ++
- Map-reduce partition columns: _col0 (type: double), _col1 (type: int)
+ key expressions: _col0 (type: double)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: double)
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- value expressions: _col2 (type: bigint)
+ value expressions: _col1 (type: bigint)
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
- keys: KEY._col0 (type: double), KEY._col1 (type: int)
+ keys: KEY._col0 (type: double)
mode: mergepartial
- outputColumnNames: _col0, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- pruneGroupingSetId: true
- Select Operator
- expressions: _col0 (type: double), _col2 (type: bigint)
- outputColumnNames: _col0, _col1
+ File Output Operator
+ compressed: false
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
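[Editor's note on the grouping-sets deltas above and in the limit variant that follows: the constant grouping-id key — the "0 (type: int)" in the old Group By keys — is pruned whenever the aggregate has exactly one grouping set equal to its full group set. With a single set, GROUPING__ID is a constant, so the extra key column, the pruneGroupingSetId flag, and the trailing Select Operator that renumbered columns all disappear from the plan. A hedged sketch of that decision using Calcite's ImmutableBitSet; the helper name is illustrative, not the actual rule API:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.calcite.util.ImmutableBitSet;

    // Illustrative helper, not the actual HiveAggregate/rule code.
    public class GroupingIdPruneSketch {
      // A grouping-id key is only needed when rows can belong to different
      // grouping sets; with one set equal to the group set it is a constant.
      static boolean needsGroupingId(ImmutableBitSet groupSet,
          List<ImmutableBitSet> groupSets) {
        return groupSets.size() != 1 || !groupSets.get(0).equals(groupSet);
      }

      public static void main(String[] args) {
        ImmutableBitSet g = ImmutableBitSet.of(0);
        // Plain GROUP BY a -> single grouping set {0}: no grouping id needed.
        System.out.println(needsGroupingId(g, Arrays.asList(g)));
        // GROUPING SETS ((a), ()) -> two sets: grouping id required.
        System.out.println(needsGroupingId(g, Arrays.asList(g, ImmutableBitSet.of())));
      }
    }
]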
diff --git ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out
index 02636eb..1c45853 100644
--- ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out
+++ ql/src/test/results/clientpositive/groupby_grouping_sets_limit.q.out
@@ -357,23 +357,22 @@ STAGE PLANS:
outputColumnNames: a
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Group By Operator
- keys: a (type: string), 0 (type: int)
+ keys: a (type: string)
mode: hash
- outputColumnNames: _col0, _col1
+ outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: string), _col1 (type: int)
- sort order: ++
- Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
+ key expressions: _col0 (type: string)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.1
Reduce Operator Tree:
Group By Operator
- keys: KEY._col0 (type: string), KEY._col1 (type: int)
+ keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- pruneGroupingSetId: true
Limit
Number of rows: 10
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
@@ -427,39 +426,34 @@ STAGE PLANS:
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
- keys: _col0 (type: double), 0 (type: int)
+ keys: _col0 (type: double)
mode: hash
- outputColumnNames: _col0, _col1, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
- key expressions: _col0 (type: double), _col1 (type: int)
- sort order: ++
- Map-reduce partition columns: _col0 (type: double), _col1 (type: int)
+ key expressions: _col0 (type: double)
+ sort order: +
+ Map-reduce partition columns: _col0 (type: double)
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
TopN Hash Memory Usage: 0.1
- value expressions: _col2 (type: bigint)
+ value expressions: _col1 (type: bigint)
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
- keys: KEY._col0 (type: double), KEY._col1 (type: int)
+ keys: KEY._col0 (type: double)
mode: mergepartial
- outputColumnNames: _col0, _col2
+ outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- pruneGroupingSetId: true
- Select Operator
- expressions: _col0 (type: double), _col2 (type: bigint)
- outputColumnNames: _col0, _col1
+ Limit
+ Number of rows: 10
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- Limit
- Number of rows: 10
+ File Output Operator
+ compressed: false
Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+ table:
+ input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+ output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator