From 53c0b095b67d2e3eac50cc66d5a91f07c92ee602 Mon Sep 17 00:00:00 2001 From: Nishant Bangarwa Date: Mon, 29 Jun 2020 04:41:57 +0530 Subject: [PATCH] [HIVE-23770] Fix Druid filter translation for inverted between --- .../HiveDruidPullInvertFromBetweenRule.java | 123 ++++++++++ .../HiveDruidPushInvertIntoBetweenRule.java | 92 ++++++++ .../hadoop/hive/ql/parse/CalcitePlanner.java | 6 +- .../TestHivePullInvertFromBetweenRule.java | 221 ++++++++++++++++++ .../TestHivePushInvertIntoBetweenRule.java | 202 ++++++++++++++++ .../clientpositive/druid_timestamptz2.q | 3 + .../druid/druid_timestamptz2.q.out | 39 ++++ 7 files changed, 685 insertions(+), 1 deletion(-) create mode 100644 ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidPullInvertFromBetweenRule.java create mode 100644 ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidPushInvertIntoBetweenRule.java create mode 100644 ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePullInvertFromBetweenRule.java create mode 100644 ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePushInvertIntoBetweenRule.java diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidPullInvertFromBetweenRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidPullInvertFromBetweenRule.java new file mode 100644 index 0000000000..bbd037caa4 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidPullInvertFromBetweenRule.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexShuttle;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveBetween;
+
+/**
+ * This rule identifies BETWEEN calls whose invert flag is set and pulls the inversion out
+ * as an explicit NOT. The Druid rules cannot handle the invert flag directly because
+ * HiveBetween and Calcite's SqlBetweenOperator represent inversion differently.
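+ *
+ * <p>For example, the inverted call {@code BETWEEN(true, $0, 1, 10)} (operand 0 is the
+ * invert flag, operand 1 the tested expression, operands 2 and 3 the bounds) is rewritten
+ * to {@code NOT(BETWEEN(false, $0, 1, 10))}, which the Druid filter rules can translate.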
+ */
+public class HiveDruidPullInvertFromBetweenRule extends RelOptRule {
+
+  protected static final Log LOG = LogFactory.getLog(HiveDruidPullInvertFromBetweenRule.class);
+
+  public static final HiveDruidPullInvertFromBetweenRule INSTANCE =
+      new HiveDruidPullInvertFromBetweenRule();
+
+  private HiveDruidPullInvertFromBetweenRule() {
+    super(operand(Filter.class, any()));
+  }
+
+  @Override
+  public void onMatch(RelOptRuleCall call) {
+    final Filter filter = call.rel(0);
+    final RexBuilder rexBuilder = filter.getCluster().getRexBuilder();
+    final RexNode condition = RexUtil.pullFactors(rexBuilder, filter.getCondition());
+
+    RexPullInvertFromBetween t = new RexPullInvertFromBetween(rexBuilder);
+    RexNode newCondition = t.apply(condition);
+
+    // If we could not transform anything, bail out
+    if (newCondition.toString().equals(condition.toString())) {
+      return;
+    }
+    RelNode newNode = filter.copy(filter.getTraitSet(), filter.getInput(), newCondition);
+
+    call.transformTo(newNode);
+  }
+
+  protected static class RexPullInvertFromBetween extends RexShuttle {
+    private final RexBuilder rexBuilder;
+
+    RexPullInvertFromBetween(RexBuilder rexBuilder) {
+      this.rexBuilder = rexBuilder;
+    }
+
+    @Override
+    public RexNode visitCall(RexCall inputCall) {
+      RexNode node = super.visitCall(inputCall);
+      if (node instanceof RexCall && node.getKind() == SqlKind.BETWEEN) {
+        RexCall call = (RexCall) node;
+        // HiveBetween operands: 0 = invert flag, 1 = expression, 2 = lower bound, 3 = upper bound
+        boolean isInverted = call.getOperands().get(0).isAlwaysTrue();
+        if (isInverted) {
+          // Pull the inversion out: BETWEEN(true, x, a, b) -> NOT(BETWEEN(false, x, a, b))
+          return rexBuilder.makeCall(SqlStdOperatorTable.NOT, rexBuilder.makeCall(
+              HiveBetween.INSTANCE, rexBuilder.makeLiteral(false),
+              call.getOperands().get(1), call.getOperands().get(2), call.getOperands().get(3)));
+        }
+      }
+      return node;
+    }
+  }
+
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidPushInvertIntoBetweenRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidPushInvertIntoBetweenRule.java
new file mode 100644
index 0000000000..53019310d1
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidPushInvertIntoBetweenRule.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexShuttle;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveBetween;
+
+/**
+ * This rule is the opposite of HiveDruidPullInvertFromBetweenRule: once the Druid rules
+ * have run, it pushes the inversion from a NOT back into the BETWEEN call. For example,
+ * {@code NOT(BETWEEN(false, $0, 1, 10))} becomes {@code BETWEEN(true, $0, 1, 10)}.
+ */
+public class HiveDruidPushInvertIntoBetweenRule extends RelOptRule {
+
+  protected static final Log LOG = LogFactory.getLog(HiveDruidPushInvertIntoBetweenRule.class);
+
+  public static final HiveDruidPushInvertIntoBetweenRule INSTANCE =
+      new HiveDruidPushInvertIntoBetweenRule();
+
+  private HiveDruidPushInvertIntoBetweenRule() {
+    super(operand(Filter.class, any()));
+  }
+
+  @Override
+  public void onMatch(RelOptRuleCall call) {
+    final Filter filter = call.rel(0);
+    final RexBuilder rexBuilder = filter.getCluster().getRexBuilder();
+    final RexNode condition = RexUtil.pullFactors(rexBuilder, filter.getCondition());
+
+    RexPushInvertIntoBetween t = new RexPushInvertIntoBetween(rexBuilder);
+    RexNode newCondition = t.apply(condition);
+
+    // If we could not transform anything, bail out
+    if (newCondition.toString().equals(condition.toString())) {
+      return;
+    }
+    RelNode newNode = filter.copy(filter.getTraitSet(), filter.getInput(), newCondition);
+
+    call.transformTo(newNode);
+  }
+
+  protected static class RexPushInvertIntoBetween extends RexShuttle {
+    private final RexBuilder rexBuilder;
+
+    RexPushInvertIntoBetween(RexBuilder rexBuilder) {
+      this.rexBuilder = rexBuilder;
+    }
+
+    @Override
+    public RexNode visitCall(RexCall inputCall) {
+      RexNode node = super.visitCall(inputCall);
+      if (node instanceof RexCall && node.getKind() == SqlKind.NOT) {
+        RexCall not = (RexCall) node;
+        RexNode operand = not.getOperands().get(0);
+        if (operand instanceof RexCall && operand.getKind() == SqlKind.BETWEEN) {
+          RexCall call = (RexCall) operand;
+          // Absorb the NOT by flipping the invert flag (operand 0) of the BETWEEN call
+          boolean isInverted = call.getOperands().get(0).isAlwaysTrue();
+          return rexBuilder.makeCall(
+              HiveBetween.INSTANCE, rexBuilder.makeLiteral(!isInverted),
+              call.getOperands().get(1), call.getOperands().get(2), call.getOperands().get(3));
+        }
+      }
+      return node;
+    }
+  }
+
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 335e25644a..c62415c97e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -201,6 +201,8 @@
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateReduceRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateSplitRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveCardinalityPreservingJoinRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveDruidPullInvertFromBetweenRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveDruidPushInvertIntoBetweenRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveDruidRules;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExceptRewriteRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExpandDistinctAggregatesRule;
@@ -2437,6 +2439,7 @@ private RelNode applyPostJoinOrderingTransform(RelNode basePlan, RelMetadataProv
 
     // 7. Apply Druid transformation rules
     generatePartialProgram(program, false, HepMatchOrder.DEPTH_FIRST,
+        HiveDruidPullInvertFromBetweenRule.INSTANCE,
         HiveDruidRules.FILTER_DATE_RANGE_RULE,
         HiveDruidRules.FILTER, HiveDruidRules.PROJECT_FILTER_TRANSPOSE,
         HiveDruidRules.AGGREGATE_FILTER_TRANSPOSE,
@@ -2450,7 +2453,8 @@ private RelNode applyPostJoinOrderingTransform(RelNode basePlan, RelMetadataProv
         HiveDruidRules.HAVING_FILTER_RULE,
         HiveDruidRules.SORT_PROJECT_TRANSPOSE,
         HiveDruidRules.SORT,
-        HiveDruidRules.PROJECT_SORT_TRANSPOSE);
+        HiveDruidRules.PROJECT_SORT_TRANSPOSE,
+        HiveDruidPushInvertIntoBetweenRule.INSTANCE);
 
     // 8. Apply JDBC transformation rules
     if (conf.getBoolVar(ConfVars.HIVE_ENABLE_JDBC_PUSHDOWN)) {
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePullInvertFromBetweenRule.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePullInvertFromBetweenRule.java
new file mode 100644
index 0000000000..e8507d0c52
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePullInvertFromBetweenRule.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptSchema;
+import org.apache.calcite.plan.hep.HepPlanner;
+import org.apache.calcite.plan.hep.HepProgramBuilder;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.tools.RelBuilder;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
+import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveBetween;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import static org.junit.Assert.assertEquals;
+
+@RunWith(MockitoJUnitRunner.class)
+public class TestHivePullInvertFromBetweenRule {
+
+  @Mock
+  private RelOptSchema schemaMock;
+  @Mock
+  RelOptHiveTable tableMock;
+  @Mock
+  Table hiveTableMDMock;
+
+  private HepPlanner planner;
+  private RelBuilder builder;
+
+  @SuppressWarnings("unused")
+  private static class MyRecord {
+    public int f1;
+    public int f2;
+    public int f3;
+  }
+
+  @Before
+  public void before() {
+    HepProgramBuilder programBuilder = new HepProgramBuilder();
+    programBuilder.addRuleInstance(HiveDruidPullInvertFromBetweenRule.INSTANCE);
+
+    planner = new HepPlanner(programBuilder.build());
+
+    JavaTypeFactoryImpl typeFactory = new JavaTypeFactoryImpl();
+    RexBuilder rexBuilder = new RexBuilder(typeFactory);
+    final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder);
+    RelDataType rowTypeMock = typeFactory.createStructType(MyRecord.class);
+    Mockito.doReturn(rowTypeMock).when(tableMock).getRowType();
+    Mockito.doReturn(tableMock).when(schemaMock).getTableForMember(Matchers.any());
+    Mockito.doReturn(hiveTableMDMock).when(tableMock).getHiveTableMD();
+
+    builder = HiveRelFactories.HIVE_BUILDER.create(optCluster, schemaMock);
+  }
+
+  public RexNode or(RexNode... args) {
+    return builder.call(SqlStdOperatorTable.OR, args);
+  }
+
+  public RexNode between(boolean invert, String field, int value1, int value2) {
+    return builder.call(HiveBetween.INSTANCE,
+        builder.literal(invert), builder.field(field), builder.literal(value1), builder.literal(value2));
+  }
+
+  public RexNode and(RexNode... args) {
+    return builder.call(SqlStdOperatorTable.AND, args);
+  }
+
+  public RexNode not(RexNode... args) {
+    return builder.call(SqlStdOperatorTable.NOT, args);
+  }
+
+  public RexNode gt(String field, int value) {
+    return builder.call(SqlStdOperatorTable.GREATER_THAN,
+        builder.field(field), builder.literal(value));
+  }
+
+  public RexNode lt(String field, int value) {
+    return builder.call(SqlStdOperatorTable.LESS_THAN,
+        builder.field(field), builder.literal(value));
+  }
+
+  @Test
+  public void testSimpleCase() {
+    // @formatter:off
+    final RelNode basePlan = builder
+        .scan("t")
+        .filter(
+            between(true, "f1", 1, 10)
+        )
+        .build();
+    // @formatter:on
+
+    planner.setRoot(basePlan);
+    RelNode optimizedRelNode = planner.findBestExp();
+
+    HiveFilter filter = (HiveFilter) optimizedRelNode;
+    RexNode condition = filter.getCondition();
+    assertEquals("NOT(BETWEEN(false, $0, 1, 10))", condition.toString());
+  }
+
+  @Test
+  public void testSimpleNonInvertCase() {
+    // @formatter:off
+    final RelNode basePlan = builder
+        .scan("t")
+        .filter(
+            between(false, "f1", 1, 10)
+        )
+        .build();
+    // @formatter:on
+
+    planner.setRoot(basePlan);
+    RelNode optimizedRelNode = planner.findBestExp();
+
+    HiveFilter filter = (HiveFilter) optimizedRelNode;
+    RexNode condition = filter.getCondition();
+    assertEquals("BETWEEN(false, $0, 1, 10)", condition.toString());
+  }
+
+  @Test
+  public void testBetweenWithOrCase() {
+    // @formatter:off
+    final RelNode basePlan = builder
+        .scan("t")
+        .filter(
+            or(
+                between(true, "f1", 1, 10),
+                between(true, "f2", 2, 20)
+            )
+        )
+        .build();
+    // @formatter:on
+
+    planner.setRoot(basePlan);
+    RelNode optimizedRelNode = planner.findBestExp();
+
+    HiveFilter filter = (HiveFilter) optimizedRelNode;
+    RexNode condition = filter.getCondition();
+    assertEquals("OR(NOT(BETWEEN(false, $0, 1, 10)), NOT(BETWEEN(false, $1, 2, 20)))", condition.toString());
+  }
+
+  @Test
+  public void testBetweenWithAndCase() {
+    // @formatter:off
+    final RelNode basePlan = builder
+        .scan("t")
+        .filter(
+            and(
+                between(true, "f1", 1, 10),
+                between(true, "f2", 2, 20)
+            )
+        )
+        .build();
+    // @formatter:on
+
+    planner.setRoot(basePlan);
+    RelNode optimizedRelNode = planner.findBestExp();
+
+    HiveFilter filter = (HiveFilter) optimizedRelNode;
+    RexNode condition = filter.getCondition();
+    assertEquals("AND(NOT(BETWEEN(false, $0, 1, 10)), NOT(BETWEEN(false, $1, 2, 20)))", condition.toString());
+  }
+
+  @Test
+  public void testBetweenAbsentCase() {
+    // @formatter:off
+    final RelNode basePlan = builder
+        .scan("t")
+        .filter(
+            and(
+                lt("f1", 10),
+                gt("f1", 100)
+            )
+        )
+        .build();
+    // @formatter:on
+
+    planner.setRoot(basePlan);
+    RelNode optimizedRelNode = planner.findBestExp();
+
+    HiveFilter filter = (HiveFilter) optimizedRelNode;
+    RexNode condition = filter.getCondition();
+    assertEquals("AND(<($0, 10), >($0, 100))", condition.toString());
+  }
+
+}
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePushInvertIntoBetweenRule.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePushInvertIntoBetweenRule.java
new file mode 100644
index 0000000000..062ec72f63
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePushInvertIntoBetweenRule.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelOptSchema;
+import org.apache.calcite.plan.hep.HepPlanner;
+import org.apache.calcite.plan.hep.HepProgramBuilder;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.tools.RelBuilder;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
+import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveBetween;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import static org.junit.Assert.assertEquals;
+
+@RunWith(MockitoJUnitRunner.class)
+public class TestHivePushInvertIntoBetweenRule {
+
+  @Mock
+  private RelOptSchema schemaMock;
+  @Mock
+  RelOptHiveTable tableMock;
+  @Mock
+  Table hiveTableMDMock;
+
+  private HepPlanner planner;
+  private RelBuilder builder;
+
+  @SuppressWarnings("unused")
+  private static class MyRecord {
+    public int f1;
+    public int f2;
+    public int f3;
+  }
+
+  @Before
+  public void before() {
+    HepProgramBuilder programBuilder = new HepProgramBuilder();
+    programBuilder.addRuleInstance(HiveDruidPushInvertIntoBetweenRule.INSTANCE);
+
+    planner = new HepPlanner(programBuilder.build());
+
+    JavaTypeFactoryImpl typeFactory = new JavaTypeFactoryImpl();
+    RexBuilder rexBuilder = new RexBuilder(typeFactory);
+    final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder);
+    RelDataType rowTypeMock = typeFactory.createStructType(MyRecord.class);
+    Mockito.doReturn(rowTypeMock).when(tableMock).getRowType();
+    Mockito.doReturn(tableMock).when(schemaMock).getTableForMember(Matchers.any());
+    Mockito.doReturn(hiveTableMDMock).when(tableMock).getHiveTableMD();
+
+    builder = HiveRelFactories.HIVE_BUILDER.create(optCluster, schemaMock);
+  }
+
+  public RexNode or(RexNode... args) {
+    return builder.call(SqlStdOperatorTable.OR, args);
+  }
+
+  public RexNode between(boolean invert, String field, int value1, int value2) {
+    return builder.call(HiveBetween.INSTANCE,
+        builder.literal(invert), builder.field(field), builder.literal(value1), builder.literal(value2));
+  }
+
+  public RexNode and(RexNode... args) {
+    return builder.call(SqlStdOperatorTable.AND, args);
+  }
+
+  public RexNode not(RexNode... args) {
+    return builder.call(SqlStdOperatorTable.NOT, args);
+  }
+
+  public RexNode gt(String field, int value) {
+    return builder.call(SqlStdOperatorTable.GREATER_THAN,
+        builder.field(field), builder.literal(value));
+  }
+
+  public RexNode lt(String field, int value) {
+    return builder.call(SqlStdOperatorTable.LESS_THAN,
+        builder.field(field), builder.literal(value));
+  }
+
+  @Test
+  public void testSimpleCase() {
+    // @formatter:off
+    final RelNode basePlan = builder
+        .scan("t")
+        .filter(
+            not(
+                between(false, "f1", 1, 10)
+            )
+        )
+        .build();
+    // @formatter:on
+
+    planner.setRoot(basePlan);
+    RelNode optimizedRelNode = planner.findBestExp();
+
+    HiveFilter filter = (HiveFilter) optimizedRelNode;
+    RexNode condition = filter.getCondition();
+    assertEquals("BETWEEN(true, $0, 1, 10)", condition.toString());
+  }
+
+  @Test
+  public void testSimpleAlreadyInvertCase() {
+    // @formatter:off
+    final RelNode basePlan = builder
+        .scan("t")
+        .filter(
+            not(
+                between(true, "f1", 1, 10)
+            )
+        )
+        .build();
+    // @formatter:on
+
+    planner.setRoot(basePlan);
+    RelNode optimizedRelNode = planner.findBestExp();
+
+    HiveFilter filter = (HiveFilter) optimizedRelNode;
+    RexNode condition = filter.getCondition();
+    assertEquals("BETWEEN(false, $0, 1, 10)", condition.toString());
+  }
+
+  @Test
+  public void testBetweenWithOrCase() {
+    // @formatter:off
+    final RelNode basePlan = builder
+        .scan("t")
+        .filter(
+            or(
+                not(between(false, "f1", 1, 10)),
+                not(between(true, "f2", 2, 20))
+            )
+        )
+        .build();
+    // @formatter:on
+
+    planner.setRoot(basePlan);
+    RelNode optimizedRelNode = planner.findBestExp();
+
+    HiveFilter filter = (HiveFilter) optimizedRelNode;
+    RexNode condition = filter.getCondition();
+    assertEquals("OR(BETWEEN(true, $0, 1, 10), BETWEEN(false, $1, 2, 20))", condition.toString());
+  }
+
+  @Test
+  public void testBetweenAbsentCase() {
+    // @formatter:off
+    final RelNode basePlan = builder
+        .scan("t")
+        .filter(
+            and(
+                between(true, "f1", 1, 10),
+                between(false, "f1", 1, 10)
+            )
+        )
+        .build();
+    // @formatter:on
+
+    planner.setRoot(basePlan);
+    RelNode optimizedRelNode = planner.findBestExp();
+
+    HiveFilter filter = (HiveFilter) optimizedRelNode;
+    RexNode condition = filter.getCondition();
+    assertEquals("AND(BETWEEN(true, $0, 1, 10), BETWEEN(false, $0, 1, 10))", condition.toString());
+  }
+
+}
diff --git a/ql/src/test/queries/clientpositive/druid_timestamptz2.q b/ql/src/test/queries/clientpositive/druid_timestamptz2.q
index 29cc02f8c3..effe30fd82 100644
--- a/ql/src/test/queries/clientpositive/druid_timestamptz2.q
+++ b/ql/src/test/queries/clientpositive/druid_timestamptz2.q
@@ -61,3 +61,6 @@
 insert into druid_test_table_utc2 values (cast('2015-03-10 23:59:59' as timestamp with local time zone), 'i3-end', 2);
 
 select * FROM druid_test_table_utc2;
+
+EXPLAIN select `interval_marker` from druid_test_table_1 WHERE (NOT(((`interval_marker` >= 'i2-start') AND (`interval_marker` <= 'i3-start'))));
+select `interval_marker` from druid_test_table_1 WHERE (NOT(((`interval_marker` >= 'i2-start') AND (`interval_marker` <= 'i3-start'))));
diff --git a/ql/src/test/results/clientpositive/druid/druid_timestamptz2.q.out b/ql/src/test/results/clientpositive/druid/druid_timestamptz2.q.out
index d9690319ae..ac017ab966 100644
--- a/ql/src/test/results/clientpositive/druid/druid_timestamptz2.q.out
+++ b/ql/src/test/results/clientpositive/druid/druid_timestamptz2.q.out
@@ -207,3 +207,42 @@ POSTHOOK: Output: hdfs://### HDFS PATH ###
 2015-03-09 23:59:59.0 UTC	i2-end	1.0
2015-03-10 00:00:00.0 UTC i3-start 2.0 2015-03-10 23:59:59.0 UTC i3-end 2.0 +PREHOOK: query: EXPLAIN select `interval_marker` from druid_test_table_1 WHERE (NOT(((`interval_marker` >= 'i2-start') AND (`interval_marker` <= 'i3-start')))) +PREHOOK: type: QUERY +PREHOOK: Input: druid_test_dst@druid_test_table_1 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: EXPLAIN select `interval_marker` from druid_test_table_1 WHERE (NOT(((`interval_marker` >= 'i2-start') AND (`interval_marker` <= 'i3-start')))) +POSTHOOK: type: QUERY +POSTHOOK: Input: druid_test_dst@druid_test_table_1 +POSTHOOK: Output: hdfs://### HDFS PATH ### +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: druid_test_table_1 + properties: + druid.fieldNames interval_marker + druid.fieldTypes string + druid.query.json {"queryType":"scan","dataSource":"druid_test_dst.druid_test_table_1","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"filter":{"type":"not","field":{"type":"bound","dimension":"interval_marker","lower":"i2-start","lowerStrict":false,"upper":"i3-start","upperStrict":false,"ordering":"lexicographic"}},"columns":["interval_marker"],"resultFormat":"compactedList"} + druid.query.type scan + Select Operator + expressions: interval_marker (type: string) + outputColumnNames: _col0 + ListSink + +PREHOOK: query: select `interval_marker` from druid_test_table_1 WHERE (NOT(((`interval_marker` >= 'i2-start') AND (`interval_marker` <= 'i3-start')))) +PREHOOK: type: QUERY +PREHOOK: Input: druid_test_dst@druid_test_table_1 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select `interval_marker` from druid_test_table_1 WHERE (NOT(((`interval_marker` >= 'i2-start') AND (`interval_marker` <= 'i3-start')))) +POSTHOOK: type: QUERY +POSTHOOK: Input: druid_test_dst@druid_test_table_1 +POSTHOOK: Output: hdfs://### HDFS PATH ### +i1-start +i1-end +i2-end -- 2.20.1 (Apple Git-117)