diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java index 201506f..d9db8c6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java @@ -26,10 +26,14 @@ import org.eigenbase.rel.TableAccessRelBase; import org.eigenbase.reltype.RelDataTypeField; import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexFieldCollation; import org.eigenbase.rex.RexInputRef; import org.eigenbase.rex.RexLiteral; import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexOver; import org.eigenbase.rex.RexVisitorImpl; +import org.eigenbase.rex.RexWindow; +import org.eigenbase.rex.RexWindowBound; import org.eigenbase.sql.SqlKind; import org.eigenbase.sql.SqlOperator; import org.eigenbase.sql.type.BasicSqlType; @@ -49,7 +53,7 @@ SortRel order; Schema schema; - + ASTConverter(RelNode root) { this.root = root; hiveAST = new HiveAST(); @@ -128,10 +132,10 @@ public ASTNode convert(SortRel sortrel) { hiveAST.select = b.node(); /* - * 7. Order - * Use in Order By from the block above. RelNode has no pointer to parent - * hence we need to go top down; but OB at each block really belong to its - * src/from. Hence the need to pass in sortRel for each block from its parent. + * 7. Order Use in Order By from the block above. RelNode has no pointer to + * parent hence we need to go top down; but OB at each block really belong + * to its src/from. Hence the need to pass in sortRel for each block from + * its parent. */ if (sortrel != null) { HiveSortRel hiveSort = (HiveSortRel) sortrel; @@ -145,9 +149,9 @@ public ASTNode convert(SortRel sortrel) { * ASTNode on unqualified name. */ ASTNode astCol = ASTBuilder.unqualifiedName(cI.column); - ASTNode astNode = c.getDirection() == RelFieldCollation.Direction.ASCENDING - ? ASTBuilder.createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC") - : ASTBuilder.createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC"); + ASTNode astNode = c.getDirection() == RelFieldCollation.Direction.ASCENDING ? ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC") : ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC"); astNode.addChild(astCol); orderAst.addChild(astNode); } @@ -182,7 +186,8 @@ private QueryBlockInfo convertSource(RelNode r) { QueryBlockInfo right = convertSource(join.getRight()); s = new Schema(left.schema, right.schema); ASTNode cond = join.getCondition().accept(new RexVisitor(s)); - boolean semiJoin = ((join instanceof HiveJoinRel) && ((HiveJoinRel)join).isLeftSemiJoin()) ? true : false; + boolean semiJoin = ((join instanceof HiveJoinRel) && ((HiveJoinRel) join).isLeftSemiJoin()) ? 
true + : false; ast = ASTBuilder.join(left.ast, right.ast, join.getJoinType(), cond, semiJoin); if (semiJoin) s = left.schema; @@ -264,6 +269,119 @@ public ASTNode visitLiteral(RexLiteral literal) { return ASTBuilder.literal(literal); } + private ASTNode getPSpecAST(RexWindow window) { + ASTNode pSpecAst = null; + + ASTNode dByAst = null; + if (window.partitionKeys != null && !window.partitionKeys.isEmpty()) { + dByAst = ASTBuilder.createAST(HiveParser.TOK_DISTRIBUTEBY, "TOK_DISTRIBUTEBY"); + for (RexNode pk : window.partitionKeys) { + ASTNode astCol = pk.accept(this); + dByAst.addChild(astCol); + } + } + + ASTNode oByAst = null; + if (window.orderKeys != null && !window.orderKeys.isEmpty()) { + oByAst = ASTBuilder.createAST(HiveParser.TOK_ORDERBY, "TOK_ORDERBY"); + for (RexFieldCollation ok : window.orderKeys) { + ASTNode astNode = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC") : ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC"); + ASTNode astCol = ok.left.accept(this); + astNode.addChild(astCol); + oByAst.addChild(astNode); + } + } + + if (dByAst != null || oByAst != null) { + pSpecAst = ASTBuilder.createAST(HiveParser.TOK_PARTITIONINGSPEC, "TOK_PARTITIONINGSPEC"); + if (dByAst != null) + pSpecAst.addChild(dByAst); + if (oByAst != null) + pSpecAst.addChild(oByAst); + } + + return pSpecAst; + } + + private ASTNode getWindowBound(RexWindowBound wb) { + ASTNode wbAST = null; + + if (wb.isCurrentRow()) { + wbAST = ASTBuilder.createAST(HiveParser.KW_CURRENT, "CURRENT"); + } else { + if (wb.isPreceding()) + wbAST = ASTBuilder.createAST(HiveParser.KW_PRECEDING, "PRECEDING"); + else + wbAST = ASTBuilder.createAST(HiveParser.KW_FOLLOWING, "FOLLOWING"); + if (wb.isUnbounded()) { + wbAST.addChild(ASTBuilder.createAST(HiveParser.KW_UNBOUNDED, "UNBOUNDED")); + } else { + ASTNode offset = wb.getOffset().accept(this); + wbAST.addChild(offset); + } + } + + return wbAST; + } + + private ASTNode getWindowRangeAST(RexWindow window) { + ASTNode wRangeAst = null; + + ASTNode startAST = null; + RexWindowBound ub = (RexWindowBound)window.getUpperBound(); + if (ub != null) { + startAST = getWindowBound(ub); + } + + ASTNode endAST = null; + RexWindowBound lb = (RexWindowBound)window.getLowerBound(); + if (lb != null) { + endAST = getWindowBound(lb); + } + + if (startAST != null || endAST != null) { + //NOTE: in Hive AST Rows->Range(Physical) & Range -> Values (logical) + if (window.isRows()) + wRangeAst = ASTBuilder.createAST(HiveParser.TOK_WINDOWRANGE, "TOK_WINDOWRANGE"); + else + wRangeAst = ASTBuilder.createAST(HiveParser.TOK_WINDOWVALUES, "TOK_WINDOWVALUES"); + if (startAST != null) + wRangeAst.addChild(startAST); + if (endAST != null) + wRangeAst.addChild(endAST); + } + + return wRangeAst; + } + + @Override + public ASTNode visitOver(RexOver over) { + if (!deep) { + return null; + } + + // 1. Translate the UDAF + final ASTNode wUDAFAst = visitCall(over); + + // 2. Add TOK_WINDOW as child of UDAF + ASTNode wSpec = ASTBuilder.createAST(HiveParser.TOK_WINDOWSPEC, "TOK_WINDOWSPEC"); + wUDAFAst.addChild(wSpec); + + // 3. 
Add Part Spec & Range Spec as child of TOK_WINDOW + final RexWindow window = over.getWindow(); + final ASTNode wPSpecAst = getPSpecAST(window); + final ASTNode wRangeAst = getWindowRangeAST(window); + if (wPSpecAst != null) + wSpec.addChild(wPSpecAst); + if (wRangeAst != null) + wSpec.addChild(wRangeAst); + + + return wUDAFAst; + } + @Override public ASTNode visitCall(RexCall call) { if (!deep) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 3da2491..053984e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -252,10 +252,21 @@ import org.eigenbase.rex.RexBuilder; import org.eigenbase.rex.RexInputRef; import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexWindowBound; +import org.eigenbase.rex.RexFieldCollation; +import org.eigenbase.sql.SqlAggFunction; +import org.eigenbase.sql.SqlWindow; +import org.eigenbase.sql.parser.SqlParserPos; +import org.eigenbase.sql.type.SqlTypeName; +import org.eigenbase.sql.SqlCall; +import org.eigenbase.sql.SqlKind; +import org.eigenbase.sql.SqlNode; +import org.eigenbase.sql.SqlLiteral; import org.eigenbase.util.CompositeList; import com.google.common.base.Function; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableList.Builder; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; @@ -11762,7 +11773,6 @@ private boolean canHandleQuery() { && !queryProperties.hasClusterBy() && !queryProperties.hasDistributeBy() && !queryProperties.hasSortBy() - && !queryProperties.hasWindowing() && !queryProperties.hasPTF() && !queryProperties.usesScript() && !queryProperties.hasMultiDestQuery() @@ -12628,6 +12638,162 @@ private RelNode genLimitLogicalPlan(QB qb, RelNode srcRel) return sortRel; } + List getPartitionKeys(PartitionSpec ps, RexNodeConverter converter, RowResolver inputRR) + throws SemanticException { + List pKeys = new ArrayList(); + if (ps != null) { + List pExprs = ps.getExpressions(); + for (PartitionExpression pExpr : pExprs) { + TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR); + tcCtx.setAllowStatefulFunctions(true); + ExprNodeDesc exp = genExprNodeDesc(pExpr.getExpression(), inputRR, tcCtx); + pKeys.add(converter.convert(exp)); + } + } + + return pKeys; + } + + List getOrderKeys(OrderSpec os, RexNodeConverter converter, RowResolver inputRR) throws SemanticException { + List oKeys = new ArrayList(); + if (os != null) { + List oExprs = os.getExpressions(); + for (OrderExpression oExpr : oExprs) { + TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR); + tcCtx.setAllowStatefulFunctions(true); + ExprNodeDesc exp = genExprNodeDesc(oExpr.getExpression(), inputRR, tcCtx); + RexNode ordExp = converter.convert(exp); + Set flags = new HashSet(); + if (oExpr.getOrder() == org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.DESC) + flags.add(SqlKind.DESCENDING); + oKeys.add(new RexFieldCollation(ordExp, flags)); + } + } + + return oKeys; + } + + RexWindowBound getBound(BoundarySpec bs, RexNodeConverter converter) { + RexWindowBound rwb = null; + + if (bs != null) { + SqlNode sn = null; + SqlParserPos pos = new SqlParserPos(1, 1); + SqlNode amt = bs.getAmt() == 0 ? 
null : SqlLiteral.createExactNumeric( + String.valueOf(bs.getAmt()), new SqlParserPos(2, 2)); + RexNode amtLiteral = null; + SqlCall sc = null; + RexNode rn = null; + + if (amt != null) + amtLiteral = m_cluster.getRexBuilder().makeLiteral(new Integer(bs.getAmt()), + m_cluster.getTypeFactory().createSqlType(SqlTypeName.INTEGER), true); + + switch (bs.getDirection()) { + case PRECEDING: + if (amt == null) { + rwb = RexWindowBound.create(SqlWindow.createUnboundedPreceding(pos), null); + } else { + sc = (SqlCall) SqlWindow.createPreceding(amt, pos); + rwb = RexWindowBound.create(sc, m_cluster.getRexBuilder().makeCall(sc.getOperator(), amtLiteral)); + } + break; + + case CURRENT: + rwb = RexWindowBound.create(SqlWindow.createCurrentRow(new SqlParserPos(1, 1)), null); + break; + + case FOLLOWING: + if (amt == null) { + rwb = RexWindowBound.create(SqlWindow.createUnboundedFollowing(new SqlParserPos(1, 1)), null); + } else { + sc = (SqlCall) SqlWindow.createFollowing(amt, pos); + rwb = RexWindowBound.create(sc, m_cluster.getRexBuilder().makeCall(sc.getOperator(), amtLiteral)); + } + break; + } + } + + return rwb; + } + + + Pair genWindowingProj(QB qb, ASTNode windowProjAst, int wndSpecASTIndx, int wndProjPos, + RelNode srcRel) throws SemanticException { + RexNode w = null; + TypeInfo wHiveRetType = null; + QBParseInfo qbp = getQBParseInfo(qb); + WindowingSpec wSpec = qb.getAllWindowingSpecs().values().iterator().next(); + + if (wSpec != null) { + // 1. Get valid Window Function Spec + // NOTE: WindowSpec uses alias "_wcol0","_wcol1"... for + // WindowFunctionSpec + wSpec.validateAndMakeEffective(); + WindowExpressionSpec wExpSpec = wSpec.aliasToWdwExpr.get("_wcol" + wndProjPos); + //TODO: Throw exception if wExpSpec is not of type WindowFunctionSpec + if (wExpSpec instanceof WindowFunctionSpec) { + + // 2. Get Hive Aggregate Info + AggInfo hiveAggInfo = getHiveAggInfo(windowProjAst, wndSpecASTIndx - 1, + this.m_relToHiveRR.get(srcRel)); + + // 3. Get Optiq Return type for Agg Fn + wHiveRetType = hiveAggInfo.m_returnType; + RelDataType optiqAggFnRetType = TypeConverter.convert(hiveAggInfo.m_returnType, + this.m_cluster.getTypeFactory()); + + // 4. Convert Agg Fn args to Optiq + ImmutableMap posMap = this.m_relToHiveColNameOptiqPosMap.get(srcRel); + RexNodeConverter converter = new RexNodeConverter(this.m_cluster, srcRel.getRowType(), + posMap, 0, false); + Builder optiqAggFnArgsBldr = ImmutableList. builder(); + Builder optiqAggFnArgsTypeBldr = ImmutableList. builder(); + RexNode rexNd = null; + for (int i = 0; i < hiveAggInfo.m_aggParams.size(); i++) { + optiqAggFnArgsBldr.add(converter.convert(hiveAggInfo.m_aggParams.get(i))); + optiqAggFnArgsTypeBldr.add(TypeConverter.convert(hiveAggInfo.m_aggParams.get(i) + .getTypeInfo(), this.m_cluster.getTypeFactory())); + } + ImmutableList optiqAggFnArgs = optiqAggFnArgsBldr.build(); + ImmutableList optiqAggFnArgsType = optiqAggFnArgsTypeBldr.build(); + + // 5. Get Optiq Agg Fn + final SqlAggFunction optiqAggFn = SqlFunctionConverter.getOptiqAggFn(hiveAggInfo.m_udfName, + optiqAggFnArgsType, optiqAggFnRetType); + + // 6. 
Translate Window spec + RowResolver inputRR = m_relToHiveRR.get(srcRel); + WindowSpec wndSpec = ((WindowFunctionSpec) wExpSpec).getWindowSpec(); + List partitionKeys = getPartitionKeys(wndSpec.getPartition(), converter, inputRR); + List orderKeys = getOrderKeys(wndSpec.getOrder(), converter, inputRR); + RexWindowBound upperBound = getBound(wndSpec.windowFrame.start, converter); + RexWindowBound lowerBound = getBound(wndSpec.windowFrame.end, converter); + boolean isRows = ((wndSpec.windowFrame.start instanceof RangeBoundarySpec) || (wndSpec.windowFrame.end instanceof RangeBoundarySpec)) ? true + : false; + + w = m_cluster.getRexBuilder().makeOver(optiqAggFnRetType, optiqAggFn, optiqAggFnArgs, + partitionKeys, ImmutableList. copyOf(orderKeys), lowerBound, + upperBound, isRows, true, false); + } else { + //TODO: Convert to Semantic Exception + throw new RuntimeException("Unsupported window Spec"); + } + } + + return new Pair(w, wHiveRetType); + } + + int getWindowSpecIndx(ASTNode wndAST) { + int wndASTIndx = -1; + int wi = wndAST.getChildCount() - 1; + if (wi <= 0 || (wndAST.getChild(wi).getType() != HiveParser.TOK_WINDOWSPEC)) { + wi = -1; + } + + return wi; + } + /** * NOTE: there can only be one select caluse since we don't handle multi * destination insert. @@ -12638,6 +12804,7 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticException { boolean subQuery; ArrayList col_list = new ArrayList(); + ArrayList> windowingRexNodes = new ArrayList>(); // 1. Get Select Expression List QBParseInfo qbp = getQBParseInfo(qb); @@ -12688,6 +12855,7 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) // 6. Iterate over all expression (after SELECT) ASTNode exprList = selExprList; int startPosn = posn; + int wndProjPos = 0; List tabAliasesForAllProjs = getTabAliases(inputRR); for (int i = startPosn; i < exprList.getChildCount(); ++i) { @@ -12695,11 +12863,38 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) ASTNode child = (ASTNode) exprList.getChild(i); boolean hasAsClause = (!isInTransform) && (child.getChildCount() == 2); - // 6.2 bail out if it is windowing spec - boolean isWindowSpec = child.getChildCount() == 3 ? (child.getChild(2) - .getType() == HiveParser.TOK_WINDOWSPEC) : false; - if (isWindowSpec) - throw new RuntimeException("Windowing is not supported yet"); + + // 6.2 Handle windowing spec + int wndSpecASTIndx = -1; + //TODO: is the check ((child.getChildCount() == 1) || hasAsClause) needed? + boolean isWindowSpec = (((child.getChildCount() == 1) || hasAsClause) && child.getChild(0) + .getType() == HiveParser.TOK_FUNCTION) ? ((wndSpecASTIndx = getWindowSpecIndx((ASTNode) child + .getChild(0))) > 0) : false; + if (isWindowSpec) { + Pair wtp = genWindowingProj(qb, (ASTNode) child.getChild(0), wndSpecASTIndx, + wndProjPos, srcRel); + windowingRexNodes.add(new Pair(pos, wtp.getFirst())); + + // 6.2.1 Check if window expr has alias + String colAlias = null; + ASTNode tabOrColAst = (ASTNode) child.getChild(1); + if (tabOrColAst != null) + colAlias = BaseSemanticAnalyzer.getUnescapedName(tabOrColAst); + + // 6.2.2 Update Output Row Schema + ColumnInfo oColInfo = new ColumnInfo(getColumnInternalName(pos), wtp.getSecond(), null, + false); + if (colAlias != null) { + out_rwsch.checkColumn(null, colAlias); + out_rwsch.put(null, colAlias, oColInfo); + } else { + out_rwsch.putExpression(child, oColInfo); + } + + pos = Integer.valueOf(pos.intValue() + 1); + wndProjPos++; + continue; + } // 6.3 EXPR AS (ALIAS,...) 
parses, but is only allowed for UDTF's // This check is not needed and invalid when there is a transform b/c @@ -12805,9 +13000,15 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) for (ExprNodeDesc colExpr : col_list) { optiqColLst.add(rexNodeConv.convert(colExpr)); } + + // 9. Add windowing Proj Names + for (Pair wndPair : windowingRexNodes) { + optiqColLst.add(wndPair.getFirst(), wndPair.getSecond()); + columnNames.add(getColumnInternalName(wndPair.getFirst())); + } - // 9. Construct Hive Project Rel - // 9.1. Prepend column names with '_o_' + // 10. Construct Hive Project Rel + // 10.1. Prepend column names with '_o_' /* * Hive treats names that start with '_c' as internalNames; so change the * names so we don't run into this issue when converting back to Hive AST. @@ -12818,11 +13019,11 @@ public String apply(String hName) { return "_o_" + hName; } }); - // 9.2 Build Optiq Rel Node for project using converted projections & col + // 10.2 Build Optiq Rel Node for project using converted projections & col // names HiveRel selRel = HiveProjectRel.create(srcRel, optiqColLst, oFieldNames); - // 10. Keep track of colname-to-posmap && RR for new select + // 11. Keep track of colname-to-posmap && RR for new select this.m_relToHiveColNameOptiqPosMap.put(selRel, buildHiveToOptiqColumnMap(out_rwsch, selRel)); this.m_relToHiveRR.put(selRel, out_rwsch); diff --git a/ql/src/test/queries/clientpositive/cbo_correctness.q b/ql/src/test/queries/clientpositive/cbo_correctness.q index 65ed130..c01727c 100644 --- a/ql/src/test/queries/clientpositive/cbo_correctness.q +++ b/ql/src/test/queries/clientpositive/cbo_correctness.q @@ -172,3 +172,13 @@ select count(distinct c_int) as a, avg(c_float) from t1 group by c_float; select count(distinct c_int) as a, avg(c_float) from t1 group by c_int; select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int; +-- 9. 
Test Windowing Functions +select count(c_int) over() from t1; +select * from (select count(c_int) over() from t1) t1; +select count(c_int) over(), sum(c_float) over() from t1; +select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1; +select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1; +select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1; + + + diff --git a/ql/src/test/results/clientpositive/cbo_correctness.q.out b/ql/src/test/results/clientpositive/cbo_correctness.q.out index 49f2661..93e3f59 100644 --- a/ql/src/test/results/clientpositive/cbo_correctness.q.out +++ b/ql/src/test/results/clientpositive/cbo_correctness.q.out @@ -15776,3 +15776,173 @@ POSTHOOK: Input: default@t1 #### A masked pattern was here #### 0 NULL 1 1.0 +PREHOOK: query: -- 9. Test Windowing Functions +select count(c_int) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: -- 9. 
Test Windowing Functions +select count(c_int) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: select * from (select count(c_int) over() from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(c_int) over() from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: select count(c_int) over(), sum(c_float) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select count(c_int) over(), sum(c_float) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +18 18.0 +PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 
1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 3.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 4.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 5.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 6.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 7.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 8.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 9.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 10.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 11.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 12.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL
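
Reviewer note on the new visitOver translation: pieced together from the builder calls above (and hedged accordingly, since the exact child layout of the UDAF call comes from visitCall/ASTBuilder), the AST emitted for a projection such as

  count(c_int) over (partition by key order by value rows between 1 preceding and current row)

should look roughly like:

  TOK_FUNCTION
      count
      <c_int column reference, as produced by visitCall>
      TOK_WINDOWSPEC
          TOK_PARTITIONINGSPEC
              TOK_DISTRIBUTEBY   <key>
              TOK_ORDERBY   TOK_TABSORTCOLNAMEASC <value>
          TOK_WINDOWRANGE
              PRECEDING 1
              CURRENT

Note that getWindowRangeAST reads the frame start from RexWindow.getUpperBound() and the frame end from getLowerBound(); this appears to mirror the inverted assignment in genWindowingProj (upperBound taken from windowFrame.start, lowerBound from windowFrame.end), so the two inversions cancel out on the round trip between the Hive AST and the Optiq RexOver.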
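
On the frame translation itself: per the NOTE in getWindowRangeAST, a physical (ROWS) frame becomes TOK_WINDOWRANGE in the Hive AST while a logical (RANGE) frame becomes TOK_WINDOWVALUES. Both paths are exercised by the new tests; a minimal pair, lifted from the expanded queries in cbo_correctness.q:

select sum(c_float) over (partition by key order by value rows between 1 preceding and current row) from t1;

select sum(value) over (partition by key order by value range between unbounded preceding and current row) from t1;

The first query takes the window.isRows() branch and emits TOK_WINDOWRANGE; the second takes the else branch and emits TOK_WINDOWVALUES.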