diff --git ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java index 1b76fc9..38423cf 100644 --- ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java +++ ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java @@ -409,6 +409,13 @@ {"ColumnUnaryFunc", "FuncSign", "double", "double", "MathExpr.sign", "", "", ""}, {"ColumnUnaryFunc", "FuncSign", "double", "long", "MathExpr.sign", "(double)", "", ""}, + {"DecimalColumnUnaryFunc", "FuncFloor", "decimal", "DecimalUtil.floor"}, + {"DecimalColumnUnaryFunc", "FuncCeil", "decimal", "DecimalUtil.ceiling"}, + {"DecimalColumnUnaryFunc", "FuncAbs", "decimal", "DecimalUtil.abs"}, + {"DecimalColumnUnaryFunc", "FuncSign", "long", "DecimalUtil.sign"}, + {"DecimalColumnUnaryFunc", "FuncRound", "decimal", "DecimalUtil.round"}, + {"DecimalColumnUnaryFunc", "FuncNegate", "decimal", "DecimalUtil.negate"}, + // Casts {"ColumnUnaryFunc", "Cast", "long", "double", "", "", "(long)", ""}, {"ColumnUnaryFunc", "Cast", "double", "long", "", "", "(double)", ""}, @@ -615,6 +622,8 @@ private void generate() throws Exception { generateColumnUnaryMinus(tdesc); } else if (tdesc[0].equals("ColumnUnaryFunc")) { generateColumnUnaryFunc(tdesc); + } else if (tdesc[0].equals("DecimalColumnUnaryFunc")) { + generateDecimalColumnUnaryFunc(tdesc); } else if (tdesc[0].equals("VectorUDAFMinMax")) { generateVectorUDAFMinMax(tdesc); } else if (tdesc[0].equals("VectorUDAFMinMaxString")) { @@ -969,6 +978,25 @@ private void generateIfExprScalarScalar(String[] tdesc) throws IOException { className, templateString); } + // template, , , + private void generateDecimalColumnUnaryFunc(String [] tdesc) throws IOException { + String classNamePrefix = tdesc[1]; + String returnType = tdesc[2]; + String operandType = "decimal"; + String outputColumnVectorType = this.getColumnVectorType(returnType); + String className = classNamePrefix + getCamelCaseType(operandType) + "To" + + getCamelCaseType(returnType); + File 
templateFile = new File(joinPath(this.expressionTemplateDirectory, tdesc[0] + ".txt")); + String templateString = readFile(templateFile); + String funcName = tdesc[3]; + // Expand, and write result + templateString = templateString.replaceAll("<ClassName>", className); + templateString = templateString.replaceAll("<OutputColumnVectorType>", outputColumnVectorType); + templateString = templateString.replaceAll("<FuncName>", funcName); + writeFile(templateFile.lastModified(), expressionOutputDirectory, expressionClassesDirectory, + className, templateString); + } + + // template, <ClassNamePrefix>, <ReturnType>, <OperandType>, <FuncName>, <OperandCast>, <ResultCast> private void generateColumnUnaryFunc(String[] tdesc) throws IOException { String classNamePrefix = tdesc[1]; @@ -1346,6 +1374,8 @@ static String getCamelCaseType(String type) { return "Long"; } else if (type.equals("double")) { return "Double"; + } else if (type.equals("decimal")) { + return "Decimal"; } else { return type; } @@ -1372,6 +1402,8 @@ private String getArithmeticReturnType(String operandType1, private String getColumnVectorType(String primitiveType) { if(primitiveType!=null && primitiveType.equals("double")) { return "DoubleColumnVector"; + } else if (primitiveType != null && primitiveType.equals("decimal")) { + return "DecimalColumnVector"; } return "LongColumnVector"; } diff --git common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java index 2e0f058..1c828af 100644 --- common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java +++ common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java @@ -1735,7 +1735,7 @@ public void updateFixedPoint(long val, short scale) { * Argument scratch is needed to hold unused remainder output, to avoid need to * create a new object. */ - public void zeroFractionPart() { + public void zeroFractionPart(UnsignedInt128 scratch) { short placesToRemove = this.getScale(); // If there's no fraction part, return immediately to avoid the cost of a divide. 
@@ -1748,10 +1748,6 @@ public void zeroFractionPart() { */ UnsignedInt128 powerTenDivisor = SqlMathUtil.POWER_TENS_INT128[placesToRemove]; - /* A scratch variable is created here. This could be optimized in the future - * by perhaps using thread-local storage to allocate this scratch field. - */ - UnsignedInt128 scratch = new UnsignedInt128(); this.getUnscaledValue().divideDestructive(powerTenDivisor, scratch); /* Multiply by the same power of ten to shift the decimal point back to @@ -1759,4 +1755,12 @@ public void zeroFractionPart() { */ this.getUnscaledValue().scaleUpTenDestructive(placesToRemove); } + + public void zeroFractionPart() { + /* A scratch variable is created here. This could be optimized in the future + * by perhaps using thread-local storage to allocate this scratch field. + */ + UnsignedInt128 scratch = new UnsignedInt128(); + zeroFractionPart(scratch); + } } diff --git ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt new file mode 100644 index 0000000..86bd0a2 --- /dev/null +++ ql/src/gen/vectorization/ExpressionTemplates/DecimalColumnUnaryFunc.txt @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
 + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.expressions.gen; + +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.expressions.MathExpr; +import org.apache.hadoop.hive.ql.exec.vector.*; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; +import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; + + +public class <ClassName> extends VectorExpression { + private static final long serialVersionUID = 1L; + + private int colNum; + private int outputColumn; + + public <ClassName>(int colNum, int outputColumn) { + this(); + this.colNum = colNum; + this.outputColumn = outputColumn; + } + + public <ClassName>() { + super(); + } + + @Override + public void evaluate(VectorizedRowBatch batch) { + + if (childExpressions != null) { + this.evaluateChildren(batch); + } + + DecimalColumnVector inputColVector = (DecimalColumnVector) batch.cols[colNum]; + <OutputColumnVectorType> outputColVector = (<OutputColumnVectorType>) batch.cols[outputColumn]; + int[] sel = batch.selected; + boolean[] inputIsNull = inputColVector.isNull; + boolean[] outputIsNull = outputColVector.isNull; + outputColVector.noNulls = inputColVector.noNulls; + int n = batch.size; + Decimal128[] vector = inputColVector.vector; + + // return immediately if batch is empty + if (n == 0) { + return; + } + + if (inputColVector.isRepeating) { + //All must be selected otherwise size would be zero + //Repeating property will not change. 
 + outputIsNull[0] = inputIsNull[0]; + <FuncName>(0, vector[0], outputColVector); + outputColVector.isRepeating = true; + } else if (inputColVector.noNulls) { + if (batch.selectedInUse) { + for(int j = 0; j != n; j++) { + int i = sel[j]; + + // Set isNull because decimal operation can yield a null. + outputIsNull[i] = false; + <FuncName>(i, vector[i], outputColVector); + } + } else { + for(int i = 0; i != n; i++) { + + // Set isNull because decimal operation can yield a null. + outputIsNull[i] = false; + <FuncName>(i, vector[i], outputColVector); + } + } + outputColVector.isRepeating = false; + } else /* there are nulls */ { + if (batch.selectedInUse) { + for(int j = 0; j != n; j++) { + int i = sel[j]; + outputIsNull[i] = inputIsNull[i]; + <FuncName>(i, vector[i], outputColVector); + } + } else { + System.arraycopy(inputIsNull, 0, outputIsNull, 0, n); + for(int i = 0; i != n; i++) { + <FuncName>(i, vector[i], outputColVector); + } + } + outputColVector.isRepeating = false; + } + } + + @Override + public int getOutputColumn() { + return outputColumn; + } + + @Override + public String getOutputType() { + return outputType; + } + + public int getColNum() { + return colNum; + } + + public void setColNum(int colNum) { + this.colNum = colNum; + } + + public void setOutputColumn(int outputColumn) { + this.outputColumn = outputColumn; + } + + @Override + public VectorExpressionDescriptor.Descriptor getDescriptor() { + return (new VectorExpressionDescriptor.Builder()) + .setMode( + VectorExpressionDescriptor.Mode.PROJECTION) + .setNumArguments(1) + .setArgumentTypes( + VectorExpressionDescriptor.ArgumentType.DECIMAL) + .setInputExpressionTypes( + VectorExpressionDescriptor.InputExpressionType.COLUMN).build(); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java index f69bfc0..e6d1141 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java @@ -81,16 +81,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; -import org.apache.hadoop.hive.ql.udf.UDFConv; -import org.apache.hadoop.hive.ql.udf.UDFHex; -import org.apache.hadoop.hive.ql.udf.UDFToBoolean; -import org.apache.hadoop.hive.ql.udf.UDFToByte; -import org.apache.hadoop.hive.ql.udf.UDFToDouble; -import org.apache.hadoop.hive.ql.udf.UDFToFloat; -import org.apache.hadoop.hive.ql.udf.UDFToInteger; -import org.apache.hadoop.hive.ql.udf.UDFToLong; -import org.apache.hadoop.hive.ql.udf.UDFToShort; -import org.apache.hadoop.hive.ql.udf.UDFToString; +import org.apache.hadoop.hive.ql.udf.*; import org.apache.hadoop.hive.ql.udf.generic.*; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; @@ -340,7 +331,7 @@ private TypeInfo getCommonTypeForChildExpressions(GenericUDF genericUdf, List getChildExpressionsWithImplicitCast(GenericUDF genericUDF, List children, TypeInfo returnType) { - if (isCastExpression(genericUDF)) { + if (isExcludedFromCast(genericUDF)) { // No implicit cast needed return children; @@ -348,6 +339,7 @@ private TypeInfo getCommonTypeForChildExpressions(GenericUDF genericUdf, List childrenWithCasts = new ArrayList(); boolean atleastOneCastNeeded = false; @@ -367,12 +359,18 @@ private TypeInfo getCommonTypeForChildExpressions(GenericUDF genericUdf, List udfClass = ((GenericUDFBridge) genericUDF).getUdfClass(); + return castExpressionUdfs.contains(udfClass) + || UDFSign.class.isAssignableFrom(udfClass); } return false; } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java index 589450f..cdc0ea0 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java @@ -19,15 +19,25 @@ package org.apache.hadoop.hive.ql.exec.vector.expressions; import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.common.type.SqlMathUtil; import org.apache.hadoop.hive.common.type.UnsignedInt128; import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; +import org.apache.hadoop.hive.ql.udf.generic.RoundUtils; /** * Utility functions for vector operations on decimal values. */ public class DecimalUtil { + public static final Decimal128 DECIMAL_ONE = new Decimal128(); + private static final UnsignedInt128 scratchUInt128 = new UnsignedInt128(); + + static { + DECIMAL_ONE.update(1L, (short) 0); + } + // Addition with overflow check. Overflow produces NULL output. 
public static void addChecked(int i, Decimal128 left, Decimal128 right, DecimalColumnVector outputColVector) { @@ -87,4 +97,73 @@ public static void moduloChecked(int i, Decimal128 left, Decimal128 right, outputColVector.isNull[i] = true; } } + + public static void floor(int i, Decimal128 input, DecimalColumnVector outputColVector) { + try { + Decimal128 result = outputColVector.vector[i]; + result.update(input); + result.zeroFractionPart(scratchUInt128); + result.changeScaleDestructive(outputColVector.scale); + if ((result.compareTo(input) != 0) && input.getSignum() < 0) { + result.subtractDestructive(DECIMAL_ONE, outputColVector.scale); + } + } catch (ArithmeticException e) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void ceiling(int i, Decimal128 input, DecimalColumnVector outputColVector) { + try { + Decimal128 result = outputColVector.vector[i]; + result.update(input); + result.zeroFractionPart(scratchUInt128); + result.changeScaleDestructive(outputColVector.scale); + if ((result.compareTo(input) != 0) && input.getSignum() > 0) { + result.addDestructive(DECIMAL_ONE, outputColVector.scale); + } + } catch (ArithmeticException e) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void round(int i, Decimal128 input, DecimalColumnVector outputColVector) { + HiveDecimal inputHD = HiveDecimal.create(input.toBigDecimal()); + HiveDecimal result = RoundUtils.round(inputHD, outputColVector.scale); + if (result == null) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } else { + outputColVector.vector[i].update(result.bigDecimalValue().toPlainString(), outputColVector.scale); + } + } + + public static void sign(int i, Decimal128 input, LongColumnVector outputColVector) { + outputColVector.vector[i] = input.getSignum(); + } + + public static void abs(int i, Decimal128 input, DecimalColumnVector outputColVector) { + Decimal128 result = 
outputColVector.vector[i]; + try { + result.update(input); + result.absDestructive(); + result.changeScaleDestructive(outputColVector.scale); + } catch (ArithmeticException e) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } + + public static void negate(int i, Decimal128 input, DecimalColumnVector outputColVector) { + Decimal128 result = outputColVector.vector[i]; + try { + result.update(input); + result.negateDestructive(); + result.changeScaleDestructive(outputColVector.scale); + } catch (ArithmeticException e) { + outputColVector.noNulls = false; + outputColVector.isNull[i] = true; + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java new file mode 100644 index 0000000..6f71438 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.vector.expressions; + +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor; +import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.expressions.DecimalUtil; + + +public class FuncRoundWithNumDigitsDecimalToDecimal extends VectorExpression { + private static final long serialVersionUID = 1L; + + private int colNum; + private int outputColumn; + private int decimalPlaces; + + public FuncRoundWithNumDigitsDecimalToDecimal(int colNum, int scalarValue, int outputColumn) { + this(); + this.colNum = colNum; + this.outputColumn = outputColumn; + this.decimalPlaces = scalarValue; + this.outputType = "decimal"; + } + + public FuncRoundWithNumDigitsDecimalToDecimal() { + super(); + } + + @Override + public void evaluate(VectorizedRowBatch batch) { + + if (childExpressions != null) { + this.evaluateChildren(batch); + } + + DecimalColumnVector inputColVector = (DecimalColumnVector) batch.cols[colNum]; + DecimalColumnVector outputColVector = (DecimalColumnVector) batch.cols[outputColumn]; + int[] sel = batch.selected; + boolean[] inputIsNull = inputColVector.isNull; + boolean[] outputIsNull = outputColVector.isNull; + outputColVector.noNulls = inputColVector.noNulls; + int n = batch.size; + Decimal128[] vector = inputColVector.vector; + + // return immediately if batch is empty + if (n == 0) { + return; + } + + if (inputColVector.isRepeating) { + //All must be selected otherwise size would be zero + //Repeating property will not change. 
+ outputIsNull[0] = inputIsNull[0]; + DecimalUtil.round(0, vector[0], outputColVector); + outputColVector.isRepeating = true; + } else if (inputColVector.noNulls) { + if (batch.selectedInUse) { + for(int j = 0; j != n; j++) { + int i = sel[j]; + + // Set isNull because decimal operation can yield a null. + outputIsNull[i] = false; + DecimalUtil.round(i, vector[i], outputColVector); + } + } else { + for(int i = 0; i != n; i++) { + + // Set isNull because decimal operation can yield a null. + outputIsNull[i] = false; + DecimalUtil.round(i, vector[i], outputColVector); + } + } + outputColVector.isRepeating = false; + } else /* there are nulls */ { + if (batch.selectedInUse) { + for(int j = 0; j != n; j++) { + int i = sel[j]; + outputIsNull[i] = inputIsNull[i]; + DecimalUtil.round(i, vector[i], outputColVector); + } + } else { + System.arraycopy(inputIsNull, 0, outputIsNull, 0, n); + for(int i = 0; i != n; i++) { + DecimalUtil.round(i, vector[i], outputColVector); + } + } + outputColVector.isRepeating = false; + } + } + + @Override + public int getOutputColumn() { + return outputColumn; + } + + @Override + public String getOutputType() { + return outputType; + } + + public int getColNum() { + return colNum; + } + + public void setColNum(int colNum) { + this.colNum = colNum; + } + + public void setOutputColumn(int outputColumn) { + this.outputColumn = outputColumn; + } + + public int getDecimalPlaces() { + return decimalPlaces; + } + + public void setDecimalPlaces(int decimalPlaces) { + this.decimalPlaces = decimalPlaces; + } + + + @Override + public VectorExpressionDescriptor.Descriptor getDescriptor() { + return (new VectorExpressionDescriptor.Builder()) + .setMode( + VectorExpressionDescriptor.Mode.PROJECTION) + .setNumArguments(2) + .setArgumentTypes( + VectorExpressionDescriptor.ArgumentType.DECIMAL, + VectorExpressionDescriptor.ArgumentType.LONG) + .setInputExpressionTypes( + VectorExpressionDescriptor.InputExpressionType.COLUMN, + 
VectorExpressionDescriptor.InputExpressionType.SCALAR).build(); + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java index 628f06d..6e4bee0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDF; import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions; +import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncSignDecimalToLong; import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncSignDoubleToDouble; import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncSignLongToDouble; import org.apache.hadoop.hive.serde2.io.DoubleWritable; @@ -36,7 +37,7 @@ " > SELECT _FUNC_(40) FROM src LIMIT 1;\n" + " 1" ) -@VectorizedExpressions({FuncSignLongToDouble.class, FuncSignDoubleToDouble.class}) +@VectorizedExpressions({FuncSignLongToDouble.class, FuncSignDoubleToDouble.class, FuncSignDecimalToLong.class}) public class UDFSign extends UDF { @SuppressWarnings("unused") diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFAbs.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFAbs.java index 1c1bcfe..ee7b821 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFAbs.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFAbs.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions; +import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncAbsDecimalToDecimal; import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncAbsDoubleToDouble; import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncAbsLongToLong; import 
org.apache.hadoop.hive.ql.metadata.HiveException; @@ -48,7 +49,7 @@ + " > SELECT _FUNC_(0) FROM src LIMIT 1;\n" + " 0\n" + " > SELECT _FUNC_(-5) FROM src LIMIT 1;\n" + " 5") -@VectorizedExpressions({FuncAbsLongToLong.class, FuncAbsDoubleToDouble.class}) +@VectorizedExpressions({FuncAbsLongToLong.class, FuncAbsDoubleToDouble.class, FuncAbsDecimalToDecimal.class}) public class GenericUDFAbs extends GenericUDF { private transient PrimitiveCategory inputType; private final DoubleWritable resultDouble = new DoubleWritable(); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCeil.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCeil.java index ceb56bb..95ec32e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCeil.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCeil.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions; +import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncCeilDecimalToDecimal; import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncCeilDoubleToLong; import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncCeilLongToLong; import org.apache.hadoop.hive.serde2.io.DoubleWritable; @@ -33,7 +34,7 @@ + " > SELECT _FUNC_(-0.1) FROM src LIMIT 1;\n" + " 0\n" + " > SELECT _FUNC_(5) FROM src LIMIT 1;\n" + " 5") -@VectorizedExpressions({FuncCeilLongToLong.class, FuncCeilDoubleToLong.class}) +@VectorizedExpressions({FuncCeilLongToLong.class, FuncCeilDoubleToLong.class, FuncCeilDecimalToDecimal.class}) public final class GenericUDFCeil extends GenericUDFFloorCeilBase { public GenericUDFCeil() { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFloor.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFloor.java index a95a263..8ad15e9 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFloor.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFloor.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions; +import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncFloorDecimalToDecimal; import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncFloorDoubleToLong; import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncFloorLongToLong; import org.apache.hadoop.hive.serde2.io.DoubleWritable; @@ -33,7 +34,7 @@ + " > SELECT _FUNC_(-0.1) FROM src LIMIT 1;\n" + " -1\n" + " > SELECT _FUNC_(5) FROM src LIMIT 1;\n" + " 5") -@VectorizedExpressions({FuncFloorLongToLong.class, FuncFloorDoubleToLong.class}) +@VectorizedExpressions({FuncFloorLongToLong.class, FuncFloorDoubleToLong.class, FuncFloorDecimalToDecimal.class}) public final class GenericUDFFloor extends GenericUDFFloorCeilBase { public GenericUDFFloor() { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNegative.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNegative.java index f355a82..d653264 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNegative.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNegative.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions; import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.DoubleColUnaryMinus; +import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncNegateDecimalToDecimal; import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.LongColUnaryMinus; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.io.ByteWritable; @@ -33,7 +34,7 @@ import org.apache.hadoop.io.LongWritable; @Description(name 
= "-", value = "_FUNC_ a - Returns -a") -@VectorizedExpressions({LongColUnaryMinus.class, DoubleColUnaryMinus.class}) +@VectorizedExpressions({LongColUnaryMinus.class, DoubleColUnaryMinus.class, FuncNegateDecimalToDecimal.class}) public class GenericUDFOPNegative extends GenericUDFBaseUnary { public GenericUDFOPNegative() { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRound.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRound.java index 5cc8025..387de5e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRound.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRound.java @@ -23,7 +23,9 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions; +import org.apache.hadoop.hive.ql.exec.vector.expressions.FuncRoundWithNumDigitsDecimalToDecimal; import org.apache.hadoop.hive.ql.exec.vector.expressions.RoundWithNumDigitsDoubleToDouble; +import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncRoundDecimalToDecimal; import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncRoundDoubleToDouble; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.io.ByteWritable; @@ -65,7 +67,8 @@ value = "_FUNC_(x[, d]) - round x to d decimal places", extended = "Example:\n" + " > SELECT _FUNC_(12.3456, 1) FROM src LIMIT 1;\n" + " 12.3'") -@VectorizedExpressions({FuncRoundDoubleToDouble.class, RoundWithNumDigitsDoubleToDouble.class}) +@VectorizedExpressions({FuncRoundDoubleToDouble.class, RoundWithNumDigitsDoubleToDouble.class, + FuncRoundWithNumDigitsDecimalToDecimal.class, FuncRoundDecimalToDecimal.class}) public class GenericUDFRound extends GenericUDF { private transient PrimitiveObjectInspector inputOI; private int scale = 0; diff --git 
ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestDecimalUtil.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestDecimalUtil.java new file mode 100644 index 0000000..48e7819 --- /dev/null +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestDecimalUtil.java @@ -0,0 +1,190 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector.expressions; + +import junit.framework.Assert; +import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; +import org.junit.Test; + +/** + * Unit tests for DecimalUtil. 
+ */ +public class TestDecimalUtil { + + @Test + public void testFloor() { + DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 13); + Decimal128 d1 = new Decimal128(19.56778, (short) 5); + Decimal128 expected1 = new Decimal128(19, (short)0); + DecimalUtil.floor(0, d1, dcv); + Assert.assertEquals(0, expected1.compareTo(dcv.vector[0])); + + Decimal128 d2 = new Decimal128(23.0, (short) 5); + Decimal128 expected2 = new Decimal128(23, (short)0); + DecimalUtil.floor(0, d2, dcv); + Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + + Decimal128 d3 = new Decimal128(-25.34567, (short) 5); + Decimal128 expected3 = new Decimal128(-26, (short)0); + DecimalUtil.floor(0, d3, dcv); + Assert.assertEquals(0, expected3.compareTo(dcv.vector[0])); + + Decimal128 d4 = new Decimal128(-17, (short) 5); + Decimal128 expected4 = new Decimal128(-17, (short)0); + DecimalUtil.floor(0, d4, dcv); + Assert.assertEquals(0, expected4.compareTo(dcv.vector[0])); + } + + @Test + public void testCeiling() { + DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 13); + Decimal128 d1 = new Decimal128(19.56778, (short) 5); + Decimal128 expected1 = new Decimal128(20, (short)0); + DecimalUtil.ceiling(0, d1, dcv); + Assert.assertEquals(0, expected1.compareTo(dcv.vector[0])); + + Decimal128 d2 = new Decimal128(23.0, (short) 5); + Decimal128 expected2 = new Decimal128(23, (short)0); + DecimalUtil.ceiling(0, d2, dcv); + Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + + Decimal128 d3 = new Decimal128(-25.34567, (short) 5); + Decimal128 expected3 = new Decimal128(-25, (short)0); + DecimalUtil.ceiling(0, d3, dcv); + Assert.assertEquals(0, expected3.compareTo(dcv.vector[0])); + + Decimal128 d4 = new Decimal128(-17, (short) 5); + Decimal128 expected4 = new Decimal128(-17, (short)0); + DecimalUtil.ceiling(0, d4, dcv); + Assert.assertEquals(0, expected4.compareTo(dcv.vector[0])); + } + + @Test + public void testAbs() { + DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 13); 
+ Decimal128 d1 = new Decimal128(19.56778, (short) 5); + DecimalUtil.abs(0, d1, dcv); + Assert.assertEquals(0, d1.compareTo(dcv.vector[0])); + + Decimal128 d2 = new Decimal128(-25.34567, (short) 5); + Decimal128 expected2 = new Decimal128(25.34567, (short)5); + DecimalUtil.abs(0, d2, dcv); + Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + } + + @Test + public void testRound() { + DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 0); + Decimal128 d1 = new Decimal128(19.56778, (short) 5); + Decimal128 expected1 = new Decimal128(20, (short)0); + DecimalUtil.round(0, d1, dcv); + Assert.assertEquals(0, expected1.compareTo(dcv.vector[0])); + + Decimal128 d2 = new Decimal128(23.0, (short) 5); + Decimal128 expected2 = new Decimal128(23, (short)0); + DecimalUtil.round(0, d2, dcv); + Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + + Decimal128 d3 = new Decimal128(-25.34567, (short) 5); + Decimal128 expected3 = new Decimal128(-25, (short)0); + DecimalUtil.round(0, d3, dcv); + Assert.assertEquals(0, expected3.compareTo(dcv.vector[0])); + + Decimal128 d4 = new Decimal128(-17, (short) 5); + Decimal128 expected4 = new Decimal128(-17, (short)0); + DecimalUtil.round(0, d4, dcv); + Assert.assertEquals(0, expected4.compareTo(dcv.vector[0])); + + Decimal128 d5 = new Decimal128(19.36778, (short) 5); + Decimal128 expected5 = new Decimal128(19, (short)0); + DecimalUtil.round(0, d5, dcv); + Assert.assertEquals(0, expected5.compareTo(dcv.vector[0])); + + Decimal128 d6 = new Decimal128(-25.54567, (short) 5); + Decimal128 expected6 = new Decimal128(-26, (short)0); + DecimalUtil.round(0, d6, dcv); + Assert.assertEquals(0, expected6.compareTo(dcv.vector[0])); + } + + @Test + public void testRoundWithDigits() { + DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 3); + Decimal128 d1 = new Decimal128(19.56778, (short) 5); + Decimal128 expected1 = new Decimal128(19.568, (short)3); + DecimalUtil.round(0, d1, dcv); + Assert.assertEquals(0, 
expected1.compareTo(dcv.vector[0])); + + Decimal128 d2 = new Decimal128(23.567, (short) 5); + Decimal128 expected2 = new Decimal128(23.567, (short)3); + DecimalUtil.round(0, d2, dcv); + Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + + Decimal128 d3 = new Decimal128(-25.34567, (short) 5); + Decimal128 expected3 = new Decimal128(-25.346, (short)3); + DecimalUtil.round(0, d3, dcv); + Assert.assertEquals(0, expected3.compareTo(dcv.vector[0])); + + Decimal128 d4 = new Decimal128(-17.234, (short) 5); + Decimal128 expected4 = new Decimal128(-17.234, (short)3); + DecimalUtil.round(0, d4, dcv); + Assert.assertEquals(0, expected4.compareTo(dcv.vector[0])); + + Decimal128 d5 = new Decimal128(19.36748, (short) 5); + Decimal128 expected5 = new Decimal128(19.367, (short)3); + DecimalUtil.round(0, d5, dcv); + Assert.assertEquals(0, expected5.compareTo(dcv.vector[0])); + + Decimal128 d6 = new Decimal128(-25.54537, (short) 5); + Decimal128 expected6 = new Decimal128(-25.545, (short)3); + DecimalUtil.round(0, d6, dcv); + Assert.assertEquals(0, expected6.compareTo(dcv.vector[0])); + } + + @Test + public void testNegate() { + DecimalColumnVector dcv = new DecimalColumnVector(4 ,20, 13); + Decimal128 d1 = new Decimal128(19.56778, (short) 5); + Decimal128 expected1 = new Decimal128(-19.56778, (short)5); + DecimalUtil.negate(0, d1, dcv); + Assert.assertEquals(0, expected1.compareTo(dcv.vector[0])); + + Decimal128 d2 = new Decimal128(-25.34567, (short) 5); + Decimal128 expected2 = new Decimal128(25.34567, (short)5); + DecimalUtil.negate(0, d2, dcv); + Assert.assertEquals(0, expected2.compareTo(dcv.vector[0])); + } + + @Test + public void testSign() { + LongColumnVector lcv = new LongColumnVector(4); + Decimal128 d1 = new Decimal128(19.56778, (short) 5); + DecimalUtil.sign(0, d1, lcv); + Assert.assertEquals(1, lcv.vector[0]); + + Decimal128 d2 = new Decimal128(-25.34567, (short) 5); + DecimalUtil.sign(0, d2, lcv); + Assert.assertEquals(-1, lcv.vector[0]); + + Decimal128 d3 = 
new Decimal128(0, (short) 5); + DecimalUtil.sign(0, d3, lcv); + Assert.assertEquals(0, lcv.vector[0]); + } +} \ No newline at end of file diff --git ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q new file mode 100644 index 0000000..6e2c0b1 --- /dev/null +++ ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q @@ -0,0 +1,77 @@ +CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc; +SET hive.vectorized.execution.enabled=true; + +-- Test math functions in vectorized mode to verify they run correctly end-to-end. + +explain +select + cdecimal1 + ,Round(cdecimal1, 2) + ,Round(cdecimal1) + ,Floor(cdecimal1) + ,Ceil(cdecimal1) + ,Exp(cdecimal1) + ,Ln(cdecimal1) + ,Log10(cdecimal1) + -- Use log2 as a representative function to test all input types. + ,Log2(cdecimal1) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdecimal1 - 15601.0) + ,Log(2.0, cdecimal1) + ,Pow(log2(cdecimal1), 2.0) + ,Power(log2(cdecimal1), 2.0) + ,Sqrt(cdecimal1) + ,Abs(cdecimal1) + ,Sin(cdecimal1) + ,Asin(cdecimal1) + ,Cos(cdecimal1) + ,ACos(cdecimal1) + ,Atan(cdecimal1) + ,Degrees(cdecimal1) + ,Radians(cdecimal1) + ,Positive(cdecimal1) + ,Negative(cdecimal1) + ,Sign(cdecimal1) + -- Test nesting + ,cos(-sin(log(cdecimal1)) + 3.14159) +from decimal_test +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cdecimal1) >= -1.0; + +select + cdecimal1 + ,Round(cdecimal1, 2) + ,Round(cdecimal1) + ,Floor(cdecimal1) + ,Ceil(cdecimal1) + ,Exp(cdecimal1) + ,Ln(cdecimal1) + ,Log10(cdecimal1) + -- Use log2 as a representative function to test all input types. 
+ ,Log2(cdecimal1) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdecimal1 - 15601.0) + ,Log(2.0, cdecimal1) + ,Pow(log2(cdecimal1), 2.0) + ,Power(log2(cdecimal1), 2.0) + ,Sqrt(cdecimal1) + ,Abs(cdecimal1) + ,Sin(cdecimal1) + ,Asin(cdecimal1) + ,Cos(cdecimal1) + ,ACos(cdecimal1) + ,Atan(cdecimal1) + ,Degrees(cdecimal1) + ,Radians(cdecimal1) + ,Positive(cdecimal1) + ,Negative(cdecimal1) + ,Sign(cdecimal1) + -- Test nesting + ,cos(-sin(log(cdecimal1)) + 3.14159) +from decimal_test +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cdecimal1) >= -1.0; diff --git ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out new file mode 100644 index 0000000..6b203ac --- /dev/null +++ ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out @@ -0,0 +1,203 @@ +PREHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +POSTHOOK: query: CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@decimal_test +PREHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end. + +explain +select + cdecimal1 + ,Round(cdecimal1, 2) + ,Round(cdecimal1) + ,Floor(cdecimal1) + ,Ceil(cdecimal1) + ,Exp(cdecimal1) + ,Ln(cdecimal1) + ,Log10(cdecimal1) + -- Use log2 as a representative function to test all input types. 
+ ,Log2(cdecimal1) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdecimal1 - 15601.0) + ,Log(2.0, cdecimal1) + ,Pow(log2(cdecimal1), 2.0) + ,Power(log2(cdecimal1), 2.0) + ,Sqrt(cdecimal1) + ,Abs(cdecimal1) + ,Sin(cdecimal1) + ,Asin(cdecimal1) + ,Cos(cdecimal1) + ,ACos(cdecimal1) + ,Atan(cdecimal1) + ,Degrees(cdecimal1) + ,Radians(cdecimal1) + ,Positive(cdecimal1) + ,Negative(cdecimal1) + ,Sign(cdecimal1) + -- Test nesting + ,cos(-sin(log(cdecimal1)) + 3.14159) +from decimal_test +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cdecimal1) >= -1.0 +PREHOOK: type: QUERY +POSTHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end. + +explain +select + cdecimal1 + ,Round(cdecimal1, 2) + ,Round(cdecimal1) + ,Floor(cdecimal1) + ,Ceil(cdecimal1) + ,Exp(cdecimal1) + ,Ln(cdecimal1) + ,Log10(cdecimal1) + -- Use log2 as a representative function to test all input types. 
+ ,Log2(cdecimal1) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdecimal1 - 15601.0) + ,Log(2.0, cdecimal1) + ,Pow(log2(cdecimal1), 2.0) + ,Power(log2(cdecimal1), 2.0) + ,Sqrt(cdecimal1) + ,Abs(cdecimal1) + ,Sin(cdecimal1) + ,Asin(cdecimal1) + ,Cos(cdecimal1) + ,ACos(cdecimal1) + ,Atan(cdecimal1) + ,Degrees(cdecimal1) + ,Radians(cdecimal1) + ,Positive(cdecimal1) + ,Negative(cdecimal1) + ,Sign(cdecimal1) + -- Test nesting + ,cos(-sin(log(cdecimal1)) + 3.14159) +from decimal_test +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cdecimal1) >= -1.0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_test + Statistics: Num rows: 12288 Data size: 2201752 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((cbigint % 500) = 0) and (sin(cdecimal1) >= (- 1.0))) (type: boolean) + Statistics: Num rows: 2048 Data size: 366958 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cdecimal1 (type: decimal(20,10)), round(cdecimal1, 2) (type: decimal(13,2)), round(cdecimal1) (type: decimal(11,0)), floor(cdecimal1) (type: decimal(11,0)), ceil(cdecimal1) (type: decimal(11,0)), exp(cdecimal1) (type: double), ln(cdecimal1) (type: double), log10(cdecimal1) (type: double), log2(cdecimal1) (type: double), log2((cdecimal1 - 15601.0)) (type: double), log(2.0, cdecimal1) (type: double), power(log2(cdecimal1), 2.0) (type: double), power(log2(cdecimal1), 2.0) (type: double), sqrt(cdecimal1) (type: double), abs(cdecimal1) (type: decimal(38,18)), sin(cdecimal1) (type: double), asin(cdecimal1) (type: double), cos(cdecimal1) (type: double), acos(cdecimal1) (type: double), atan(cdecimal1) (type: double), degrees(cdecimal1) (type: double), radians(cdecimal1) (type: double), cdecimal1 (type: 
decimal(20,10)), (- cdecimal1) (type: decimal(20,10)), sign(cdecimal1) (type: int), cos(((- sin(log(cdecimal1))) + 3.14159)) (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25 + Statistics: Num rows: 2048 Data size: 366958 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2048 Data size: 366958 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + +PREHOOK: query: select + cdecimal1 + ,Round(cdecimal1, 2) + ,Round(cdecimal1) + ,Floor(cdecimal1) + ,Ceil(cdecimal1) + ,Exp(cdecimal1) + ,Ln(cdecimal1) + ,Log10(cdecimal1) + -- Use log2 as a representative function to test all input types. 
+ ,Log2(cdecimal1) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdecimal1 - 15601.0) + ,Log(2.0, cdecimal1) + ,Pow(log2(cdecimal1), 2.0) + ,Power(log2(cdecimal1), 2.0) + ,Sqrt(cdecimal1) + ,Abs(cdecimal1) + ,Sin(cdecimal1) + ,Asin(cdecimal1) + ,Cos(cdecimal1) + ,ACos(cdecimal1) + ,Atan(cdecimal1) + ,Degrees(cdecimal1) + ,Radians(cdecimal1) + ,Positive(cdecimal1) + ,Negative(cdecimal1) + ,Sign(cdecimal1) + -- Test nesting + ,cos(-sin(log(cdecimal1)) + 3.14159) +from decimal_test +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cdecimal1) >= -1.0 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_test +#### A masked pattern was here #### +POSTHOOK: query: select + cdecimal1 + ,Round(cdecimal1, 2) + ,Round(cdecimal1) + ,Floor(cdecimal1) + ,Ceil(cdecimal1) + ,Exp(cdecimal1) + ,Ln(cdecimal1) + ,Log10(cdecimal1) + -- Use log2 as a representative function to test all input types. 
+ ,Log2(cdecimal1) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdecimal1 - 15601.0) + ,Log(2.0, cdecimal1) + ,Pow(log2(cdecimal1), 2.0) + ,Power(log2(cdecimal1), 2.0) + ,Sqrt(cdecimal1) + ,Abs(cdecimal1) + ,Sin(cdecimal1) + ,Asin(cdecimal1) + ,Cos(cdecimal1) + ,ACos(cdecimal1) + ,Atan(cdecimal1) + ,Degrees(cdecimal1) + ,Radians(cdecimal1) + ,Positive(cdecimal1) + ,Negative(cdecimal1) + ,Sign(cdecimal1) + -- Test nesting + ,cos(-sin(log(cdecimal1)) + 3.14159) +from decimal_test +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cdecimal1) >= -1.0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_test +#### A masked pattern was here #### +-119.4594594595 -119.46 -119 -120 -119 1.3164850923493003E-52 NULL NULL NULL NULL NULL NULL NULL NULL 119.4594594595 -0.07885666683797002 NaN 0.9968859644388647 NaN -1.5624254815943668 -6844.522849943508 -2.0849608902209606 -119.4594594595 119.4594594595 -1 NULL +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 
1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL