diff --git ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Column.txt ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Column.txt
index d2607167ca..127c90529c 100644
--- ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Column.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Column.txt
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template Decimal64ColumnCompareDecimal64Column.txt, which covers
* decimal64 comparison expressions between two columns, however output is not produced in
- * a separate column. The selected vector of the input {@link VectorizedRowBatch} is updated
+ * a separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated
* for in-place filtering.
*/
public class extends {
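
The template javadoc above (and in the filter templates that follow) describes producing no output column and instead rewriting the selected vector of the input VectorizedRowBatch. For orientation, here is a minimal, self-contained Java sketch of that in-place filtering pattern; the array values and the `> 0` predicate are invented for illustration, and this is not the generated Hive code:

```java
// Minimal sketch (not Hive's generated code): a filter compacts the indices of
// qualifying rows into the `selected` array, so later operators only visit survivors.
public class SelectedVectorFilterSketch {
  public static void main(String[] args) {
    long[] decimal64Col = {1200L, -30L, 4000L, 700L}; // scaled decimal64 values (made up)
    int[] selected = {0, 1, 2, 3};                    // row indices currently in play
    int size = selected.length;

    int newSize = 0;
    for (int j = 0; j < size; j++) {
      int i = selected[j];
      if (decimal64Col[i] > 0L) {   // the comparison a generated filter would apply
        selected[newSize++] = i;    // keep row i by writing its index forward
      }
    }
    size = newSize;                 // row 1 is dropped; size shrinks from 4 to 3
    System.out.println("rows passing filter: " + size);
  }
}
```
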
diff --git ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Scalar.txt ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Scalar.txt
index 802b9a6c85..cc7aa8905c 100644
--- ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Scalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/Decimal64ColumnCompareDecimal64Scalar.txt
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
/**
* Generated from template Decimal64ColumnCompareDecimal64Scalar.txt, which covers decimal64
* comparison expressions between a column and a scalar, however output is not produced in a
- * separate column. The selected vector of the input {@link VectorizedRowBatch} is updated for
+ * separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for
* in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/Decimal64ScalarCompareDecimal64Column.txt ql/src/gen/vectorization/ExpressionTemplates/Decimal64ScalarCompareDecimal64Column.txt
index c8b10b6d6b..009a87724f 100644
--- ql/src/gen/vectorization/ExpressionTemplates/Decimal64ScalarCompareDecimal64Column.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/Decimal64ScalarCompareDecimal64Column.txt
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
/**
* Generated from template Decimal64ScalarCompareDecimal64Column.txt, which covers decimal64
* comparison expressions between a scalar and a column, however output is not produced in a
- * separate column. The selected vector of the input {@link VectorizedRowBatch} is updated for
+ * separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for
* in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt
index 47dd42fdad..26fee47106 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterColumnBetween.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
* Generated from template FilterColumnBetween.txt, which covers [NOT] BETWEEN filter
* expressions where a column is [NOT] between one scalar and another.
* Output is not produced in a separate column. The selected vector of the input
- * {@link VectorizedRowBatch} is updated for in-place filtering.
+ * {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
index be9bbb25ba..cd1222309b 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
* Generated from template FilterColumnCompareColumn.txt, which covers binary comparison
* expressions between two columns, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
index 8b6f978729..eeea48af57 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
* Generated from template FilterColumnCompareScalar.txt, which covers binary comparison
* expressions between a column and a scalar, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt
index 2eaf0620c0..a88435bc69 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
* Generated from template FilterDTIColumnCompareScalar.txt, which covers comparison
* expressions between a datetime/interval column and a scalar of the same type, however output is not
* produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt
index 69f0d6bae4..454e1a73d8 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
* Generated from template FilterDTIScalarCompareColumn.txt, which covers comparison
* expressions between a datetime/interval scalar and a column of the same type,
* however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Column.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Column.txt
index be3add064a..6af237d600 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Column.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Column.txt
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterDecimal64ColumnCompareDecimal64Column.txt, which covers
* decimal64 comparison expressions between two columns, however output is not produced in
- * a separate column. The selected vector of the input {@link VectorizedRowBatch} is updated
+ * a separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated
* for in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Scalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Scalar.txt
index 715d04d4aa..65737e819f 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Scalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ColumnCompareDecimal64Scalar.txt
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterDecimal64ColumnCompareDecimal64Scalar.txt, which covers decimal64
* comparison expressions between a column and a scalar, however output is not produced in a
- * separate column. The selected vector of the input {@link VectorizedRowBatch} is updated for
+ * separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for
* in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ScalarCompareDecimal64Column.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ScalarCompareDecimal64Column.txt
index 19041805e5..5c7d001814 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ScalarCompareDecimal64Column.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimal64ScalarCompareDecimal64Column.txt
@@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterDecimal64ScalarCompareDecimal64Column.txt, which covers decimal64
* comparison expressions between a scalar and a column, however output is not produced in a
- * separate column. The selected vector of the input {@link VectorizedRowBatch} is updated for
+ * separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for
* in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt
index c7cfc4de8b..8ddbd7bd95 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnBetween.txt
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
* Generated from template FilterDecimalColumnBetween.txt, which covers [NOT] BETWEEN filter
* expressions where a column is [NOT] between one scalar and another.
* Output is not produced in a separate column. The selected vector of the input
- * {@link VectorizedRowBatch} is updated for in-place filtering.
+ * {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
index ae2bb17293..7522ee3fca 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
* Generated from template FilterDecimalColumnCompareColumn.txt, which covers binary comparison
* filter expressions between two columns. Output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
index 5e59c03d9c..b3728de0a8 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
- * This is a generated class to evaluate a comparison on a vector of decimal
+ * This is a generated class to evaluate a comparison on a vector of decimal
* values.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
index 00d0042a8c..4c5c224d3b 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
- * This is a generated class to evaluate a comparison on a vector of decimal
+ * This is a generated class to evaluate a comparison on a vector of decimal
* values.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
index 4e78fd6599..9d6dc79d91 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
* Generated from template FilterColumnCompareColumn.txt, which covers binary comparison
* expressions between two columns, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt
index 5ae21e69dc..64916dd020 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt
@@ -29,7 +29,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterColumnCompareScalar.txt, which covers binary comparison
* expressions between a column and a scalar, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
index 69cf579c3a..87b4655758 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
* Generated from template FilterScalarCompareTimestampColumn.txt, which covers comparison
* expressions between a long/double scalar and a timestamp column, however output is not produced
- * in a separate column. The selected vector of the input {@link VectorizedRowBatch} is updated
+ * in a separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated
* for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
index 2ff9e98b0c..3398a4753e 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
* Generated from template FilterScalarCompareColumn.txt, which covers binary comparison
* expressions between a scalar and a column, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
index 6efa1ca1c7..4ae3ddb585 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public abstract class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt
index 97be5f4d7d..5cab792e13 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt
index 0c679022b0..657bb7ec2d 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
index 7165eb2365..ac26485d02 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public abstract class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt
index 2e7bec796b..5b690620b3 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt
index ad5985fdca..5ceb3907b5 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnBetween.txt
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
* Generated from template FilterTimestampColumnBetween.txt, which covers [NOT] BETWEEN filter
* expressions where a column is [NOT] between one scalar and another.
* Output is not produced in a separate column. The selected vector of the input
- * {@link VectorizedRowBatch} is updated for in-place filtering.
+ * {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
index 83993343ac..608492a2ac 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
* Generated from template FilterColumnCompareColumn.txt, which covers binary comparison
* expressions between two columns, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
index 57834c21ed..747e95d709 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
* Generated from template FilterTimestampColumnCompareScalar.txt, which covers comparison
* expressions between a timestamp column and a long/double scalar, however output is not produced
- * in a separate column. The selected vector of the input {@link VectorizedRowBatch} is updated
+ * in a separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated
* for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
index b855714d95..a1e54a0858 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
* Generated from template FilterTimestampColumnCompareColumn.txt, which covers binary comparison
* filter expressions between two columns. Output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
index 6a05d77100..5061bc9e77 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
* Generated from template FilterColumnCompareScalar.txt, which covers binary comparison
* expressions between a column and a scalar, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
index c1ddc08d01..8a3ad3f0dc 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterScalarCompareColumn.txt, which covers binary comparison
* expressions between a scalar and a column, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
index 36628a7329..f86d72891b 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
- * This is a generated class to evaluate a comparison on a vector of timestamp
+ * This is a generated class to evaluate a comparison on a vector of timestamp
* values.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt
index 31c443c3a4..ee88e7c967 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
index 48913a3abd..ef4ee9d47f 100644
--- ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public abstract class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt
index 07b4bf38fd..0332e2cee8 100644
--- ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
index eccbee2f38..4e132b9600 100644
--- ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
index 3cf4a2e787..bba788e707 100644
--- ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
import org.apache.hadoop.hive.ql.metadata.HiveException;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public abstract class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt
index 4e29f7e792..88babace11 100644
--- ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
index 3d2d280289..3b35ba96be 100644
--- ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public class extends {
diff --git ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java
index 4cd8a61c8f..73e1d065aa 100644
--- ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java
+++ ql/src/java/org/apache/hadoop/hive/llap/LlapArrowRecordWriter.java
@@ -39,7 +39,7 @@
* calls to the {@link #write(Writable, Writable)} method only serve as a signal that
* a new batch has been loaded to the associated VectorSchemaRoot.
* Payload data for writing is indirectly made available by reference:
- * ArrowStreamWriter -> VectorSchemaRoot -> List<FieldVector>
+ * ArrowStreamWriter -&gt; VectorSchemaRoot -&gt; List&lt;FieldVector&gt;
* i.e. both they key and value are ignored once a reference to the VectorSchemaRoot
* is obtained.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java
index a51b7e750b..0b7166b7e8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java
+++ ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java
@@ -412,10 +412,6 @@ public static QueryResultsCache getInstance() {
/**
* Check if the cache contains an entry for the requested LookupInfo.
* @param request
- * @param addReader Should the reader count be incremented during the lookup.
- * This will ensure the returned entry can be used after the lookup.
- * If true, the caller will be responsible for decrementing the reader count
- * using CacheEntry.releaseReader().
* @return The cached result if there is a match in the cache, or null if no match is found.
*/
public CacheEntry lookup(LookupInfo request) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
index df8441727d..bb89f803d5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
@@ -132,8 +132,8 @@ private void updatePaths(Path tp, Path ttp) {
/**
* Fixes tmpPath to point to the correct partition. Initialize operator will
* set tmpPath and taskTmpPath based on root table directory. So initially,
- * tmpPath will be <prefix>/_tmp.-ext-10000 and taskTmpPath will be
- * <prefix>/_task_tmp.-ext-10000. The depth of these two paths will be 0.
+ * tmpPath will be &lt;prefix&gt;/_tmp.-ext-10000 and taskTmpPath will be
+ * &lt;prefix&gt;/_task_tmp.-ext-10000. The depth of these two paths will be 0.
* Now, in case of dynamic partitioning or list bucketing the inputPath will
* have additional sub-directories under root table directory. This function
* updates the tmpPath and taskTmpPath to reflect these additional
@@ -146,10 +146,10 @@ private void updatePaths(Path tp, Path ttp) {
* Note: The path difference between inputPath and tmpDepth can be DP or DP+LB.
* This method will automatically handle it.
*
- * Continuing the example above, if inputPath is <prefix>/-ext-10000/hr=a1/,
+ * Continuing the example above, if inputPath is &lt;prefix&gt;/-ext-10000/hr=a1/,
* newPath will be hr=a1/. Then, tmpPath and taskTmpPath will be updated to
- * <prefix>/-ext-10000/hr=a1/_tmp.ext-10000 and
- * <prefix>/-ext-10000/hr=a1/_task_tmp.ext-10000 respectively.
+ * &lt;prefix&gt;/-ext-10000/hr=a1/_tmp.ext-10000 and
+ * &lt;prefix&gt;/-ext-10000/hr=a1/_task_tmp.ext-10000 respectively.
* We have list_bucket_dml_6.q cover this case: DP + LP + multiple skewed
* values + merge.
*
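
The path bookkeeping described in the hunk above amounts to appending the extra dynamic-partition suffix of the input path to the root temporary paths. A string-only sketch of that update, under the simplified layout used in the comment; the directory names are illustrative and the real operator works on Hadoop Path objects:

```java
// String-only illustration of the tmpPath/taskTmpPath update described above.
public class MergeTmpPathSketch {
  public static void main(String[] args) {
    String root = "/warehouse/t/-ext-10000";                  // simplified <prefix>/-ext-10000
    String inputPath = root + "/hr=a1";                       // extra DP sub-directory
    String newPath = inputPath.substring(root.length() + 1);  // "hr=a1"

    String tmpPath = root + "/" + newPath + "/_tmp.-ext-10000";
    String taskTmpPath = root + "/" + newPath + "/_task_tmp.-ext-10000";
    System.out.println(tmpPath);     // /warehouse/t/-ext-10000/hr=a1/_tmp.-ext-10000
    System.out.println(taskTmpPath); // /warehouse/t/-ext-10000/hr=a1/_task_tmp.-ext-10000
  }
}
```
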
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
index 1c32588076..2d76848413 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
@@ -72,8 +72,8 @@
* evaluated before emitting rows. Currently, relevant only for outer joins.
*
* For instance, given the query:
- * select * from t1 right outer join t2 on t1.c1 + t2.c2 > t1.c3;
- * The expression evaluator for t1.c1 + t2.c2 > t1.c3 will be stored in this list.
+ * select * from t1 right outer join t2 on t1.c1 + t2.c2 &gt; t1.c3;
+ * The expression evaluator for t1.c1 + t2.c2 &gt; t1.c3 will be stored in this list.
*/
protected transient List residualJoinFilters;
@@ -448,21 +448,21 @@ protected long getNextSize(long sz) {
* a = 100, 10 | 100, 20 | 100, 30
* b = 100, 10 | 100, 20 | 100, 30
*
- * the query "a FO b ON a.k=b.k AND a.v>10 AND b.v>30" makes filter map
- * 0(a) = [1(b),1] : a.v>10
- * 1(b) = [0(a),1] : b.v>30
+ * the query "a FO b ON a.k=b.k AND a.v>10 AND b.v>30" makes filter map
+ * 0(a) = [1(b),1] : a.v>10
+ * 1(b) = [0(a),1] : b.v>30
*
* for filtered rows in a (100,10) create a-NULL
* for filtered rows in b (100,10) (100,20) (100,30) create NULL-b
*
- * with 0(a) = [1(b),1] : a.v>10
+ * with 0(a) = [1(b),1] : a.v&gt;10
* 100, 10 = 00000010 (filtered)
* 100, 20 = 00000000 (valid)
* 100, 30 = 00000000 (valid)
* -------------------------
* sum = 00000000 : for valid rows in b, there is at least one pair in a
*
- * with 1(b) = [0(a),1] : b.v>30
+ * with 1(b) = [0(a),1] : b.v&gt;30
* 100, 10 = 00000001 (filtered)
* 100, 20 = 00000001 (filtered)
* 100, 30 = 00000001 (filtered)
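
The bit diagrams above track, per row, which join filters rejected it, and combine those tags across a key group to decide whether the outer join must emit NULL-padded rows. A simplified, hedged sketch of that bookkeeping with invented tag values (not CommonJoinOperator's actual implementation):

```java
// Simplified sketch of the filter-tag idea from the comment above: bit 1 of each row's
// tag is set when the filter on that side (e.g. a.v > 10) rejected the row. Only if every
// row in the group carries the bit is there no valid pair, so NULL-padding is needed.
public class FilterTagSketch {
  public static void main(String[] args) {
    byte[] tagsForA = {0b0000_0010, 0b0000_0000, 0b0000_0000}; // (100,10) filtered; others valid
    byte combined = (byte) 0xFF;
    for (byte t : tagsForA) {
      combined &= t;                 // keeps the bit only if all rows were filtered
    }
    boolean allFiltered = (combined & 0b0000_0010) != 0;
    System.out.println("emit NULL-padded rows for the other side: " + allFiltered); // false
  }
}
```
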
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java
index 2bbcef1f48..66206874ec 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java
@@ -28,8 +28,8 @@
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
/**
- * The class implements the method resolution for operators like (> < <= >= =
- * <>). The resolution logic is as follows: 1. If one of the parameters is null,
+ * The class implements the method resolution for operators like (&gt; &lt; &lt;= &gt;= =
+ * &lt;&gt;). The resolution logic is as follows: 1. If one of the parameters is null,
* then it resolves to evaluate(Double, Double) 2. If both of the parameters are
* of type T, then it resolves to evaluate(T, T) 3. If 1 and 2 fails then it
* resolves to evaluate(Double, Double).
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index cb7fdf73b5..5c5f0571b4 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -5089,10 +5089,6 @@ public String getName() {
*
* @param databaseName
* Database name.
- * @param sd
- * Storage descriptor.
- * @param name
- * Object name.
*/
public static void makeLocationQualified(String databaseName, Table table, HiveConf conf) throws HiveException {
Path path = null;
@@ -5114,13 +5110,13 @@ public static void makeLocationQualified(String databaseName, Table table, HiveC
}
}
+ public static final String DATABASE_PATH_SUFFIX = ".db";
/**
* Make qualified location for a database .
*
* @param database
* Database.
*/
- public static final String DATABASE_PATH_SUFFIX = ".db";
private void makeLocationQualified(Database database) throws HiveException {
if (database.isSetLocationUri()) {
database.setLocationUri(Utilities.getQualifiedPath(conf, new Path(database.getLocationUri())));
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
index d66384aed4..2c630019ad 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
@@ -36,9 +36,9 @@
* Consider a query like:
*
* select * from
- * (subq1 --> has a filter)
+ * (subq1 --&gt; has a filter)
* join
- * (subq2 --> has a filter)
+ * (subq2 --&gt; has a filter)
* on some key
*
* Let us assume that subq1 is the small table (either specified by the user or inferred
@@ -50,12 +50,12 @@
*
* Therefore the following operator tree is created:
*
- * TableScan (subq1) --> Select --> Filter --> DummyStore
+ * TableScan (subq1) --&gt; Select --&gt; Filter --&gt; DummyStore
* \
* \ SMBJoin
* /
* /
- * TableScan (subq2) --> Select --> Filter
+ * TableScan (subq2) --&gt; Select --&gt; Filter
*
* In order to fetch the row with the least join key from the small table, the row from subq1
* is partially processed, and stored in DummyStore. For the actual processing of the join,
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 79e41d9178..cd13397a79 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -383,8 +383,8 @@ public void addToStat(String statType, long amount) {
* FileSink, in ways similar to the multi file spray, but without knowing the total number of
* buckets ahead of time.
*
- * ROW__ID (1,2[0],3) => bucket_00002
- * ROW__ID (1,3[0],4) => bucket_00003 etc
+ * ROW__ID (1,2[0],3) =&gt; bucket_00002
+ * ROW__ID (1,3[0],4) =&gt; bucket_00003 etc
*
* A new FSP is created for each partition, so this only requires the bucket numbering and that
* is mapped in directly as an index.
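
The ROW__ID examples above send each row to a bucket file derived directly from the bucket number embedded in the row identifier. A minimal sketch of that naming convention; the helper below is illustrative, not FileSinkOperator's code:

```java
// Illustrative only: derive the bucket file name from the bucket id carried in ROW__ID,
// matching the "bucket_00002" / "bucket_00003" examples in the comment above.
public class BucketFileNameSketch {
  static String bucketFileName(int bucketId) {
    return String.format("bucket_%05d", bucketId);
  }
  public static void main(String[] args) {
    System.out.println(bucketFileName(2)); // bucket_00002
    System.out.println(bucketFileName(3)); // bucket_00003
  }
}
```
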
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index e7aa041c25..641c35c38c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -723,8 +723,8 @@ static int getCommonLength(int aLen, int bLen) {
* return a TypeInfo corresponding to the common PrimitiveCategory, and with type qualifiers
* (if applicable) that match the 2 TypeInfo types.
* Examples:
- * varchar(10), varchar(20), primitive category varchar => varchar(20)
- * date, string, primitive category string => string
+ * varchar(10), varchar(20), primitive category varchar =&gt; varchar(20)
+ * date, string, primitive category string =&gt; string
* @param a TypeInfo of the first type
* @param b TypeInfo of the second type
* @param typeCategory PrimitiveCategory of the designated common type between a and b
@@ -1379,7 +1379,6 @@ public static Method getMethodInternal(Class> udfClass, List mlist, bo
/**
* A shortcut to get the "index" GenericUDF. This is used for getting elements
* out of array and getting values out of map.
- * @throws SemanticException
*/
public static GenericUDF getGenericUDFForIndex() {
try {
@@ -1391,7 +1390,6 @@ public static GenericUDF getGenericUDFForIndex() {
/**
* A shortcut to get the "and" GenericUDF.
- * @throws SemanticException
*/
public static GenericUDF getGenericUDFForAnd() {
try {
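
The example quoted in the hunk above — varchar(10) and varchar(20) with common primitive category varchar resolving to varchar(20) — comes down to keeping the larger length qualifier. A hedged sketch with a made-up type class rather than Hive's TypeInfo API:

```java
// Hypothetical VarcharType class for illustration; Hive's actual TypeInfo classes are not used.
public class CommonVarcharSketch {
  static final class VarcharType {
    final int length;
    VarcharType(int length) { this.length = length; }
    @Override public String toString() { return "varchar(" + length + ")"; }
  }
  static VarcharType commonVarchar(VarcharType a, VarcharType b) {
    // qualifier rule from the javadoc: same primitive category, keep the larger length
    return new VarcharType(Math.max(a.length, b.length));
  }
  public static void main(String[] args) {
    System.out.println(commonVarchar(new VarcharType(10), new VarcharType(20))); // varchar(20)
  }
}
```
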
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
index 6585b19b6b..15a2cbc769 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
@@ -64,7 +64,7 @@
*
* The output of select in the left branch and output of the UDTF in the right
* branch are then sent to the lateral view join (LVJ). In most cases, the UDTF
- * will generate > 1 row for every row received from the TS, while the left
+ * will generate &gt; 1 row for every row received from the TS, while the left
* select operator will generate only one. For each row output from the TS, the
* LVJ outputs all possible rows that can be created by joining the row from the
* left select and one of the rows output from the UDTF.
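
The lateral view join described above emits one output row per UDTF row, each paired with the single row produced by the left Select branch. A toy, self-contained illustration of that pairing (invented values, not the operator's code):

```java
// Toy illustration of the lateral view join described above: for each input row, pair the
// "left" projection with every row the UDTF produced (explode-like), yielding >1 output row.
import java.util.List;

public class LateralViewJoinSketch {
  public static void main(String[] args) {
    String leftRow = "id=7";                                      // output of the left Select branch
    List<String> udtfRows = List.of("tag=a", "tag=b", "tag=c");   // explode() output for that row
    for (String u : udtfRows) {
      System.out.println(leftRow + ", " + u);                     // one joined row per UDTF output
    }
  }
}
```
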
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
index 5c502e1f45..01d0392910 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
@@ -264,7 +264,7 @@ public void tryStoreVectorizedKey(HiveKey key, boolean partColsIsNull, int batch
/**
* Get vectorized batch result for particular index.
* @param batchIndex index of the key in the batch.
- * @return the result, same as from {@link #tryStoreKey(HiveKey)}
+ * @return the result, same as from {@link TopNHash#tryStoreKey(HiveKey,boolean)}
*/
public int getVectorizedBatchResult(int batchIndex) {
int result = batchIndexToResult[batchIndex];
@@ -309,9 +309,8 @@ public int getVectorizedKeyHashCode(int batchIndex) {
/**
* Stores the value for the key in the heap.
* @param index The index, either from tryStoreKey or from tryStoreVectorizedKey result.
- * @param hasCode hashCode of key, used by ptfTopNHash.
+ * @param hashCode hashCode of key, used by ptfTopNHash.
* @param value The value to store.
- * @param keyHash The key hash to store.
* @param vectorized Whether the result is coming from a vectorized batch.
*/
public void storeValue(int index, int hashCode, BytesWritable value, boolean vectorized) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java
index 63ddb6b66a..dc33a08a35 100755
--- ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java
@@ -20,9 +20,9 @@
/**
* Please see the deprecation notice
- *
+ *
* Base class for all User-defined Aggregation Function (UDAF) classes.
- *
+ *
* Requirements for a UDAF class:
*
* - Implement the {@code init()} method, which resets the status of the aggregation function.
@@ -57,7 +57,7 @@
* aggregation result and returns a boolean. The method should always return
* {@code true} on valid inputs, or the framework will throw an Exception.
*
- *
+ *
* Following are some examples:
*
* - public int evaluatePartial();
@@ -65,7 +65,6 @@
* - public String evaluatePartial();
* - public boolean aggregatePartial(String partial);
*
- *
*
* @deprecated Either implement {@link org.apache.hadoop.hive.ql.udf.generic.GenericUDAFResolver2} or extend
* {@link org.apache.hadoop.hive.ql.udf.generic.AbstractGenericUDAFResolver} instead.
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 8937b43811..78ddcb3c30 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2170,7 +2170,7 @@ public static String formatBinaryString(byte[] array, int start, int length) {
* If there is no db name part, set the current sessions default db
* @param dbtable
* @return String array with two elements, first is db name, second is table name
- * @throws HiveException
+ * @throws SemanticException
*/
public static String[] getDbTableName(String dbtable) throws SemanticException {
return getDbTableName(SessionState.get().getCurrentDatabase(), dbtable);
@@ -3864,9 +3864,9 @@ public static String getQualifiedPath(HiveConf conf, Path path) throws HiveExcep
}
/**
- * Checks if the current HiveServer2 logging operation level is >= PERFORMANCE.
+ * Checks if the current HiveServer2 logging operation level is &gt;= PERFORMANCE.
* @param conf Hive configuration.
- * @return true if current HiveServer2 logging operation level is >= PERFORMANCE.
+ * @return true if current HiveServer2 logging operation level is &gt;= PERFORMANCE.
* Else, false.
*/
public static boolean isPerfOrAboveLogging(HiveConf conf) {
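
For the getDbTableName contract documented in the hunk above — "db.table" splits into database and table, and a bare table name falls back to the session's current database — here is a rough sketch of the behaviour; it is not the actual Utilities implementation (which, among other things, throws SemanticException for malformed names):

```java
// Rough sketch only; the real method also rejects names with more than one dot.
public class DbTableNameSketch {
  static String[] getDbTableName(String currentDb, String dbtable) {
    String[] parts = dbtable.split("\\.");
    if (parts.length == 2) {
      return parts;                           // explicit "db.table"
    }
    return new String[] {currentDb, dbtable}; // no db part: use the current database
  }
  public static void main(String[] args) {
    String[] qualified = getDbTableName("default", "web.clicks");
    System.out.println(qualified[0] + " / " + qualified[1]); // web / clicks
    String[] bare = getDbTableName("default", "clicks");
    System.out.println(bare[0] + " / " + bare[1]);           // default / clicks
  }
}
```
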
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java
index 671fb9563b..c686f72ea2 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java
@@ -28,7 +28,7 @@
*
* Conditions to check:
*
- * 1. "Script failed with code " is in the log
+ * 1. "Script failed with code <some number>" is in the log
*
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
index a6b0dbc0dc..dbf75b426d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
@@ -493,7 +493,7 @@ public byte getValueResult(byte[] key, int offset, int length, Result hashMapRes
}
/**
- * Take the segment reference from {@link #getValueRefs(byte[], int, List)}
+ * Take the segment reference from getValueRefs(byte[],int,List)
* result and makes it self-contained - adds byte array where the value is stored, and
* updates the offset from "global" write buffers offset to offset within that array.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/FunctionEvent.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/FunctionEvent.java
index b75ebcbbca..9dfcbfae55 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/FunctionEvent.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/FunctionEvent.java
@@ -24,8 +24,8 @@
*
* Since the bootstrap and incremental for functions is handled similarly. There
* is additional work to make sure we pass the event object from both places.
- *
- * @see org.apache.hadoop.hive.ql.parse.repl.load.message.CreateFunctionHandler.FunctionDescBuilder
+ *
+ * FunctionDescBuilder in {@link org.apache.hadoop.hive.ql.parse.repl.load.message.CreateFunctionHandler}
* would be merged here mostly.
*/
public interface FunctionEvent extends BootstrapEvent {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java
index ef6e31f2a6..f1d7563d72 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java
@@ -61,7 +61,7 @@
* 2. Table before partition is not explicitly required as table and partition metadata are in the same file.
*
*
- * For future integrations other sources of events like kafka, would require to implement an Iterator<BootstrapEvent>
+ * For future integrations other sources of events like kafka, would require to implement an Iterator&lt;BootstrapEvent&gt;
*
*/
public class BootstrapEventsIterator implements Iterator<BootstrapEvent> {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java
index 1d01bc9cd2..20ede9c406 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/TaskTracker.java
@@ -30,7 +30,7 @@
/**
* This class will be responsible to track how many tasks have been created,
* organization of tasks such that after the number of tasks for next execution are created
- * we create a dependency collection task(DCT) -> another bootstrap task,
+ * we create a dependency collection task(DCT) -&gt; another bootstrap task,
* and then add DCT as dependent to all existing tasks that are created so the cycle can continue.
*/
public class TaskTracker {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index d9340d0371..ba1b47ebbf 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -1012,7 +1012,7 @@ public Path getDefaultDestDir(Configuration conf) throws LoginException, IOExcep
* to provide on the cluster as resources for execution.
*
* @param conf
- * @return List<LocalResource> local resources to add to execution
+ * @return List&lt;LocalResource&gt; local resources to add to execution
* @throws IOException when hdfs operation fails
* @throws LoginException when getDefaultDestDir fails with the same exception
*/
@@ -1097,7 +1097,7 @@ private void addHdfsResource(Configuration conf, List<LocalResource> tmpResource
* @param hdfsDirPathStr Destination directory in HDFS.
* @param conf Configuration.
* @param inputOutputJars The file names to localize.
- * @return List<LocalResource> local resources to add to execution
+ * @return List&lt;LocalResource&gt; local resources to add to execution
* @throws IOException when hdfs operation fails.
* @throws LoginException when getDefaultDestDir fails with the same exception
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
index 39a9c772a6..b6c0d7f3d8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
@@ -47,7 +47,7 @@
* A simple sleep processor implementation that sleeps for the configured
* time in milliseconds.
*
- * @see Config for configuring the HivePreWarmProcessor
+ * @see Configuration for configuring the HivePreWarmProcessor
*/
public class HivePreWarmProcessor extends AbstractLogicalIOProcessor {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
index 08e65a4a6d..767b359219 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
@@ -222,11 +222,6 @@ public boolean isOpen() {
return true;
}
-
- /**
- * Get all open sessions. Only used to clean up at shutdown.
- * @return List
- */
public static String makeSessionId() {
return UUID.randomUUID().toString();
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java
index 417beec61a..48b57645b9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java
@@ -47,22 +47,22 @@
*
* (Notice the these names are a subset of GroupByDesc.Mode...)
*
- * PARTIAL1 Original data --> Partial aggregation data
+ * PARTIAL1 Original data --&gt; Partial aggregation data
*
- * PARTIAL2 Partial aggregation data --> Partial aggregation data
+ * PARTIAL2 Partial aggregation data --&gt; Partial aggregation data
*
- * FINAL Partial aggregation data --> Full aggregation data
+ * FINAL Partial aggregation data --&gt; Full aggregation data
*
- * COMPLETE Original data --> Full aggregation data
+ * COMPLETE Original data --&gt; Full aggregation data
*
*
- * SIMPLEST CASE --> The data type/semantics of original data, partial aggregation
+ * SIMPLEST CASE --&gt; The data type/semantics of original data, partial aggregation
* data, and full aggregation data ARE THE SAME. E.g. MIN, MAX, SUM. The different
* modes can be handled by one aggregation class.
*
* This case has a null for the Mode.
*
- * FOR OTHERS --> The data type/semantics of partial aggregation data and full aggregation data
+ * FOR OTHERS --&gt; The data type/semantics of partial aggregation data and full aggregation data
* ARE THE SAME but different than original data. This results in 2 aggregation classes:
*
* 1) A class that takes original rows and outputs partial/full aggregation
@@ -75,7 +75,7 @@
*
* E.g. COUNT(*) and COUNT(column)
*
- * OTHERWISE FULL --> The data type/semantics of partial aggregation data is different than
+ * OTHERWISE FULL --> The data type/semantics of partial aggregation data is different than
* original data and full aggregation data.
*
* E.g. AVG uses a STRUCT with count and sum for partial aggregation data. It divides
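To make the mode table above concrete, here is a minimal Java sketch, not the Hive implementation, of why AVG falls into the "OTHERWISE FULL" case: its partial aggregation data (a count/sum pair) has a different type than both the original data and the full result, so the PARTIAL and FINAL work cannot be handled by one class the way MIN, MAX, and SUM can. All type names below are invented for the example.

    // Illustrative only; not Hive classes.
    final class AvgPartial {        // stands in for the STRUCT<count, sum> partial result
      long count;
      double sum;
    }

    final class AvgModesSketch {
      // PARTIAL1: original data --> partial aggregation data
      static AvgPartial partial1(double[] originalRows) {
        AvgPartial p = new AvgPartial();
        for (double v : originalRows) { p.count++; p.sum += v; }
        return p;
      }

      // FINAL: partial aggregation data --> full aggregation data (the average itself)
      static double doFinal(AvgPartial[] partials) {
        long count = 0;
        double sum = 0;
        for (AvgPartial p : partials) { count += p.count; sum += p.sum; }
        return count == 0 ? 0 : sum / count;   // divide count into sum, as the javadoc describes
      }
    }
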
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
index fa056e9212..83e41a37cc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
@@ -323,7 +323,6 @@ private ColumnVector createColumnVectorFromRowColumnTypeInfos(int columnNum) {
* Creates a Vectorized row batch and the column vectors.
*
* @return VectorizedRowBatch
- * @throws HiveException
*/
public VectorizedRowBatch createVectorizedRowBatch()
{
@@ -381,7 +380,6 @@ public VectorizedRowBatch createVectorizedRowBatch()
*
* @param batch
* @param partitionValues
- * @throws HiveException
*/
public void addPartitionColsToBatch(VectorizedRowBatch batch, Object[] partitionValues)
{
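A hedged usage sketch of the two methods touched in this hunk; it assumes a VectorizedRowBatchCtx that was initialized for the current schema elsewhere, and the partition values are illustrative. The removed @throws tags reflect that neither call declares HiveException.

    import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
    import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;

    final class BatchSetupSketch {
      // Builds a batch and fills its partition columns as constants for the whole batch.
      static VectorizedRowBatch newBatchWithPartitionCols(VectorizedRowBatchCtx rbCtx,
                                                          Object[] partitionValues) {
        VectorizedRowBatch batch = rbCtx.createVectorizedRowBatch();  // allocates the column vectors
        rbCtx.addPartitionColsToBatch(batch, partitionValues);        // no HiveException declared
        return batch;
      }
    }
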
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
index 9de2e92fbd..f9a86ae856 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
@@ -24,7 +24,7 @@
* A high-performance set implementation used to support fast set membership testing,
* using Cuckoo hashing. This is used to support fast tests of the form
*
- * column IN (
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMapStore.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMapStore.java
+ * <5 0's for Next Relative Offset> <Key Bytes> <Value Length> <Value Bytes>
* NEXT (NONE) KEY VALUE
*
* NOTE: AbsoluteOffset.byteLength = 5
@@ -76,7 +76,7 @@
* ---------------------------------
* |
* v
- * <5 0's for Next Relative Offset> [Big Key Length]
+ * <5 0's for Next Relative Offset> [Big Key Length] <Key Bytes> <Value Length> <Value Bytes>
* NEXT (NONE) optional KEY VALUE
*
* 3) Two elements when key length is small and stored in reference word:
@@ -88,7 +88,7 @@
* ------------------------------------
* |
* v
- *
+ * <Next Value Rel Offset as 5 bytes> <Key Bytes> <Value Bytes>
* | NEXT KEY VALUE
* |
* | first record absolute offset + relative offset
@@ -96,7 +96,7 @@
* --------
* |
* v
- * <5 0's Padding for Next Value Ref>
+ * <5 0's Padding for Next Value Ref> <Value Length> <Value Bytes>
* NEXT (NONE) VALUE
*
* 4) Three elements showing how first record updated to point to new value and
@@ -109,20 +109,20 @@
* ------------------------------------
* |
* v
- *
+ * <Next Value Rel Offset as 5 bytes> <Key Bytes> <Value Bytes>
* | NEXT KEY VALUE
* |
* | first record absolute offset + relative offset
* |
* |
- * | <5 0's Padding for Next Value Ref>
+ * | <5 0's Padding for Next Value Ref> <Value Length> <Value Bytes>
* | ^ NEXT (NONE) VALUE
* | |
* | ------
* | |
* | | new record absolute offset - (minus) relative offset
* | |
- * ----->
+ * -----><Next Value Rel Offset as 5 bytes> <Value Length> <Value Bytes>
* NEXT VALUE
*
*
@@ -136,26 +136,26 @@
* ------------------------------------
* |
* v
- *
+ * <Next Value Rel Offset as 5 bytes> <Key Bytes> <Value Length> <Value Bytes>
* | NEXT KEY VALUE
* |
* | first record absolute offset + relative offset
* |
* |
- * | <5 0's Padding for Next Value Ref>
+ * | <5 0's Padding for Next Value Ref> <Value Length> <Value Bytes>
* | ^ NEXT (NONE) VALUE
* | |
* | ------
* | | record absolute offset - (minus) relative offset
* | |
- * |
+ * | <Next Value Rel Offset as 5 bytes> <Value Length> <Value Bytes>
* | ^ NEXT VALUE
* | |
* | ------
* | |
* | | new record absolute offset - (minus) relative offset
* | |
- * ----->
+ * -----><Next Value Rel Offset as 5 bytes> <Value Length> <Value Bytes>
* NEXT VALUE
*
*
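A minimal sketch, not the Hive store classes, of the record layout the comments above describe: a 5-byte next-value relative offset, then the key bytes, a value length, and the value bytes. The single-byte value length and the fixed big-endian offset encoding are simplifications for illustration; the real store uses its own variable-length encodings.

    import java.io.ByteArrayOutputStream;

    final class RecordLayoutSketch {
      // Zero in the 5 offset bytes means "no next value", as in case 1) above.
      static byte[] writeRecord(long nextValueRelativeOffset, byte[] key, byte[] value) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        for (int i = 4; i >= 0; i--) {                         // 5 bytes for the relative offset
          out.write((int) (nextValueRelativeOffset >>> (8 * i)) & 0xFF);
        }
        out.write(key, 0, key.length);                         // key bytes
        out.write(value.length & 0xFF);                        // value length (1 byte here only)
        out.write(value, 0, value.length);                     // value bytes
        return out.toByteArray();
      }
    }
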
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSetStore.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSetStore.java
index 20fa03a03b..db103b620f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSetStore.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashMultiSetStore.java
@@ -58,7 +58,7 @@
* --------------------------------------
* |
* v
- * <4 bytes's for set membership count>
+ * <4 bytes's for set membership count> <Key Bytes>
* COUNT KEY
*
* NOTE: MultiSetCount.byteLength = 4
@@ -72,7 +72,7 @@
* -------------------------------------
* |
* v
- * <4 byte's for set membership count> [Big Key Length]
+ * <4 byte's for set membership count> [Big Key Length] <Key Bytes>
* NEXT (NONE) optional KEY
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSetStore.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSetStore.java
index 1a78688d7f..d95722d623 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSetStore.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastBytesHashSetStore.java
@@ -57,7 +57,7 @@
* |
* |
* v
- *
+ * <Key Bytes>
* KEY
*
* 2) One element, general: shows optional big key length.
@@ -68,7 +68,7 @@
* |
* |
* v
- * [Big Key Length]
+ * [Big Key Length] <Key Bytes>
* optional KEY
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperBase.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperBase.java
index 8bf2ccb164..1bb224917e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperBase.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperBase.java
@@ -35,7 +35,7 @@
* A hash map key wrapper for vectorized processing.
* It stores the key values as primitives in arrays for each supported primitive type.
* This works in conjunction with
- * {@link org.apache.hadoop.hive.ql.exec.VectorHashKeyWrapperBatch VectorHashKeyWrapperBatch}
+ * {@link VectorHashKeyWrapperBatch}
* to hash vectorized processing units (batches).
*/
public abstract class VectorHashKeyWrapperBase extends KeyWrapper {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperGeneral.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperGeneral.java
index 8fe53e7bc3..c605ce3afd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperGeneral.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/wrapper/VectorHashKeyWrapperGeneral.java
@@ -44,7 +44,7 @@
* A hash map key wrapper for vectorized processing.
* It stores the key values as primitives in arrays for each supported primitive type.
* This works in conjunction with
- * {@link org.apache.hadoop.hive.ql.exec.VectorHashKeyWrapperBatch VectorHashKeyWrapperBatch}
+ * {@link VectorHashKeyWrapperBatch}
* to hash vectorized processing units (batches).
*/
public class VectorHashKeyWrapperGeneral extends VectorHashKeyWrapperBase {
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
index b45cc8c874..e6b8490bda 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
@@ -199,7 +199,7 @@ public Options bucket(int bucket) {
* Multiple inserts into legacy (pre-acid) tables can generate multiple copies of each bucket
* file.
* @see org.apache.hadoop.hive.ql.exec.Utilities#COPY_KEYWORD
- * @param copyNumber the number of the copy ( > 0)
+ * @param copyNumber the number of the copy ( > 0)
* @return this
*/
public Options copyNumber(int copyNumber) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java
index bdd16c532d..eb9ded7e78 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/BucketCodec.java
@@ -72,7 +72,7 @@ public int encode(AcidOutputFormat.Options options) {
* by {@link RecordIdentifier} which includes the {@link RecordIdentifier#getBucketProperty()}
* which has the actual bucket ID in the high order bits. This scheme also ensures that
* {@link org.apache.hadoop.hive.ql.exec.FileSinkOperator#process(Object, int)} works in case
- * there numBuckets > numReducers. (The later could be fixed by changing how writers are
+ * there numBuckets > numReducers. (The later could be fixed by changing how writers are
* initialized in "if (fpaths.acidLastBucket != bucketNum) {")
*/
V1(1) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java
index 41d90161e3..7a49121b6c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java
@@ -121,11 +121,6 @@ public Configuration getConf() {
return conf;
}
- /**
- * @return the actual class being deserialized.
- * @exception does
- * not currently throw IOException
- */
@Override
public Class getRealClass() throws IOException {
return (Class) conf.getClass(SerializationSubclassKey, null,
@@ -145,8 +140,6 @@ public Configuration getConf() {
* deserialized; in this context, that assumption isn't necessarily true.
*
* @return the serialization object for this context
- * @exception does
- * not currently throw any IOException
*/
@Override
public Serialization getSerialization() throws IOException {
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
index 60833bf8ea..11876fbb10 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
@@ -50,7 +50,7 @@
* data. The binary search can be used by setting the value of inputFormatSorted in the
* MapreduceWork to true, but it should only be used if the data is going to a FilterOperator,
* which filters by comparing a value in the data with a constant, using one of the comparisons
- * =, <, >, <=, >=. If the RecordReader's underlying format is an RCFile, this object can perform
+ * =, <, >, <=, >=. If the RecordReader's underlying format is an RCFile, this object can perform
* a binary search to find the block to begin reading from, and stop reading once it can be
* determined no other entries will match the filter.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
index 8746a204ff..9d05510820 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
@@ -37,8 +37,8 @@
import org.apache.hadoop.util.Progressable;
/**
- * HiveIgnoreKeyTextOutputFormat replaces key with null before feeding the to TextOutputFormat.RecordWriter.
+ * HiveIgnoreKeyTextOutputFormat replaces key with null before feeding the <key,
+ * value> to TextOutputFormat.RecordWriter.
*
*/
public class HiveIgnoreKeyTextOutputFormat
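The idea in the javadoc above can be shown with a small sketch, not the Hive class itself: wrap a RecordWriter so that every <key, value> pair reaches the underlying TextOutputFormat writer with a null key.

    import java.io.IOException;
    import org.apache.hadoop.mapred.RecordWriter;
    import org.apache.hadoop.mapred.Reporter;

    final class IgnoreKeySketch {
      static <K, V> RecordWriter<K, V> ignoreKey(RecordWriter<K, V> underlying) {
        return new RecordWriter<K, V>() {
          @Override
          public void write(K key, V value) throws IOException {
            underlying.write(null, value);   // the key is dropped before delegation
          }
          @Override
          public void close(Reporter reporter) throws IOException {
            underlying.close(reporter);
          }
        };
      }
    }
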
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java
index c221579b5e..a3bddbf35b 100755
--- ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java
@@ -30,7 +30,7 @@
import org.apache.hadoop.util.Progressable;
/**
- * This class replaces key with null before feeding the to
+ * This class replaces key with null before feeding the <key, value> to
* TextOutputFormat.RecordWriter.
*
* @deprecated use {@link HiveIgnoreKeyTextOutputFormat} instead}
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
index b7c990baa0..3e45e45b27 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
@@ -99,10 +99,10 @@
* The {@link Reader} is used to read and explain the bytes of RCFile.
*
*
- *
+ *
*
*
- *
+ *
*
* - version - 3 bytes of magic header RCF, followed by 1 byte of
* actual version number (e.g. RCF1)
@@ -114,10 +114,10 @@
* - sync - A sync marker to denote end of the header.
*
*
- * RCFile Format
+ * RCFile Format
*
* - Header
- * - Record
+ * - Record
* - Key part
*
* - Record length in bytes
@@ -133,7 +133,6 @@
* - ...
*
*
- *
* - Value part
*
* - Compressed or plain data of [column_1_row_1_value,
@@ -143,7 +142,6 @@
*
*
*
- *
*
* {@code
* The following is a pseudo-BNF grammar for RCFile. Comments are prefixed
@@ -336,7 +334,6 @@
* Text ::= VInt, Chars (Length prefixed UTF-8 characters)
* }
*
- *
*/
public class RCFile {
@@ -1095,7 +1092,7 @@ private void checkAndWriteSync() throws IOException {
private int columnBufferSize = 0;
/**
- * Append a row of values. Currently it only can accept <
+ * Append a row of values. Currently it only can accept <
* {@link BytesRefArrayWritable}. If its size() is less than the
* column number in the file, zero bytes are appended for the empty columns.
* If its size() is greater then the column number in the file, the exceeded
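A hedged usage sketch of the append(...) contract described in this hunk, assuming the standard RCFile.Writer and BytesRefArrayWritable APIs; the path and column count are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.RCFile;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
    import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;

    public class RCFileAppendSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setInt(RCFile.COLUMN_NUMBER_CONF_STR, 3);               // the file has 3 columns
        FileSystem fs = FileSystem.getLocal(conf);
        RCFile.Writer writer = new RCFile.Writer(fs, conf, new Path("/tmp/rcfile_demo"));

        BytesRefArrayWritable row = new BytesRefArrayWritable(2);    // only 2 of the 3 columns set
        row.set(0, new BytesRefWritable("a".getBytes("UTF-8")));
        row.set(1, new BytesRefWritable("b".getBytes("UTF-8")));
        writer.append(row);   // the missing column is padded with zero bytes, per the javadoc
        writer.close();
      }
    }
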
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
index 07abd378c5..3044603918 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
@@ -144,7 +144,7 @@ public void resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf,
/**
* alter table ... concatenate
- *
+ *
* If it is skewed table, use subdirectories in inputpaths.
*/
public void resolveConcatenateMerge(HiveConf conf) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index 6d4578e7a0..a08c8bc3ab 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -63,8 +63,8 @@
/**
* A RecordUpdater where the files are stored as ORC.
* A note on various record structures: the {@code row} coming in (as in {@link #insert(long, Object)}
- * for example), is a struct like but what is written to the file
- * * is > (see {@link #createEventSchema(ObjectInspector)})
+ * for example), is a struct like <RecordIdentifier, f1, ... fn> but what is written to the file
+ * * is <op, owid, writerId, rowid, cwid, <f1, ... fn>> (see {@link #createEventObjectInspector(ObjectInspector)})
* So there are OIs here to make the translation.
*/
public class OrcRecordUpdater implements RecordUpdater {
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
index 6d1ca7227d..eb46b5ef1f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
@@ -797,10 +797,10 @@ private static boolean areRowIdsProjected(VectorizedRowBatchCtx rbCtx) {
/**
* There are 2 types of schema from the {@link #baseReader} that this handles. In the case
* the data was written to a transactional table from the start, every row is decorated with
- * transaction related info and looks like >.
+ * transaction related info and looks like <op, owid, writerId, rowid, cwid, <f1, ... fn>>.
*
* The other case is when data was written to non-transactional table and thus only has the user
- * data: . Then this table was then converted to a transactional table but the data
+ * data: <f1, ... fn>. Then this table was then converted to a transactional table but the data
* files are not changed until major compaction. These are the "original" files.
*
* In this case we may need to decorate the outgoing data with transactional column values at
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReader.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReader.java
index f3699f9ccf..2359e8cf34 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReader.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReader.java
@@ -36,7 +36,7 @@
* @param encodings Externally provided metadata (from metadata reader or external cache).
* @param streams Externally provided metadata (from metadata reader or external cache).
* @param physicalFileIncludes The array of booleans indicating whether each column should be read.
- * @param colRgs Arrays of rgs, per column set to true in included, that are to be read.
+ * @param rgs Arrays of rgs, per column set to true in included, that are to be read.
* null in each respective position means all rgs for this column need to be read.
* @param consumer The sink for data that has been read.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java
index b6f68c937e..450f008740 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java
@@ -43,7 +43,7 @@
* Support for additional generic compression: LZO, SNAPPY, ZLIB.
*
*
- *
+ *
* Format:
*
* {@code
@@ -54,9 +54,8 @@
* PS LENGTH (1 byte)
* }
*
- *
*
- *
+ *
* Stripe:
*
* {@code
@@ -65,6 +64,5 @@
* STRIPE-FOOTER
* }
*
- *
*/
package org.apache.hadoop.hive.ql.io.orc;
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
index d83376d3ff..ba697959d2 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
@@ -36,7 +36,7 @@
/**
*
- * The ArrayWritableObjectInspector will inspect an ArrayWritable, considering it as a Hive struct.
+ * The ArrayWritableObjectInspector will inspect an ArrayWritable, considering it as a Hive struct.
* It can also inspect a List if Hive decides to inspect the result of an inspection.
*
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
index 75b1ad1581..3d4900686b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
@@ -21,8 +21,8 @@
import org.apache.hadoop.io.Writable;
/**
- * The DeepParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.
- * It can also inspect a Map if Hive decides to inspect the result of an inspection.
+ * The DeepParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.
+ * It can also inspect a Map if Hive decides to inspect the result of an inspection.
* When trying to access elements from the map it will iterate over all keys, inspecting them and comparing them to the
* desired key.
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
index aec7423117..8da396ea70 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
@@ -23,7 +23,7 @@
import org.apache.hadoop.io.Writable;
/**
- * The ParquetHiveArrayInspector will inspect an ArrayWritable, considering it as an Hive array.
+ * The ParquetHiveArrayInspector will inspect an ArrayWritable, considering it as an Hive array.
* It can also inspect a List if Hive decides to inspect the result of an inspection.
*
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
index 2cb2debcbd..1f28bb6ca8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
@@ -20,7 +20,7 @@
import org.apache.hadoop.io.Writable;
/**
- * The StandardParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.
+ * The StandardParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.
* It can also inspect a Map if Hive decides to inspect the result of an inspection.
*
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
index dc854d9daa..8fbe9d059c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
+++ ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
@@ -30,7 +30,7 @@
* Gets the vector of children nodes. This is used in the graph walker
* algorithms.
*
- * @return List extends Node>
+ * @return List<? extends Node>
*/
List<? extends Node> getChildren();
diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
index 9ddfe683a8..10409b67e9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
+++ ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
@@ -36,13 +36,13 @@
* The rule specified as operator names separated by % symbols, the left side represents the
* bottom of the stack.
*
- * E.g. TS%FIL%RS -> means
+ * E.g. TS%FIL%RS -> means
* TableScan Node followed by Filter followed by ReduceSink in the tree, or, in terms of the
* stack, ReduceSink on top followed by Filter followed by TableScan
*
* @param ruleName
* name of the rule
- * @param regExp
+ * @param pattern
* string specification of the rule
**/
public RuleExactMatch(String ruleName, String[] pattern) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
index 1ab8cd8934..db62db2c40 100644
--- ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
+++ ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
@@ -90,7 +90,7 @@ private static boolean patternHasOnlyWildCardChar(String pattern, char wcc) {
/**
* The rule specified by the regular expression. Note that, the regular
- * expression is specified in terms of Node name. For eg: TS.*RS -> means
+ * expression is specified in terms of Node name. For eg: TS.*RS -> means
* TableScan Node followed by anything any number of times followed by
* ReduceSink
*
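Hedged usage sketch of the rule matching described above, following the common Hive optimizer pattern of mapping rules to node processors and dispatching on the operator stack; the rule name, processors, and context below are placeholders.

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.ql.exec.FilterOperator;
    import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
    import org.apache.hadoop.hive.ql.exec.TableScanOperator;
    import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
    import org.apache.hadoop.hive.ql.lib.Dispatcher;
    import org.apache.hadoop.hive.ql.lib.NodeProcessor;
    import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
    import org.apache.hadoop.hive.ql.lib.Rule;
    import org.apache.hadoop.hive.ql.lib.RuleRegExp;

    final class RuleWiringSketch {
      static Dispatcher buildDispatcher(NodeProcessor tsFilRsProc, NodeProcessor defaultProc,
                                        NodeProcessorCtx ctx) {
        Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
        // Matches a stack whose operator names read TS ... FIL ... RS, per the javadoc above.
        opRules.put(new RuleRegExp("R1",
            TableScanOperator.getOperatorName() + "%.*"
                + FilterOperator.getOperatorName() + "%.*"
                + ReduceSinkOperator.getOperatorName() + "%"), tsFilRsProc);
        return new DefaultRuleDispatcher(defaultProc, opRules, ctx);
      }
    }
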
diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
index 12c10273a9..201ee3e804 100644
--- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
+++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
@@ -178,7 +178,7 @@ void replTableWriteIdState(String validWriteIdList, String dbName, String tableN
* {@link ValidTxnWriteIdList} object can be passed as string to the processing
* tasks for use in the reading the data. This call will return same results as long as validTxnString
* passed is same.
- * @param tableList list of tables (.) read/written by current transaction.
+ * @param tableList list of tables (<db_name>.<table_name>) read/written by current transaction.
* @param validTxnList snapshot of valid txns for the current txn
* @return list of valid table write Ids.
* @throws LockException
diff --git ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
index c49f53fd3c..8dc5eb030d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
@@ -26,13 +26,13 @@
import org.apache.logging.log4j.core.pattern.ConverterKeys;
/**
- * FilePattern converter that converts %pid pattern to @ information
+ * FilePattern converter that converts %pid pattern to <process-id>@<hostname> information
* obtained at runtime.
*
* Example usage:
- *
+ * <RollingFile name="Rolling-default" fileName="test.log" filePattern="test.log.%pid.gz">
*
- * Will generate output file with name containing @ like below
+ * Will generate output file with name containing <process-id>@<hostname> like below
* test.log.95232@localhost.gz
*/
@Plugin(name = "PidFilePatternConverter", category = "FileConverter")
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 9ab3a9ecb6..1adbb44fb5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -432,7 +432,7 @@ public void setAllowClose(boolean allowClose) {
/**
* Gets the allowClose flag which determines if it is allowed to close metastore connections.
- * @returns allowClose flag
+ * @return allowClose flag
*/
public boolean allowClose() {
return isAllowClose;
@@ -648,9 +648,7 @@ public void alterTable(Table newTbl, boolean cascade, EnvironmentContext environ
* new name of the table. could be the old name
* @param transactional
* Need to generate and save a table snapshot into the metastore?
- * @throws InvalidOperationException
- * if the changes in metadata is not acceptable
- * @throws TException
+ * @throws HiveException
*/
public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext,
boolean transactional)
@@ -737,7 +735,7 @@ public void updateCreationMetadata(String dbName, String tableName, CreationMeta
* new partition
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
- * @throws TException
+ * @throws HiveException
*/
@Deprecated
public void alterPartition(String tblName, Partition newPart,
@@ -762,7 +760,7 @@ public void alterPartition(String tblName, Partition newPart,
* indicates this call is for transaction stats
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
- * @throws TException
+ * @throws HiveException
*/
public void alterPartition(String catName, String dbName, String tblName, Partition newPart,
EnvironmentContext environmentContext, boolean transactional)
@@ -819,7 +817,7 @@ private void validatePartition(Partition newPart) throws HiveException {
* Need to generate and save a table snapshot into the metastore?
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
- * @throws TException
+ * @throws HiveException
*/
public void alterPartitions(String tblName, List<Partition> newParts,
EnvironmentContext environmentContext, boolean transactional)
@@ -862,9 +860,7 @@ public void alterPartitions(String tblName, List newParts,
* spec of old partition
* @param newPart
* new partition
- * @throws InvalidOperationException
- * if the changes in metadata is not acceptable
- * @throws TException
+ * @throws HiveException
*/
public void renamePartition(Table tbl, Map<String, String> oldPartSpec, Partition newPart)
throws HiveException {
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
index 6418bd53a9..e635670932 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
@@ -25,11 +25,11 @@
import java.util.Map;
/**
- * PartitionIterable - effectively a lazy Iterable
+ * PartitionIterable - effectively a lazy Iterable<Partition>
*
* Sometimes, we have a need for iterating through a list of partitions,
* but the list of partitions can be too big to fetch as a single object.
- * Thus, the goal of PartitionIterable is to act as an Iterable
+ * Thus, the goal of PartitionIterable is to act as an Iterable<Partition>
* while lazily fetching each relevant partition, one after the other as
* independent metadata calls.
*
@@ -134,7 +134,7 @@ public void remove() {
/**
* Dummy constructor, which simply acts as an iterator on an already-present
* list of partitions, allows for easy drop-in replacement for other methods
- * that already have a List
+ * that already have a List<Partition>
*/
public PartitionIterable(Collection<Partition> ptnsProvided){
this.currType = Type.LIST_PROVIDED;
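A hedged usage sketch of the lazy iteration described above; it assumes the batching constructor PartitionIterable(Hive, Table, Map, int), and the database, table, and batch size are illustrative values.

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
    import org.apache.hadoop.hive.ql.metadata.Table;

    final class PartitionIterationSketch {
      static void printPartitionNames(HiveConf conf) throws Exception {
        Hive db = Hive.get(conf);
        Table tbl = db.getTable("default", "sales");   // illustrative table
        // Partitions are fetched from the metastore 100 at a time, only as the loop advances.
        for (Partition p : new PartitionIterable(db, tbl, null, 100)) {
          System.out.println(p.getName());
        }
      }
    }
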
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index cd483eb6cc..fb1c8d41a2 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -681,7 +681,7 @@ private boolean isField(String col) {
* Returns a list of all the columns of the table (data columns + partition
* columns in that order.
*
- * @return List
+ * @return List<FieldSchema>
*/
public List getAllCols() {
ArrayList<FieldSchema> f_list = new ArrayList<FieldSchema>();
@@ -919,7 +919,7 @@ public boolean isMaterializedView() {
}
/**
- * Creates a partition name -> value spec map object
+ * Creates a partition name -> value spec map object
*
* @param tp
* Use the information from this partition.
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
index 936a80870d..0cdf00b0bf 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
@@ -86,7 +86,6 @@ public void showMaterializedViews(DataOutputStream out, List materialized
* @param cols
* @param isFormatted - describe with formatted keyword
* @param isExt
- * @param isPretty
* @param isOutputPadded - if true, add spacing and indentation
* @param colStats
* @param fkInfo foreign keys information
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
index 60d56e9311..d5f51bfc9c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
@@ -303,7 +303,7 @@ public static ColumnPrunerScriptProc getScriptProc() {
* - add column names referenced in WindowFn args and in WindowFn expressions
* to the pruned list of the child Select Op.
* - finally we set the prunedColList on the ColumnPrunerContx;
- * and update the RR & signature on the PTFOp.
+ * and update the RR & signature on the PTFOp.
*/
public static class ColumnPrunerPTFProc extends ColumnPrunerScriptProc {
@Override
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
index d9686b0370..a0482537ae 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
@@ -47,7 +47,7 @@
/**
* This class implements the processor context for Constant Propagate.
*
- * ConstantPropagateProcCtx keeps track of propagated constants in a column->const map for each
+ * ConstantPropagateProcCtx keeps track of propagated constants in a column->const map for each
* operator, enabling constants to be revolved across operators.
*/
public class ConstantPropagateProcCtx implements NodeProcessorCtx {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
index b539787989..3913bbc17f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
@@ -120,7 +120,7 @@ private ConstantPropagateProcFactory() {
/**
* Get ColumnInfo from column expression.
*
- * @param rr
+ * @param rs
* @param desc
* @return
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
index b82b50937e..542d356769 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
@@ -62,14 +62,14 @@
/**
* Queries of form : select max(c), count(distinct c) from T; generates a plan
- * of form TS->mGBy->RS->rGBy->FS This plan suffers from a problem that vertex
- * containing rGBy->FS necessarily need to have 1 task. This limitation results
+ * of form TS->mGBy->RS->rGBy->FS This plan suffers from a problem that vertex
+ * containing rGBy->FS necessarily need to have 1 task. This limitation results
* in slow execution because that task gets all the data. This optimization if
* successful will rewrite above plan to mGby1-rs1-mGby2-mGby3-rs2-rGby1 This
* introduces extra vertex of mGby2-mGby3-rs2. Note this vertex can have
* multiple tasks and since we are doing aggregation, output of this must
* necessarily be smaller than its input, which results in much less data going
- * in to original rGby->FS vertex, which continues to have single task. Also
+ * in to original rGby->FS vertex, which continues to have single task. Also
* note on calcite tree we have HiveExpandDistinctAggregatesRule rule which does
* similar plan transformation but has different conditions which needs to be
* satisfied. Additionally, we don't do any costing here but this is possibly
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
index 782ce1687d..a6e2f53b48 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
@@ -176,8 +176,6 @@ public GenMRProcContext() {
* hive configuration
* @param opTaskMap
* reducer to task mapping
- * @param seenOps
- * operator already visited
* @param parseCtx
* current parse context
* @param rootTasks
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 2131bf131d..d9f675405c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -434,8 +434,8 @@ static void splitPlan(ReduceSinkOperator cRS, GenMRProcContext opProcCtx)
* current alias
* @param topOp
* the top operator of the stack
- * @param plan
- * current plan
+ * @param task
+ * current task
* @param local
* whether you need to add to map-reduce or local work
* @param opProcCtx
@@ -454,8 +454,8 @@ public static void setTaskPlan(String alias_id,
* current alias
* @param topOp
* the top operator of the stack
- * @param plan
- * current plan
+ * @param task
+ * current task
* @param local
* whether you need to add to map-reduce or local work
* @param opProcCtx
@@ -476,13 +476,11 @@ public static void setTaskPlan(String alias_id,
*
* @param alias_id
* current alias
- * @param topOp
- * the top operator of the stack
* @param plan
* map work to initialize
* @param local
* whether you need to add to map-reduce or local work
- * @param pList
+ * @param partsList
* pruned partition list. If it is null it will be computed on-the-fly.
* @param inputs
* read entities for the map work
@@ -764,7 +762,7 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set topOp, MapWork plan, boolean local,
@@ -1254,11 +1252,11 @@ public static void replaceMapWork(String sourceAlias, String targetAlias,
* v
* FileSinkOperator (fsMerge)
*
- * Here the pathToPartitionInfo & pathToAlias will remain the same, which means the paths
+ * Here the pathToPartitionInfo & pathToAlias will remain the same, which means the paths
* do
* not contain the dynamic partitions (their parent). So after the dynamic partitions are
* created (after the first job finished before the moveTask or ConditionalTask start),
- * we need to change the pathToPartitionInfo & pathToAlias to include the dynamic
+ * we need to change the pathToPartitionInfo & pathToAlias to include the dynamic
* partition
* directories.
*
@@ -1616,8 +1614,8 @@ private static MapWork createMRWorkForMergingFiles (HiveConf conf,
*
* @param fsInputDesc
* @param finalName
+ * @param hasDynamicPartitions
* @param ctx
- * @param inputFormatClass
* @return MergeWork if table is stored as RCFile or ORCFile,
* null otherwise
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
index bd0cbab13c..e368570fca 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
@@ -50,11 +50,11 @@
/**
* This optimizer is used to reduce the input size for the query for queries which are
* specifying a limit.
- *
+ *
* For eg. for a query of type:
- *
- * select expr from T where limit 100;
- *
+ *
+ * select expr from T where <filter> limit 100;
+ *
* Most probably, the whole table T need not be scanned.
* Chances are that even if we scan the first file of T, we would get the 100 rows
* needed by this query.
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
index 5b4b09828b..7c841ba48e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
@@ -55,15 +55,15 @@
*
* Without this optimization:
*
- * TS -> FIL -> SEL -> RS ->
- * JOIN -> SEL -> FS
- * TS -> FIL -> SEL -> RS ->
+ * TS -> FIL -> SEL -> RS ->
+ * JOIN -> SEL -> FS
+ * TS -> FIL -> SEL -> RS ->
*
* With this optimization
*
- * TS -> FIL -> RS ->
- * JOIN -> FS
- * TS -> FIL -> RS ->
+ * TS -> FIL -> RS ->
+ * JOIN -> FS
+ * TS -> FIL -> RS ->
*
* Note absence of select operator after filter and after join operator.
* Also, see : identity_proj_remove.q
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
index 1dbe160a27..6cea72fed1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
@@ -54,7 +54,7 @@
* If RS is only for limiting rows, RSHash counts row with same key separately.
* But if RS is for GBY, RSHash should forward all the rows with the same key.
*
- * Legend : A(a) --> key A, value a, row A(a)
+ * Legend : A(a) --> key A, value a, row A(a)
*
* If each RS in mapper tasks is forwarded rows like this
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
index ceeeb8f0a3..1256e1ce58 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
@@ -276,11 +276,8 @@ private static void validateMapJoinTypes(Operator extends OperatorDesc> op)
/**
* convert a regular join to a a map-side join.
*
- * @param opParseCtxMap
* @param op
* join operator
- * @param joinTree
- * qb join tree
* @param mapJoinPos
* position of the source to be read as part of map-reduce framework. All other sources
* are cached in memory
@@ -624,11 +621,8 @@ private static boolean needValueIndex(int[] valueIndex) {
/**
* convert a sortmerge join to a a map-side join.
*
- * @param opParseCtxMap
* @param smbJoinOp
* join operator
- * @param joinTree
- * qb join tree
* @param bigTablePos
* position of the source to be read as part of map-reduce framework. All other sources
* are cached in memory
@@ -798,7 +792,6 @@ public MapJoinOperator generateMapJoinOperator(ParseContext pctx, JoinOperator o
* @param mapJoinPos the position of big table as determined by either hints or auto conversion.
* @param condns the join conditions
* @return if given mapjoin position is a feasible big table position return same else -1.
- * @throws SemanticException if given position is not in the big table candidates.
*/
public static int checkMapJoin(int mapJoinPos, JoinCondDesc[] condns) {
Set<Integer> bigTableCandidates =
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
index 1626e26782..ab86a80217 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
@@ -93,7 +93,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
/**
* Generate predicate.
*
- * Subclass should implement the function. Please refer to {@link OpProcFactory.FilterPPR}
+ * Subclass should implement the function. Please refer to {@link org.apache.hadoop.hive.ql.optimizer.ppr.OpProcFactory.FilterPPR}
*
* @param procCtx
* @param fop
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java
index 52343f4d69..93ba57a074 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java
@@ -95,7 +95,7 @@
* in the query plan and merge them if they met some preconditions.
*
* TS TS TS
- * | | -> / \
+ * | | -> / \
* Op Op Op Op
*
* Now the rule has been extended to find opportunities to other operators
@@ -104,7 +104,7 @@
* TS1 TS2 TS1 TS2 TS1 TS2
* | | | | | |
* | RS | RS | RS
- * \ / \ / -> \ /
+ * \ / \ / -> \ /
* MapJoin MapJoin MapJoin
* | | / \
* Op Op Op Op
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
index a5400d6b27..e581665950 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
@@ -37,9 +37,7 @@
* convert a regular join to a a map-side join.
*
* @param conf
- * @param opParseCtxMap
* @param op join operator
- * @param joinTree qb join tree
* @param bigTablePos position of the source to be read as part of
* map-reduce framework. All other sources are cached in memory
* @param noCheckOuterJoin
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index 1f8a48c7ad..6ed8b92178 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -613,7 +613,7 @@ public static boolean orderRelNode(RelNode rel) {
/**
* Get top level select starting from root. Assumption here is root can only
- * be Sort & Project. Also the top project should be at most 2 levels below
+ * be Sort & Project. Also the top project should be at most 2 levels below
* Sort; i.e Sort(Limit)-Sort(OB)-Select
*
* @param rootRel
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
index 67312a4ee1..f29b1f3c26 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
@@ -120,7 +120,7 @@ public static HiveProject create(RelOptCluster cluster, RelNode child, List ex
* are projected multiple times.
*
*
- * This method could optimize the result as {@link #permute} does, but does
+ * This method could optimize the result as permute does, but does
* not at present.
*
* @param rel
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
index 600c7c0d07..1d10c60d77 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
@@ -68,8 +68,8 @@
* have m+n=a, 2m+n=b where m is the #row in R1 and n is the #row in R2 then
* m=b-a, n=2a-b, m-n=2b-3a
* if it is except (distinct)
- * then R5 = Fil (b-a>0 && 2a-b=0) R6 = select only keys from R5
- * else R5 = Fil (2b-3a>0) R6 = UDTF (R5) which will explode the tuples based on 2b-3a.
+ * then R5 = Fil (b-a>0 && 2a-b=0) R6 = select only keys from R5
+ * else R5 = Fil (2b-3a> 0) R6 = UDTF (R5) which will explode the tuples based on 2b-3a.
* Note that NULLs are handled the same as other values. Please refer to the test cases.
*/
public class HiveExceptRewriteRule extends RelOptRule {
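A quick numeric check of the row-count identities quoted above (an illustrative example, not part of the patch): with m = 3 copies of a key in R1 and n = 1 copy in R2,

    a = m + n  = 4        b = 2m + n = 7
    b - a      = 3  = m
    2a - b     = 1  = n
    2b - 3a    = 2  = m - n

so the EXCEPT DISTINCT filter (b - a > 0 and 2a - b = 0) correctly rejects this key because it also appears in R2, while EXCEPT ALL keeps m - n = 2 copies of it.
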
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
index c331eab37d..0c8c5e1a8e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
@@ -46,10 +46,10 @@
import com.google.common.collect.Sets;
/** Not an optimization rule.
- * Rule to aid in translation from Calcite tree -> Hive tree.
+ * Rule to aid in translation from Calcite tree -> Hive tree.
* Transforms :
* Left Right Left Right
- * \ / -> \ /
+ * \ / -> \ /
* Join HashExchange HashExchange
* \ /
* Join
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
index f7712e6c33..cdc94d5629 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
@@ -52,12 +52,12 @@
* column statistics (if available).
*
* For instance, given the following predicate:
- * a > 5
+ * a > 5
* we can infer that the predicate will evaluate to false if the max
* value for column a is 4.
*
* Currently we support the simplification of:
- * - =, >=, <=, >, <
+ * - =, >=, <=, >, <
* - IN
* - IS_NULL / IS_NOT_NULL
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
index 7ab4e125cc..50ed8eda89 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
@@ -69,9 +69,9 @@
* Sub-queries are represented by {@link RexSubQuery} expressions.
*
* A sub-query may or may not be correlated. If a sub-query is correlated,
- * the wrapped {@link RelNode} will contain a {@link RexCorrelVariable} before
- * the rewrite, and the product of the rewrite will be a {@link Correlate}.
- * The Correlate can be removed using {@link RelDecorrelator}.
+ * the wrapped {@link RelNode} will contain a {@link org.apache.calcite.rex.RexCorrelVariable} before
+ * the rewrite, and the product of the rewrite will be a {@link org.apache.calcite.rel.core.Correlate}.
+ * The Correlate can be removed using {@link org.apache.calcite.sql2rel.RelDecorrelator}.
*/
public class HiveSubQueryRemoveRule extends RelOptRule {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java
index 8f96288aa7..c51ae0d879 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java
@@ -36,7 +36,7 @@
/**
* JDBCAggregationPushDownRule convert a {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate}
- * into a {@link org.apache.calcite.adapter.jdbc.JdbcRules.JdbcAggregateRule.JdbcAggregate}
+ * into a {@link JdbcAggregate}
* and pushes it down below the {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter}
* operator so it will be sent to the external table.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java
index 5c03f87361..0e88f53817 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java
@@ -33,7 +33,7 @@
/**
* JDBCProjectPushDownRule convert a {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject}
- * into a {@link org.apache.calcite.adapter.jdbc.JdbcRules.JdbcAggregateRule.JdbcProject}
+ * into a {@link JdbcProject}
* and pushes it down below the {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter}}
* operator so it will be sent to the external table.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java
index aabd75ee1f..a8eb070afc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java
@@ -51,7 +51,7 @@
* SELECT a, b, SUM(x) AS s, COUNT(*) AS c --NEW DATA
* FROM TAB_A
* JOIN TAB_B ON (TAB_A.a = TAB_B.z)
- * WHERE TAB_A.ROW_ID > 5
+ * WHERE TAB_A.ROW_ID > 5
* GROUP BY a, b) inner_subq
* GROUP BY a, b;
*
@@ -61,10 +61,10 @@
* SELECT a, b, SUM(x) AS s, COUNT(*) AS c --NEW DATA
* FROM TAB_A
* JOIN TAB_B ON (TAB_A.a = TAB_B.z)
- * WHERE TAB_A.ROW_ID > 5
+ * WHERE TAB_A.ROW_ID > 5
* GROUP BY a, b) source
* ON (mv.a = source.a AND mv.b = source.b)
- * WHEN MATCHED AND mv.c + source.c <> 0
+ * WHEN MATCHED AND mv.c + source.c <> 0
* THEN UPDATE SET mv.s = mv.s + source.s, mv.c = mv.c + source.c
* WHEN NOT MATCHED
* THEN INSERT VALUES (source.a, source.b, s, c);
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
index 70f83433d8..b304e38edd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
@@ -69,7 +69,7 @@
* 1. Change the output col/ExprNodeColumn names to external names.
* 2. Verify if we need to use the "KEY."/"VALUE." in RS cols; switch to
* external names if possible.
- * 3. In ExprNode & in ColumnInfo the tableAlias/VirtualColumn is specified
+ * 3. In ExprNode & in ColumnInfo the tableAlias/VirtualColumn is specified
* differently for different GB/RS in pipeline. Remove the different treatments.
* 4. VirtualColMap needs to be maintained
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
index 7ff92edd91..3a42191131 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
@@ -203,7 +203,7 @@ private void findPossibleAutoConvertedJoinOperators() throws SemanticException {
/**
* Detect correlations and transform the query tree.
*
- * @param pactx
+ * @param pctx
* current parse context
* @throws SemanticException
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
index c553dcaa88..d2cf78bee5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
@@ -97,7 +97,7 @@ protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws SemanticE
* @param throwException if throw a exception when the input operator has multiple parents
* @return the single parent or null when the input operator has multiple parents and
* throwException is false;
- * @throws HiveException
+ * @throws SemanticException
*/
protected static Operator<?> getSingleParent(Operator<?> operator,
boolean throwException) throws SemanticException {
@@ -127,7 +127,7 @@ protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws SemanticE
* @param throwException if throw a exception when the input operator has multiple children
* @return the single child or null when the input operator has multiple children and
* throwException is false;
- * @throws HiveException
+ * @throws SemanticException
*/
protected static Operator<?> getSingleChild(Operator<?> operator,
boolean throwException) throws SemanticException {
@@ -477,8 +477,7 @@ protected static void isNullOperator(Operator> operator) throws SemanticExcept
* @param newOperator the operator will be inserted between child and parent
* @param child
* @param parent
- * @param context
- * @throws HiveException
+ * @throws SemanticException
*/
protected static void insertOperatorBetween(
Operator<?> newOperator, Operator<?> parent, Operator<?> child)
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
index 06498eb637..076a9961c7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
@@ -119,18 +119,18 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
*
* Complete dynamic-multi-dimension collection
*
- * (0,0) (1,a) * -> T
- * (0,1) (1,b) -> T
- * (0,2) (1,c) *-> F
- * (0,3) (1,other)-> F
- * (1,0) (2,a)-> F
- * (1,1) (2,b) * -> T
- * (1,2) (2,c)-> F
- * (1,3) (2,other)-> F
- * (2,0) (other,a) -> T
- * (2,1) (other,b) -> T
- * (2,2) (other,c) -> T
- * (2,3) (other,other) -> T
+ * (0,0) (1,a) * -> T
+ * (0,1) (1,b) -> T
+ * (0,2) (1,c) *-> F
+ * (0,3) (1,other)-> F
+ * (1,0) (2,a)-> F
+ * (1,1) (2,b) * -> T
+ * (1,2) (2,c)-> F
+ * (1,3) (2,other)-> F
+ * (2,0) (other,a) -> T
+ * (2,1) (other,b) -> T
+ * (2,2) (other,c) -> T
+ * (2,3) (other,other) -> T
* * is skewed value entry
*
* Expression Tree : ((c1=1) and (c2=a)) or ( (c1=3) or (c2=b))
@@ -171,7 +171,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
*
*
* child_nd instanceof ExprNodeConstantDesc
- * && ((ExprNodeConstantDesc) child_nd).getValue() == null)
+ * && ((ExprNodeConstantDesc) child_nd).getValue() == null)
*
*
*
@@ -410,7 +410,7 @@ private static void decideDefaultDirSelection(Partition part, List selecte
* 2. all other cases, select the directory
* Use case #2:
* Multiple dimension collection represents skewed elements so that walk through tree one by one.
- * Cell is a List representing the value mapping from index path and skewed value.
+ * Cell is a List<String> representing the value mapping from index path and skewed value.
* skewed column: C1, C2, C3
* skewed value: (1,a,x), (2,b,x), (1,c,x), (2,a,y)
* Other: represent value for the column which is not part of skewed value.
@@ -428,8 +428,8 @@ private static void decideDefaultDirSelection(Partition part, List selecte
* ==============
* please see another example in {@link ListBucketingPruner#prune}
* We will use a HasMap to represent the Dynamic-Multiple-Dimension collection:
- * 1. Key is List representing the index path to the cell
- * 2. value represents the cell (Boolean for use case #1, List for case #2)
+ * 1. Key is List<Integer> representing the index path to the cell
+ * 2. value represents the cell (Boolean for use case #1, List<String> for case #2)
* For example:
* 1. skewed column (list): C1, C2, C3
* 2. skewed value (list of list): (1,a,x), (2,b,x), (1,c,x), (2,a,y)
@@ -446,7 +446,7 @@ private static void decideDefaultDirSelection(Partition part, List selecte
*
* We use the index,starting at 0. to construct hashmap representing dynamic-multi-dimension
* collection:
- * key (what skewed value key represents) -> value (Boolean for use case #1, List for case
+ * key (what skewed value key represents) -> value (Boolean for use case #1, List<String> for case
* #2).
* (0,0,0) (1,a,x)
* (0,0,1) (1,a,y)
@@ -572,18 +572,18 @@ private static void decideDefaultDirSelection(Partition part, List selecte
* Index: (0,1,2) (0,1,2,3)
*
* Complete dynamic-multi-dimension collection
- * (0,0) (1,a) * -> T
- * (0,1) (1,b) -> T
- * (0,2) (1,c) *-> F
- * (0,3) (1,other)-> F
- * (1,0) (2,a)-> F
- * (1,1) (2,b) * -> T
- * (1,2) (2,c)-> F
- * (1,3) (2,other)-> F
- * (2,0) (other,a) -> T
- * (2,1) (other,b) -> T
- * (2,2) (other,c) -> T
- * (2,3) (other,other) -> T
+ * (0,0) (1,a) * -> T
+ * (0,1) (1,b) -> T
+ * (0,2) (1,c) *-> F
+ * (0,3) (1,other)-> F
+ * (1,0) (2,a)-> F
+ * (1,1) (2,b) * -> T
+ * (1,2) (2,c)-> F
+ * (1,3) (2,other)-> F
+ * (2,0) (other,a) -> T
+ * (2,1) (other,b) -> T
+ * (2,2) (other,c) -> T
+ * (2,3) (other,other) -> T
* * is skewed value entry
*
* @param uniqSkewedElements
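
The index-path walkthrough above can be made concrete with a few lines of plain Java. The sketch below builds the same kind of map the javadoc describes, keyed by List<Integer> index paths, for the two-column example and the predicate ((c1=1) and (c2=a)) or (c1=3) or (c2=b). Class and method names are illustrative; this is not the pruner's actual code.

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SkewedCellSketch {

  /** "other" stands for any value that is not explicitly listed for the column. */
  static boolean couldBe(String cellValue, String constant, List<String> listedValues) {
    return "other".equals(cellValue) ? !listedValues.contains(constant)
                                     : cellValue.equals(constant);
  }

  public static void main(String[] args) {
    List<String> c1Values = Arrays.asList("1", "2", "other");
    List<String> c2Values = Arrays.asList("a", "b", "c", "other");
    List<String> c1Listed = Arrays.asList("1", "2");
    List<String> c2Listed = Arrays.asList("a", "b", "c");

    // Key: index path (i, j) into the two value lists; value: can the cell possibly
    // satisfy ((c1=1) and (c2=a)) or (c1=3) or (c2=b)?
    Map<List<Integer>, Boolean> cells = new HashMap<>();
    for (int i = 0; i < c1Values.size(); i++) {
      for (int j = 0; j < c2Values.size(); j++) {
        String v1 = c1Values.get(i);
        String v2 = c2Values.get(j);
        boolean keep = (couldBe(v1, "1", c1Listed) && couldBe(v2, "a", c2Listed))
            || couldBe(v1, "3", c1Listed)
            || couldBe(v2, "b", c2Listed);
        cells.put(Arrays.asList(i, j), keep);
      }
    }

    System.out.println(cells.get(Arrays.asList(0, 0))); // (0,0) = (1,a)         -> true
    System.out.println(cells.get(Arrays.asList(0, 2))); // (0,2) = (1,c)         -> false
    System.out.println(cells.get(Arrays.asList(2, 3))); // (2,3) = (other,other) -> true
  }
}
```

Treating "other" as "any value not explicitly listed" is what makes every (other, *) cell come out true in the table above: the catch-all directory might still contain rows with c1 = 3.
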
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
index 6c6908a9e8..8903eb7381 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
@@ -104,7 +104,7 @@ private void initialize(HiveConf hiveConf) {
* invoke all the resolvers one-by-one, and alter the physical plan.
*
* @return PhysicalContext
- * @throws HiveException
+ * @throws SemanticException
*/
public PhysicalContext optimize() throws SemanticException {
for (PhysicalPlanResolver r : resolvers) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
index 691e9428d2..03324a6a1d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
@@ -47,7 +47,6 @@
* Evaluate expression with partition columns
*
* @param expr
- * @param partSpec
* @param rowObjectInspector
* @return value returned by the expression
* @throws HiveException
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index b19c7de1b7..93f3927b69 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -224,45 +224,32 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
/**
* FILTER operator does not change the average row size but it does change the number of rows
* emitted. The reduction in the number of rows emitted is dependent on the filter expression.
- *
* Notations:
+ *
* - T(S) - Number of tuples in relations S
* - V(S,A) - Number of distinct values of attribute A in relation S
*
+ * Rules:
*
- * Rules:
- * - Column equals a constant T(S) = T(R) / V(R,A)
- *
- *
- * - Inequality conditions T(S) = T(R) / 3
- *
- *
- * - Not equals comparison - Simple formula T(S) = T(R)
- *
- * - Alternate formula T(S) = T(R) (V(R,A) - 1) / V(R,A)
- *
- *
- * - NOT condition T(S) = 1 - T(S'), where T(S') is the satisfying condition
- *
- *
- * - Multiple AND conditions Cascadingly apply the rules 1 to 3 (order doesn't matter)
- *
- *
- * - Multiple OR conditions - Simple formula is to evaluate conditions independently
- * and sum the results T(S) = m1 + m2
- *
- * - Alternate formula T(S) = T(R) * ( 1 - ( 1 - m1/T(R) ) * ( 1 - m2/T(R) ))
- *
+ *
+ * - Column equals a constant T(S) = T(R) / V(R,A)
+ * - Inequality conditions T(S) = T(R) / 3
+ * - Not equals comparison - Simple formula T(S) = T(R)
+ * - - Alternate formula T(S) = T(R) (V(R,A) - 1) / V(R,A)
+ * - NOT condition T(S) = 1 - T(S'), where T(S') is the satisfying condition
+ * - Multiple AND conditions Cascadingly apply the rules 1 to 3 (order doesn't matter)
+ * - Multiple OR conditions - Simple formula is to evaluate conditions independently
+ * and sum the results T(S) = m1 + m2
+ * - - Alternate formula T(S) = T(R) * ( 1 - ( 1 - m1/T(R) ) * ( 1 - m2/T(R) ))
+ *
* where, m1 is the number of tuples that satisfy condition1 and m2 is the number of tuples that
- * satisfy condition2
+ * satisfy condition2
*
- *
* Worst case: If no column statistics are available, then evaluation of predicate
* expression will assume worst case (i.e; half the input rows) for each of predicate expression.
- *
+ *
* For more information, refer 'Estimating The Cost Of Operations' chapter in
* "Database Systems: The Complete Book" by Garcia-Molina et. al.
- *
+ *
*/
public static class FilterStatsRule extends DefaultStatsRule implements NodeProcessor {
@@ -1201,7 +1188,7 @@ private long evaluateChildExpr(Statistics stats, ExprNodeDesc child,
* available then a better estimate can be found by taking the smaller of product of V(R,[A,B,C])
* (product of distinct cardinalities of A,B,C) and T(R)/2.
*
- * T(R) = min (T(R)/2 , V(R,[A,B,C]) ---> [1]
+ * T(R) = min (T(R)/2 , V(R,[A,B,C]) ---> [1]
*
* In the presence of grouping sets, map-side GBY will emit more rows depending on the size of
* grouping set (input rows * size of grouping set). These rows will get reduced because of
@@ -1645,12 +1632,12 @@ private boolean checkMapSideAggregation(GroupByOperator gop,
}
/**
- * JOIN operator can yield any of the following three cases - The values of join keys are
+ * JOIN operator can yield any of the following three cases - The values of join keys are
* disjoint in both relations in which case T(RXS) = 0 (we need histograms for this) - Join
* key is primary key on relation R and foreign key on relation S in which case every tuple in S
- * will have a tuple in R T(RXS) = T(S) (we need histograms for this) - Both R & S relation
+ * will have a tuple in R T(RXS) = T(S) (we need histograms for this) - Both R & S relation
* have same value for join-key. Ex: bool column with all true values T(RXS) = T(R) * T(S) (we
- * need histograms for this. counDistinct = 1 and same value)
+ * need histograms for this. counDistinct = 1 and same value)
*
* In the absence of histograms, we can use the following general case
*
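
The selectivity rules listed in the hunks above reduce to simple arithmetic on the row count T(R) and the distinct-value count V(R,A). A minimal, self-contained sketch of those rules (illustrative names, not StatsRulesProcFactory's actual implementation):

```java
public class FilterCardinalitySketch {

  static long equalsConstant(long rows, long distinctValues) {
    return rows / Math.max(distinctValues, 1);      // T(S) = T(R) / V(R,A)
  }

  static long inequality(long rows) {
    return rows / 3;                                // T(S) = T(R) / 3
  }

  static long notEquals(long rows, long distinctValues) {
    // Alternate formula: T(S) = T(R) * (V(R,A) - 1) / V(R,A)
    return distinctValues <= 0 ? rows : rows * (distinctValues - 1) / distinctValues;
  }

  static long andConditions(long rows, long... satisfying) {
    // Cascade: multiply the individual selectivities m_i / T(R).
    double result = rows;
    for (long m : satisfying) {
      result *= (double) m / rows;
    }
    return (long) result;
  }

  static long orConditions(long rows, long m1, long m2) {
    // Alternate formula: T(S) = T(R) * (1 - (1 - m1/T(R)) * (1 - m2/T(R)))
    double p1 = (double) m1 / rows;
    double p2 = (double) m2 / rows;
    return (long) (rows * (1 - (1 - p1) * (1 - p2)));
  }

  public static void main(String[] args) {
    long rows = 10_000L;
    long ndv = 50L;
    System.out.println(equalsConstant(rows, ndv));        // 200
    System.out.println(inequality(rows));                 // 3333
    System.out.println(notEquals(rows, ndv));             // 9800
    System.out.println(andConditions(rows, 200, 3333));   // 66
    System.out.println(orConditions(rows, 200, 3333));    // 3466
  }
}
```

The worst-case fallback mentioned above (no column statistics available) corresponds to halving the row count once per predicate instead of applying these formulas.
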
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
index 7b32020f1a..746d0dc55e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
@@ -100,7 +100,7 @@ public String getName() {
/**
* For every node in this subtree, make sure it's start/stop token's
* are set. Walk depth first, visit bottom up. Only updates nodes
- * with at least one token index < 0.
+ * with at least one token index < 0.
*
* In contrast to the method in the parent class, this method is
* iterative.
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
index 41e3754cdd..4b2958af2b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
@@ -77,7 +77,7 @@ protected void analyze(ASTNode tree) throws SemanticException {
* were generated. It may also contain insert events that belong to transactions that aborted
* where the same constraints apply.
* In order to make the export artifact free of these constraints, the export does a
- * insert into tmpTable select * from to filter/apply the events in current
+ * insert into tmpTable select * from <export table> to filter/apply the events in current
* context and then export the tmpTable. This export artifact can now be imported into any
* table on any cluster (subject to schema checks etc).
* See {@link #analyzeAcidExport(ASTNode)}
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
index f5d79ed5ab..e385d4e755 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
@@ -413,7 +413,7 @@ public void setExpressions(ArrayList columns)
/**
* Add order expressions from the list of expressions in the format of ASTNode
- * @param args
+ * @param nodes
*/
public void addExpressions(ArrayList nodes) {
for (int i = 0; i < nodes.size(); i++) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
index b087831d04..3cc764802a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
@@ -209,7 +209,7 @@ public boolean allowEventReplacementInto(Map params){
}
/**
- * Returns a predicate filter to filter an Iterable to return all partitions
+ * Returns a predicate filter to filter an Iterable<Partition> to return all partitions
* that the current replication event specification is allowed to replicate-replace-into
*/
public Predicate allowEventReplacementInto() {
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
index c31666e419..3734882e9b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
@@ -120,7 +120,7 @@ public void setDenominator(int den) {
/**
* Gets the ON part's expression list.
*
- * @return ArrayList
+ * @return ArrayList<ASTNode>
*/
public ArrayList<ASTNode> getExprs() {
return exprs;
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
index 93641af215..d70353e358 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
@@ -929,7 +929,7 @@ public boolean getIsCascade() {
}
/**
- * @param cascade the isCascade to set
+ * @param isCascade the isCascade to set
*/
public void setIsCascade(boolean isCascade) {
this.isCascade = isCascade;
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
index 7130aba597..434e568295 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
@@ -85,12 +85,11 @@ public CreateViewDesc() {
* @param tblProps
* @param partColNames
* @param ifNotExists
- * @param orReplace
+ * @param replace
* @param isAlterViewAs
* @param inputFormat
* @param outputFormat
* @param location
- * @param serName
* @param serde
* @param storageHandler
* @param serdeProps
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java
index f9d545f040..ffb81b54b9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java
@@ -102,7 +102,7 @@ public MmContext getMmContext() {
* For exporting Acid table, change the "pointer" to the temp table.
* This has to be done after the temp table is populated and all necessary Partition objects
* exist in the metastore.
- * See {@link org.apache.hadoop.hive.ql.parse.AcidExportAnalyzer#isAcidExport(ASTNode)}
+ * See {@link org.apache.hadoop.hive.ql.parse.AcidExportSemanticAnalyzer#isAcidExport(ASTNode)}
* for more info.
*/
public void acidPostProcess(Hive db) throws HiveException {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
index eb5b111bad..cada47b126 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
@@ -415,7 +415,7 @@ private static ExprNodeDesc backtrack(ExprNodeColumnDesc column, Operator> cur
/**
* Join keys are expressions based on the select operator. Resolve the expressions so they
* are based on the ReduceSink operator
- * SEL -> RS -> JOIN
+ * SEL -> RS -> JOIN
* @param source
* @param reduceSinkOp
* @return
@@ -665,10 +665,10 @@ public static PrimitiveTypeInfo deriveMinArgumentCast(
* @param inputOp
* Input Hive Operator
* @param startPos
- * starting position in the input operator schema; must be >=0 and <=
+ * starting position in the input operator schema; must be >=0 and <=
* endPos
* @param endPos
- * end position in the input operator schema; must be >=0.
+ * end position in the input operator schema; must be >=0.
* @return List of ExprNodeDesc
*/
public static ArrayList genExprNodeDesc(Operator inputOp, int startPos, int endPos,
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
index 5f8cf54d57..86d4fefb7d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
@@ -184,7 +184,6 @@ public void setDefaultDirName(String defaultDirName) {
/**
* check if list bucketing is enabled.
*
- * @param ctx
* @return
*/
public boolean isSkewedStoredAsDir() {
@@ -201,7 +200,6 @@ public boolean isSkewedStoredAsDir() {
* 0: not list bucketing
* int: no. of skewed columns
*
- * @param ctx
* @return
*/
public int calculateListBucketingLevel() {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index d5a30da419..bb063c52be 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@ -214,10 +214,10 @@ public void removePathToAlias(Path path){
}
/**
- * This is used to display and verify output of "Path -> Alias" in test framework.
+ * This is used to display and verify output of "Path -> Alias" in test framework.
*
- * QTestUtil masks "Path -> Alias" and makes verification impossible.
- * By keeping "Path -> Alias" intact and adding a new display name which is not
+ * QTestUtil masks "Path -> Alias" and makes verification impossible.
+ * By keeping "Path -> Alias" intact and adding a new display name which is not
* masked by QTestUtil by removing prefix.
*
* Notes: we would still be masking for intermediate directories.
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index 5229700dbd..c60b574b16 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -1004,7 +1004,7 @@ public static String stripQuotes(String val) {
}
/**
- * Remove prefix from "Path -> Alias"
+ * Remove prefix from "Path -> Alias"
* This is required for testing.
* In order to verify that path is right, we need to display it in expected test result.
* But, mask pattern masks path with some patterns.
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
index d24c4ef085..ba5d06e079 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
@@ -85,7 +85,7 @@ public String getDatabaseName() {
}
/**
- * @param databaseName
+ * @param dbName
* the dbName to set
*/
public void setDatabaseName(String dbName) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
index 18cf12c968..609d1740a6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
@@ -77,7 +77,7 @@ public ShowFunctionsDesc(Path resFile, String pattern) {
/**
* @param pattern
* names of tables to show
- * @param like
+ * @param isLikePattern
* is like keyword used
*/
public ShowFunctionsDesc(Path resFile, String pattern, boolean isLikePattern) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
index 8bb40abc8d..52a5d1b22b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
@@ -32,7 +32,7 @@
* 1. It's position in table column is 1.
* 2. It's position in skewed column list is 0.
*
- * This information will be used in {@FileSinkOperator} generateListBucketingDirName
+ * This information will be used in {@link org.apache.hadoop.hive.ql.exec.FileSinkOperator} generateListBucketingDirName
*/
public class SkewedColumnPositionPair {
private int tblColPosition;
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
index 3ed5cb22f6..2f1ec27c64 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
@@ -279,7 +279,6 @@ public SparkEdgeProperty getEdgeProperty(BaseWork a, BaseWork b) {
/**
* connect adds an edge between a and b. Both nodes have
* to be added prior to calling connect.
- * @param
*/
public void connect(BaseWork a, BaseWork b, SparkEdgeProperty edgeProp) {
workGraph.get(a).add(b);
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
index 3539f0d394..ac437783bc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
@@ -370,7 +370,6 @@ public int compareTo(Dependency o) {
/**
* connect adds an edge between a and b. Both nodes have
* to be added prior to calling connect.
- * @param
*/
public void connect(BaseWork a, BaseWork b,
TezEdgeProperty edgeProp) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
index adcf7078e1..bf5bb2464f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
@@ -27,8 +27,8 @@
/**
- * All member variables should have a setters and getters of the form get and set or else they won't be recreated properly at run
+ * All member variables should have a setters and getters of the form get<member
+ * name> and set<member name> or else they won't be recreated properly at run
* time.
*
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
index caf0c67744..a69f762235 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
@@ -35,17 +35,17 @@
private static final long serialVersionUID = 1L;
/**
- * GLOBAL No key. All rows --> 1 full aggregation on end of input
+ * GLOBAL No key. All rows --> 1 full aggregation on end of input
*
- * HASH Rows aggregated in to hash table on group key -->
+ * HASH Rows aggregated in to hash table on group key -->
* 1 partial aggregation per key (normally, unless there is spilling)
*
* MERGE_PARTIAL As first operator in a REDUCER, partial aggregations come grouped from
- * reduce-shuffle -->
+ * reduce-shuffle -->
* aggregate the partial aggregations and emit full aggregation on
* endGroup / closeOp
*
- * STREAMING Rows come from PARENT operator already grouped -->
+ * STREAMING Rows come from PARENT operator already grouped -->
* aggregate the rows and emit full aggregation on key change / closeOp
*
* NOTE: Hash can spill partial result rows prematurely if it runs low on memory.
@@ -123,16 +123,16 @@ public boolean getIsVectorizationGroupByComplexTypesEnabled() {
*
* Decides using GroupByDesc.Mode and whether there are keys.
*
- * Mode.COMPLETE --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.STREAMING)
+ * Mode.COMPLETE --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.STREAMING)
*
- * Mode.HASH --> ProcessingMode.HASH
+ * Mode.HASH --> ProcessingMode.HASH
*
- * Mode.MERGEPARTIAL --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.MERGE_PARTIAL)
+ * Mode.MERGEPARTIAL --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.MERGE_PARTIAL)
*
* Mode.PARTIAL1,
* Mode.PARTIAL2,
* Mode.PARTIALS,
- * Mode.FINAL --> ProcessingMode.STREAMING
+ * Mode.FINAL --> ProcessingMode.STREAMING
*
*/
public static ProcessingMode groupByDescModeToVectorProcessingMode(GroupByDesc.Mode mode,
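
The mode mapping spelled out in the javadoc above can be summarised as a small switch. The sketch below uses local stand-in enums rather than Hive's GroupByDesc.Mode and VectorGroupByDesc.ProcessingMode types, purely to illustrate the documented mapping:

```java
public class GroupByModeMappingSketch {

  enum Mode { COMPLETE, HASH, MERGEPARTIAL, PARTIAL1, PARTIAL2, PARTIALS, FINAL }

  enum ProcessingMode { GLOBAL, HASH, MERGE_PARTIAL, STREAMING }

  static ProcessingMode toProcessingMode(Mode mode, boolean hasKeys) {
    switch (mode) {
      case COMPLETE:
        // No keys -> one full aggregation at end of input; otherwise rows arrive grouped.
        return hasKeys ? ProcessingMode.STREAMING : ProcessingMode.GLOBAL;
      case HASH:
        return ProcessingMode.HASH;
      case MERGEPARTIAL:
        return hasKeys ? ProcessingMode.MERGE_PARTIAL : ProcessingMode.GLOBAL;
      case PARTIAL1:
      case PARTIAL2:
      case PARTIALS:
      case FINAL:
      default:
        return ProcessingMode.STREAMING;
    }
  }

  public static void main(String[] args) {
    System.out.println(toProcessingMode(Mode.COMPLETE, false));     // GLOBAL
    System.out.println(toProcessingMode(Mode.MERGEPARTIAL, true));  // MERGE_PARTIAL
    System.out.println(toProcessingMode(Mode.FINAL, true));         // STREAMING
  }
}
```
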
diff --git ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
index f1e3267cd3..b3d59e3cd0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
@@ -53,9 +53,9 @@
* plan generation adds filters where they are seen but in some instances some
* of the filter expressions can be pushed nearer to the operator that sees this
* particular data for the first time. e.g. select a.*, b.* from a join b on
- * (a.col1 = b.col1) where a.col1 > 20 and b.col2 > 40
+ * (a.col1 = b.col1) where a.col1 > 20 and b.col2 > 40
*
- * For the above query, the predicates (a.col1 > 20) and (b.col2 > 40), without
+ * For the above query, the predicates (a.col1 > 20) and (b.col2 > 40), without
* predicate pushdown, would be evaluated after the join processing has been
* done. Suppose the two predicates filter out most of the rows from a and b,
* the join is unnecessarily processing these rows. With predicate pushdown,
diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
index bc473ee349..94cfa5178c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
+++ ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
@@ -28,7 +28,7 @@
* CommandProcessor interface. Typically errorMessage
* and SQLState will only be set if the responseCode
* is not 0. Note that often {@code responseCode} ends up the exit value of
- * command shell process so should keep it to < 127.
+ * command shell process so should keep it to < 127.
*/
public class CommandProcessorResponse extends Exception {
private final int responseCode;
diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
index d2a864a9be..9d6f47b28b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
@@ -36,7 +36,7 @@
/**
* This class processes HADOOP commands used for HDFS encryption. It is meant to be run
- * only by Hive unit & queries tests.
+ * only by Hive unit & queries tests.
*/
public class CryptoProcessor implements CommandProcessor {
public static final Logger LOG = LoggerFactory.getLogger(CryptoProcessor.class.getName());
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
index e19c053e14..f690422bfe 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
@@ -169,7 +169,6 @@ public static HivePrivilegeObject getHiveObjectRef(HiveObjectRef privObj) throws
* Convert authorization plugin principal type to thrift principal type
* @param type
* @return
- * @throws HiveException
*/
public static PrincipalType getThriftPrincipalType(HivePrincipalType type) {
if(type == null){
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
index 7678e8f1f8..7037f2c0ed 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
@@ -62,7 +62,7 @@ public Integer getToken() {
/**
* Do case lookup of PrivilegeType associated with this antlr token
- * @param privilegeName
+ * @param token
* @return corresponding PrivilegeType
*/
public static PrivilegeType getPrivTypeByToken(int token) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
index a4079b892e..9352aa2e7c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
@@ -87,7 +87,7 @@ void revokePrivileges(List hivePrincipals, List hi
/**
* Create role
* @param roleName
- * @param adminGrantor - The user in "[ WITH ADMIN ]" clause of "create role"
+ * @param adminGrantor - The user in "[ WITH ADMIN <user> ]" clause of "create role"
* @throws HiveAuthzPluginException
* @throws HiveAccessControlException
*/
@@ -232,7 +232,7 @@ void checkPrivileges(HiveOperationType hiveOpType, List inp
* returned, the Object has to be of type HiveAuthorizationTranslator
*
* @return
- * @throws HiveException
+ * @throws HiveAuthzPluginException
*/
Object getHiveAuthorizationTranslator() throws HiveAuthzPluginException;
@@ -246,19 +246,19 @@ void checkPrivileges(HiveOperationType hiveOpType, List inp
* (part 1) It expects a valid filter condition to be returned. Null indicates no filtering is
* required.
*
- * Example: table foo(c int) -> "c > 0 && c % 2 = 0"
+ * Example: table foo(c int) -> "c > 0 && c % 2 = 0"
*
* (part 2) It expects a valid expression as used in a select clause. Null
* is NOT a valid option. If no transformation is needed simply return the
* column name.
*
- * Example: column a -> "a" (no transform)
+ * Example: column a -> "a" (no transform)
*
- * Example: column a -> "reverse(a)" (call the reverse function on a)
+ * Example: column a -> "reverse(a)" (call the reverse function on a)
*
- * Example: column a -> "5" (replace column a with the constant 5)
+ * Example: column a -> "5" (replace column a with the constant 5)
*
- * @return List
+ * @return List<HivePrivilegeObject>
* please return the list of HivePrivilegeObjects that need to be rewritten.
*
* @throws SemanticException
@@ -271,7 +271,6 @@ void checkPrivileges(HiveOperationType hiveOpType, List inp
* Returning false short-circuits the generation of row/column transforms.
*
* @return
- * @throws SemanticException
*/
boolean needTransform();
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
index 0b3b19b03e..87d2e68abe 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
@@ -146,7 +146,7 @@ public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String o
}
/**
- * Create HivePrivilegeObject of type {@link HivePrivilegeObjectType.COMMAND_PARAMS}
+ * Create HivePrivilegeObject of type {@link HivePrivilegeObjectType#COMMAND_PARAMS}
* @param cmdParams
* @return
*/
@@ -215,7 +215,7 @@ public HivePrivObjectActionType getActionType() {
}
/**
- * Applicable columns in this object, when the type is {@link HivePrivilegeObjectType.TABLE}
+ * Applicable columns in this object, when the type is {@link HivePrivilegeObjectType#TABLE_OR_VIEW}
* In case of DML read operations, this is the set of columns being used.
* Column information is not set for DDL operations and for tables being written into
* @return list of applicable columns
@@ -225,7 +225,7 @@ public HivePrivObjectActionType getActionType() {
}
/**
- * The class name when the type is {@link HivePrivilegeObjectType.FUNCTION}
+ * The class name when the type is {@link HivePrivilegeObjectType#FUNCTION}
* @return the class name
*/
public String getClassName() {
diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
index 988d235bb1..1d79082b4f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
@@ -31,9 +31,6 @@
/**
* This method connects to the temporary storage.
*
- * @param hconf
- * HiveConf that contains the connection parameters.
- * @param sourceTask
* @return true if connection is successful, false otherwise.
*/
public boolean connect(StatsCollectionContext scc);
diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
index bae732ca56..1230663391 100644
--- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
+++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
@@ -35,15 +35,12 @@
* database (if not exist).
* This method is usually called in the Hive client side rather than by the mappers/reducers
* so that it is initialized only once.
- * @param hconf HiveConf that contains the configurations parameters used to connect to
- * intermediate stats database.
* @return true if initialization is successful, false otherwise.
*/
public boolean init(StatsCollectionContext context);
/**
* This method connects to the intermediate statistics database.
- * @param hconf HiveConf that contains the connection parameters.
* @return true if connection is successful, false otherwise.
*/
public boolean connect(StatsCollectionContext context);
diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index 2a7cf8c897..f00c72027a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -787,7 +787,7 @@ public static boolean containsNonPositives(List vals) {
}
/**
- * Get sum of all values in the list that are >0
+ * Get sum of all values in the list that are >0
* @param vals
* - list of values
* @return sum
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
index 675853d66b..21bde4ad0b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
@@ -30,7 +30,6 @@
/**
* Add data to UDF prior to initialization.
* An exception may be thrown if the UDF doesn't know what to do with this data.
- * @param params UDF-specific data to add to the UDF
*/
void setTypeInfo(TypeInfo typeInfo) throws UDFArgumentException;
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
index ed5882ba39..7a590b87fc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
@@ -139,7 +139,7 @@ private void char2byte(int radix, int fromPos) {
}
/**
- * Convert numbers between different number bases. If toBase>0 the result is
+ * Convert numbers between different number bases. If toBase>0 the result is
* unsigned, otherwise it is signed.
*
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
index cd20783797..63b18fdbc7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
@@ -36,7 +36,7 @@
* 'Ref' parse_url('http://facebook.com/path/p1.php?query=1#Ref', 'PROTOCOL')
* will return 'http' Possible values are
* HOST,PATH,QUERY,REF,PROTOCOL,AUTHORITY,FILE,USERINFO Also you can get a value
- * of particular key in QUERY, using syntax QUERY: eg: QUERY:k1.
+ * of particular key in QUERY, using syntax QUERY:<KEY_NAME> eg: QUERY:k1.
*/
@Description(name = "parse_url",
value = "_FUNC_(url, partToExtract[, key]) - extracts a part from a URL",
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
index 738fd95869..c657a604c5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
@@ -75,7 +75,7 @@ public DoubleWritable evaluate(LongWritable a) {
/**
* Get the sign of the decimal input
*
- * @param dec decimal input
+ * @param decWritable decimal input
*
* @return -1, 0, or 1 representing the sign of the input decimal
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
index d1517ab7dd..360ae46d4a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
@@ -47,12 +47,12 @@
* Donald Knuth.
*
* Incremental:
- * n :
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n :
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n :
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) :
- * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)):
- * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)):
+ * n : <count>
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
+ * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): <variance * n>
+ * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): <variance * n>
*
* Merge:
* c_(A,B) = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/(n_A+n_B)
@@ -136,12 +136,12 @@ public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticE
* algorithm, based on work by Philippe Pébay and Donald Knuth.
*
* Incremental:
- * n :
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n :
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n :
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) :
- * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)):
- * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)):
+ * n : <count>
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
+ * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): <variance * n>
+ * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): <variance * n>
*
* Merge:
* c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
index 8b088f8405..b1de95715a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
@@ -44,10 +44,10 @@
* Arbitrary-Order Statistical Moments", Philippe Pebay, Sandia Labs):
*
* Incremental:
- * n :
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n :
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n :
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) :
+ * n : <count>
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
*
* Merge:
* c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
@@ -128,10 +128,10 @@ public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticE
* http://infoserve.sandia.gov/sand_doc/2008/086212.pdf
*
* Incremental:
- * n :
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n :
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n :
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) :
+ * n : <count>
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
*
* Merge:
* c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
index 960d8fdb89..6125977dfd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
@@ -46,7 +46,7 @@
* accept arguments of complex types, and return complex types. 2. It can accept
* variable length of arguments. 3. It can accept an infinite number of function
* signature - for example, it's easy to write a GenericUDAF that accepts
- * array, array> and so on (arbitrary levels of nesting).
+ * array<int>, array<array<int>> and so on (arbitrary levels of nesting).
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
index 568a7ec0eb..53c657b06e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.io.IntWritable;
/**
- * abstract class for Lead & lag UDAFs GenericUDAFLeadLag.
+ * abstract class for Lead & lag UDAFs GenericUDAFLeadLag.
*
*/
public abstract class GenericUDAFLeadLag extends AbstractGenericUDAFResolver {
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
index 0d8d659ff6..6597f4b34b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
@@ -64,7 +64,7 @@
* accept arguments of complex types, and return complex types. 2. It can accept
* variable length of arguments. 3. It can accept an infinite number of function
* signature - for example, it's easy to write a GenericUDF that accepts
- * array, array> and so on (arbitrary levels of nesting). 4. It
+ * array<int>, array<array<int>> and so on (arbitrary levels of nesting). 4. It
* can do short-circuit evaluations using DeferedObject.
*/
@InterfaceAudience.Public
@@ -222,7 +222,7 @@ public void close() throws IOException {
/**
* Some functions like comparisons may be affected by appearing order of arguments.
- * This is to convert a function, such as 3 > x to x < 3. The flip function of
+ * This is to convert a function, such as 3 > x to x < 3. The flip function of
* GenericUDFOPGreaterThan is GenericUDFOPLessThan.
*/
public GenericUDF flip() {
@@ -233,7 +233,6 @@ public GenericUDF flip() {
* Gets the negative function of the current one. E.g., GenericUDFOPNotEqual for
* GenericUDFOPEqual, or GenericUDFOPNull for GenericUDFOPNotNull.
* @return Negative function
- * @throws UDFArgumentException
*/
public GenericUDF negative() {
throw new UnsupportedOperationException("Negative function doesn't exist for " + getFuncName());
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
index ea9a59eeb1..5d3f171afb 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
@@ -37,7 +37,7 @@
/**
* Generic UDF for string function
- * CONCAT_WS(sep, [string | array(string)]+).
+ * CONCAT_WS(sep, [string | array(string)]+).
* This mimics the function from
* MySQL http://dev.mysql.com/doc/refman/5.0/en/string-functions.html#
* function_concat-ws
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
index 25c54e9155..23708dc345 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
@@ -76,7 +76,7 @@
/**
* IF(expr1,expr2,expr3)
- * If expr1 is TRUE (expr1 <> 0 and expr1 <> NULL) then IF() returns expr2;
+ * If expr1 is TRUE (expr1 <> 0 and expr1 <> NULL) then IF() returns expr2;
* otherwise it returns expr3. IF() returns a numeric or string value, depending
* on the context in which it is used.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
index ee869db12c..70f57b7727 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
@@ -42,7 +42,7 @@
* GenericUDFTimestamp
*
* Example usage:
- * ... CAST( as TIMESTAMP) ...
+ * ... CAST(<Timestamp string> as TIMESTAMP) ...
*
* Creates a TimestampWritableV2 object using PrimitiveObjectInspectorConverter
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
index e5a25c3556..530794e040 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
@@ -34,7 +34,7 @@
* GenericUDFIntervalDayTime
*
* Example usage:
-* ... CAST( as INTERVAL DAY TO SECOND) ...
+* ... CAST(<Interval string> as INTERVAL DAY TO SECOND) ...
*
* Creates a HiveIntervalDayTimeWritable object using PrimitiveObjectInspectorConverter
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
index 804b8e722f..8baf26c18a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
@@ -34,7 +34,7 @@
* GenericUDFIntervalYearMonth
*
* Example usage:
-* ... CAST( as INTERVAL YEAR TO MONTH) ...
+* ... CAST(<Interval string> as INTERVAL YEAR TO MONTH) ...
*
* Creates a HiveIntervalYearMonthWritable object using PrimitiveObjectInspectorConverter
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
index e187355b19..ac23e50f64 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
@@ -44,7 +44,7 @@
* Additionally setup GenericUDTF with MapredContext before initializing.
* This is only called in runtime of MapRedTask.
*
- * @param context context
+ * @param mapredContext context
*/
public void configure(MapredContext mapredContext) {
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
index 8f3dfdbe3c..1fbfa4f814 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
@@ -70,7 +70,7 @@
* where the first occurrence was LATE, followed by zero or more EARLY flights,
* followed by a ONTIME or EARLY flight.
* symbols specify a list of name, expression pairs. For e.g.
- * 'LATE', arrival_delay > 0, 'EARLY', arrival_delay < 0 , 'ONTIME', arrival_delay == 0.
+ * 'LATE', arrival_delay > 0, 'EARLY', arrival_delay < 0 , 'ONTIME', arrival_delay == 0.
* These symbols can be used in the Pattern defined above.
* resultSelectList specified as a select list.
* The expressions in the selectList are evaluated in the context where all the
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
index e2b7035254..f1c4b731b8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
@@ -62,7 +62,7 @@
* Based on Hive {@link GenericUDAFEvaluator}. Break up the responsibility of the old AbstractTableFunction
* class into a Resolver and Evaluator.
*
- * The Evaluator also holds onto the {@link TableFunctionDef}. This provides information
+ * The Evaluator also holds onto the {@link PartitionedTableFunctionDef}. This provides information
* about the arguments to the function, the shape of the Input partition and the Partitioning details.
* The Evaluator is responsible for providing the 2 execute methods:
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
index dbc7693420..bf012ddd03 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
@@ -37,15 +37,15 @@
* old AbstractTableFunction class into a Resolver and Evaluator.
* The Resolver is responsible for:
*
- * - setting up the {@link tableFunctionEvaluator}
+ * - setting up the {@link TableFunctionEvaluator}
* - Setting up the The raw and output ObjectInspectors of the Evaluator.
- * - The Evaluator also holds onto the {@link TableFunctionDef}. This provides information
+ * - The Evaluator also holds onto the {@link PartitionedTableFunctionDef}. This provides information
* about the arguments to the function, the shape of the Input partition and the Partitioning details.
*
* The Resolver for a function is obtained from the {@link FunctionRegistry}. The Resolver is initialized
* by the following 4 step process:
*
- * - The initialize method is called; which is passed the {@link PTFDesc} and the {@link TableFunctionDef}.
+ * - The initialize method is called; which is passed the {@link PTFDesc} and the {@link PartitionedTableFunctionDef}.
* - The resolver is then asked to setup the Raw ObjectInspector. This is only required if the Function reshapes
* the raw input.
*
* - Once the Resolver has had a chance to compute the shape of the Raw Input that is fed to the partitioning
@@ -113,8 +113,6 @@ public TableFunctionEvaluator getEvaluator() {
* exist for all the Def (ArgDef, ColumnDef, WindowDef..). It is the responsibility of
* the TableFunction to construct the {@link ExprNodeEvaluator evaluators} and setup the OI.
*
- * @param tblFuncDef
- * @param ptfDesc
* @throws HiveException
*/
public abstract void initializeOutputOI() throws HiveException;
diff --git ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java
index cb966a7b2e..58e6289583 100644
--- ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java
+++ ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java
@@ -83,7 +83,7 @@
* but can be made insert-only transactional tables and generate corresponding Alter Table commands.
*
* Note that to convert a table to full CRUD table requires that all files follow a naming
- * convention, namely 0000N_0 or 0000N_0_copy_M, N >= 0, M > 0. This utility can perform this
+ * convention, namely 0000N_0 or 0000N_0_copy_M, N >= 0, M > 0. This utility can perform this
* rename with "execute" option. It will also produce a script (with and w/o "execute" to
* perform the renames).
*
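
The naming convention mentioned above (0000N_0 or 0000N_0_copy_M, N >= 0, M > 0) can be checked with a simple pattern. The regex below is one reading of that sentence, not the pattern the tool itself uses:

```java
import java.util.regex.Pattern;

public class BucketFileNameCheckSketch {
  // One reading of the quoted convention: a run of digits, "_0", and an optional
  // "_copy_M" suffix with M > 0. The exact pattern the tool accepts may differ.
  private static final Pattern ORIGINAL_FILE =
      Pattern.compile("^\\d+_0(_copy_[1-9]\\d*)?$");

  public static void main(String[] args) {
    System.out.println(ORIGINAL_FILE.matcher("000000_0").matches());         // true
    System.out.println(ORIGINAL_FILE.matcher("000003_0_copy_2").matches());  // true
    System.out.println(ORIGINAL_FILE.matcher("000000_0_copy_0").matches());  // false: M must be > 0
    System.out.println(ORIGINAL_FILE.matcher("bucket_00000").matches());     // false
  }
}
```
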
diff --git ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java
index 76753488ad..92651cd188 100644
--- ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java
+++ ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java
@@ -17,7 +17,7 @@
/**
* Expression that is defined in triggers.
- * Most expressions will get triggered only after exceeding a limit. As a result, only greater than (>) expression
+ * Most expressions will get triggered only after exceeding a limit. As a result, only greater than (>) expression
* is supported.
*/
public interface Expression {
@@ -43,7 +43,7 @@ public String getSymbol() {
}
/**
- * Evaluate current value against this expression. Return true if expression evaluates to true (current > limit)
+ * Evaluate current value against this expression. Return true if expression evaluates to true (current > limit)
* else false otherwise
*
* @param current - current value against which expression will be evaluated