diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
index ee80606..fb02c68 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareColumn.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterColumnCompareColumn.txt, which covers binary comparison
* expressions between two columns, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
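As an aside for readers unfamiliar with the selected-vector mechanism these templates rely on: a filter expression never copies rows, it only rewrites the batch's selection index array and shrinks its size. The following stand-alone sketch uses a stripped-down batch class invented here for illustration, not the real VectorizedRowBatch API, to show the idea for a "long column > long column" comparison.

import java.util.Arrays;

public class InPlaceFilterSketch {

  /** Minimal stand-in for a vectorized batch: two long columns plus a selection vector. */
  static class MiniBatch {
    long[] col0;
    long[] col1;
    int[] selected = new int[1024];   // indices of live rows when selectedInUse is true
    boolean selectedInUse = false;
    int size = 0;                     // number of live rows
  }

  /** Keep only rows where col0 > col1, compacting the selection vector in place. */
  static void filterGreater(MiniBatch batch) {
    int newSize = 0;
    if (batch.selectedInUse) {
      for (int j = 0; j < batch.size; j++) {
        int i = batch.selected[j];
        if (batch.col0[i] > batch.col1[i]) {
          batch.selected[newSize++] = i;   // overwrite earlier slots; row order is preserved
        }
      }
    } else {
      for (int i = 0; i < batch.size; i++) {
        if (batch.col0[i] > batch.col1[i]) {
          batch.selected[newSize++] = i;
        }
      }
      batch.selectedInUse = true;          // from now on, selected[] defines the live rows
    }
    batch.size = newSize;
  }

  public static void main(String[] args) {
    MiniBatch b = new MiniBatch();
    b.col0 = new long[]{5, 1, 9, 2};
    b.col1 = new long[]{3, 4, 2, 2};
    b.size = 4;
    filterGreater(b);
    System.out.println(b.size + " rows pass: "
        + Arrays.toString(Arrays.copyOf(b.selected, b.size)));   // 2 rows pass: [0, 2]
  }
}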
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
index 248a66a..15b8e62 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterColumnCompareScalar.txt
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterColumnCompareScalar.txt, which covers binary comparison
* expressions between a column and a scalar, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt
index 2351230..0027033 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDTIColumnCompareScalar.txt
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
* Generated from template FilterDTIColumnCompareScalar.txt, which covers comparison
* expressions between a datetime/interval column and a scalar of the same type, however output is not
* produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt
index f9fb12e..91cf4f7 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDTIScalarCompareColumn.txt
@@ -24,7 +24,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
* Generated from template FilterDTIScalarCompareColumn.txt, which covers comparison
* expressions between a datetime/interval scalar and a column of the same type,
* however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
index ee450d3..4edb371 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalColumn.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
/**
* Generated from template FilterDecimalColumnCompareColumn.txt, which covers binary comparison
* filter expressions between two columns. Output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
index 9943f45..4a116ec 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalColumnCompareDecimalScalar.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.common.type.HiveDecimal;
/**
- * This is a generated class to evaluate a comparison on a vector of decimal
+ * This is a generated class to evaluate a comparison on a vector of decimal
* values.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
index 4477aff..58f9a2f 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterDecimalScalarCompareDecimalColumn.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.common.type.HiveDecimal;
/**
- * This is a generated class to evaluate a comparison on a vector of decimal
+ * This is a generated class to evaluate a comparison on a vector of decimal
* values.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
index 610c062..5cda930 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampColumn.txt
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterColumnCompareColumn.txt, which covers binary comparison
* expressions between two columns, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt
index 1b86691..5af0adb 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleColumnCompareTimestampScalar.txt
@@ -29,7 +29,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterColumnCompareScalar.txt, which covers binary comparison
* expressions between a column and a scalar, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
index 73c46a1..1f07ed9 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterLongDoubleScalarCompareTimestampColumn.txt
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterScalarCompareTimestampColumn.txt, which covers comparison
* expressions between a long/double scalar and a timestamp column, however output is not produced
- * in a separate column. The selected vector of the input {@link VectorizedRowBatch} is updated
+ * in a separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated
* for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
index 037382c..549c923 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterScalarCompareColumn.txt
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterScalarCompareColumn.txt, which covers binary comparison
* expressions between a scalar and a column, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
index 916bc12..70e601f 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringGroupScalarBase.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public abstract class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt
index 7ab9f66..ca8b5d9 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareStringScalar.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt
index aa229c8..f75c924 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupColumnCompareTruncStringScalar.txt
@@ -29,7 +29,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
index bfc58a1..9c66f8e 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterStringGroupScalarCompareStringGroupColumnBase.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public abstract class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt
index bb638a4..ae1a0e3 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterStringScalarCompareStringGroupColumn.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
index 8873826..35ca40f 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleColumn.txt
@@ -27,7 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterColumnCompareColumn.txt, which covers binary comparison
* expressions between two columns, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
index 8583eee..0a5419b 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareLongDoubleScalar.txt
@@ -26,7 +26,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterTimestampColumnCompareScalar.txt, which covers comparison
* expressions between a timestamp column and a long/double scalar, however output is not produced
- * in a separate column. The selected vector of the input {@link VectorizedRowBatch} is updated
+ * in a separate column. The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated
* for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
index eeb73c9..0e1c9fe 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampColumn.txt
@@ -31,7 +31,7 @@ import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
/**
* Generated from template FilterTimestampColumnCompareColumn.txt, which covers binary comparison
* filter expressions between two columns. Output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
index 23790a5..034af81 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampColumnCompareTimestampScalar.txt
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterColumnCompareScalar.txt, which covers binary comparison
* expressions between a column and a scalar, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
index 0e10779..1b4c4da 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareLongDoubleColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
* Generated from template FilterScalarCompareColumn.txt, which covers binary comparison
* expressions between a scalar and a column, however output is not produced in a separate column.
- * The selected vector of the input {@link VectorizedRowBatch} is updated for in-place filtering.
+ * The selected vector of the input {@link org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch} is updated for in-place filtering.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
index 5a6def3..0439b4e 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTimestampScalarCompareTimestampColumn.txt
@@ -30,7 +30,7 @@ import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
import org.apache.hadoop.hive.common.type.HiveDecimal;
/**
- * This is a generated class to evaluate a comparison on a vector of timestamp
+ * This is a generated class to evaluate a comparison on a vector of timestamp
* values.
*/
public class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt
index c4745d3..53681ee 100644
--- ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/FilterTruncStringScalarCompareStringGroupColumn.txt
@@ -29,7 +29,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
index 08b3e75..7f34c05 100644
--- ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringGroupScalarBase.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public abstract class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt
index 9b11c5e..f478f36 100644
--- ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareStringScalar.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
index 969fe1b..d894935 100644
--- ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/StringGroupColumnCompareTruncStringScalar.txt
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
index dee2bfc..f34e534 100644
--- ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/StringGroupScalarCompareStringGroupColumnBase.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public abstract class extends VectorExpression {
diff --git ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt
index 5b5e02e..e656a43 100644
--- ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/StringScalarCompareStringGroupColumn.txt
@@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public class extends {
diff --git ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
index 26da73a..5ad8da9 100644
--- ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
+++ ql/src/gen/vectorization/ExpressionTemplates/TruncStringScalarCompareStringGroupColumn.txt
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
/**
- * This is a generated class to evaluate a comparison on a vector of strings.
+ * This is a generated class to evaluate a comparison on a vector of strings.
* Do not edit the generated code directly.
*/
public class extends {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
index dfad6c1..ff71411 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
@@ -115,8 +115,8 @@ private void updatePaths(Path tp, Path ttp) {
/**
* Fixes tmpPath to point to the correct partition. Initialize operator will
* set tmpPath and taskTmpPath based on root table directory. So initially,
- * tmpPath will be <prefix>/_tmp.-ext-10000 and taskTmpPath will be
- * <prefix>/_task_tmp.-ext-10000. The depth of these two paths will be 0.
+ * tmpPath will be &lt;prefix&gt;/_tmp.-ext-10000 and taskTmpPath will be
+ * &lt;prefix&gt;/_task_tmp.-ext-10000. The depth of these two paths will be 0.
* Now, in case of dynamic partitioning or list bucketing the inputPath will
* have additional sub-directories under root table directory. This function
* updates the tmpPath and taskTmpPath to reflect these additional
@@ -129,10 +129,10 @@ private void updatePaths(Path tp, Path ttp) {
* Note: The path difference between inputPath and tmpDepth can be DP or DP+LB.
* This method will automatically handle it.
*
- * Continuing the example above, if inputPath is <prefix>/-ext-10000/hr=a1/,
+ * Continuing the example above, if inputPath is &lt;prefix&gt;/-ext-10000/hr=a1/,
* newPath will be hr=a1/. Then, tmpPath and taskTmpPath will be updated to
- * <prefix>/-ext-10000/hr=a1/_tmp.ext-10000 and
- * <prefix>/-ext-10000/hr=a1/_task_tmp.ext-10000 respectively.
+ * &lt;prefix&gt;/-ext-10000/hr=a1/_tmp.ext-10000 and
+ * &lt;prefix&gt;/-ext-10000/hr=a1/_task_tmp.ext-10000 respectively.
* We have list_bucket_dml_6.q cover this case: DP + LP + multiple skewed
* values + merge.
*
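For readers who want to see the path arithmetic in isolation: the essence of updatePaths is to take the extra sub-directory suffix that dynamic partitioning or list bucketing introduced under the root output directory and re-apply it to the two temporary directories. Below is a minimal plain-Java sketch of that suffix computation, using java.nio.file with made-up example paths purely for illustration rather than the operator's Hadoop Path handling.

import java.nio.file.Path;
import java.nio.file.Paths;

public class TmpPathSketch {
  public static void main(String[] args) {
    Path root = Paths.get("/warehouse/t/-ext-10000");
    Path inputPath = Paths.get("/warehouse/t/-ext-10000/hr=a1");

    // newPath is the partition suffix added under the root table directory.
    Path newPath = root.relativize(inputPath);                   // hr=a1

    // Re-anchor the two temp dirs under that suffix, mirroring the javadoc example above.
    Path tmpPath = root.resolve(newPath).resolve("_tmp.-ext-10000");
    Path taskTmpPath = root.resolve(newPath).resolve("_task_tmp.-ext-10000");

    System.out.println(tmpPath);       // /warehouse/t/-ext-10000/hr=a1/_tmp.-ext-10000
    System.out.println(taskTmpPath);   // /warehouse/t/-ext-10000/hr=a1/_task_tmp.-ext-10000
  }
}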
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
index 07fd653..13e60dc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java
@@ -70,8 +70,8 @@
* evaluated before emitting rows. Currently, relevant only for outer joins.
*
* For instance, given the query:
- * select * from t1 right outer join t2 on t1.c1 + t2.c2 > t1.c3;
- * The expression evaluator for t1.c1 + t2.c2 > t1.c3 will be stored in this list.
+ * select * from t1 right outer join t2 on t1.c1 + t2.c2 &gt; t1.c3;
+ * The expression evaluator for t1.c1 + t2.c2 &gt; t1.c3 will be stored in this list.
*/
protected transient List residualJoinFilters;
@@ -437,21 +437,21 @@ protected long getNextSize(long sz) {
* a = 100, 10 | 100, 20 | 100, 30
* b = 100, 10 | 100, 20 | 100, 30
*
- * the query "a FO b ON a.k=b.k AND a.v>10 AND b.v>30" makes filter map
- * 0(a) = [1(b),1] : a.v>10
- * 1(b) = [0(a),1] : b.v>30
+ * the query "a FO b ON a.k=b.k AND a.v&gt;10 AND b.v&gt;30" makes filter map
+ * 0(a) = [1(b),1] : a.v&gt;10
+ * 1(b) = [0(a),1] : b.v&gt;30
*
* for filtered rows in a (100,10) create a-NULL
* for filtered rows in b (100,10) (100,20) (100,30) create NULL-b
*
- * with 0(a) = [1(b),1] : a.v>10
+ * with 0(a) = [1(b),1] : a.v&gt;10
* 100, 10 = 00000010 (filtered)
* 100, 20 = 00000000 (valid)
* 100, 30 = 00000000 (valid)
* -------------------------
* sum = 00000000 : for valid rows in b, there is at least one pair in a
*
- * with 1(b) = [0(a),1] : b.v>30
+ * with 1(b) = [0(a),1] : b.v&gt;30
* 100, 10 = 00000001 (filtered)
* 100, 20 = 00000001 (filtered)
* 100, 30 = 00000001 (filtered)
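A compact way to read the bitmask walkthrough above: each row carries a filter tag whose bit for the opposite alias is set when the row fails that alias's filter, and combining the tags across a key group with a bitwise AND yields zero exactly when at least one row survived, i.e. when real matches rather than NULL-padded rows should be emitted. The sketch below is a self-contained illustration of that bookkeeping with invented names; it is not the operator's actual fields or types.

public class FilterTagSketch {

  /** Bit assigned to rows of alias a (filter "a.v > 10") and alias b (filter "b.v > 30"). */
  static final int BIT_A = 1 << 1;
  static final int BIT_B = 1 << 0;

  static int tagForA(int v) { return v > 10 ? 0 : BIT_A; }  // bit set when the row is filtered out
  static int tagForB(int v) { return v > 30 ? 0 : BIT_B; }

  public static void main(String[] args) {
    int[] aValues = {10, 20, 30};   // the key group 100,* on side a
    int[] bValues = {10, 20, 30};   // the key group 100,* on side b

    int aggA = ~0;                  // start from all-ones so AND can only clear bits
    for (int v : aValues) {
      aggA &= tagForA(v);
    }
    int aggB = ~0;
    for (int v : bValues) {
      aggB &= tagForB(v);
    }

    // aggA == 0  -> some a-row passed its filter, so valid b rows can pair with a real a row.
    // aggB != 0  -> every b-row failed, so a rows must be emitted NULL-padded on the b side.
    System.out.println("a side all filtered? " + (aggA != 0));   // false
    System.out.println("b side all filtered? " + (aggB != 0));   // true
  }
}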
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java
index 41a9cb3..794291a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ComparisonOpMethodResolver.java
@@ -28,8 +28,8 @@
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
/**
- * The class implements the method resolution for operators like (> < <= >= =
- * <>). The resolution logic is as follows: 1. If one of the parameters is null,
+ * The class implements the method resolution for operators like (&gt; &lt; &lt;= &gt;= =
+ * &lt;&gt;). The resolution logic is as follows: 1. If one of the parameters is null,
* then it resolves to evaluate(Double, Double) 2. If both of the parameters are
* of type T, then it resolves to evaluate(T, T) 3. If 1 and 2 fails then it
* resolves to evaluate(Double, Double).
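The three resolution steps described here are easy to mirror outside Hive: prefer evaluate(T, T) when both argument types agree and such an overload exists, otherwise fall back to evaluate(Double, Double). The following is a simplified, hedged sketch using plain reflection and an invented UDF-like class, not the real ComparisonOpMethodResolver.

import java.lang.reflect.Method;

public class ComparisonResolverSketch {

  /** Example class with a couple of evaluate overloads. */
  public static class MyCompare {
    public boolean evaluate(Double a, Double b) { return a != null && b != null && a > b; }
    public boolean evaluate(String a, String b) { return a != null && b != null && a.compareTo(b) > 0; }
  }

  /** Pick evaluate(T, T) if both argument types are the same non-null type and the overload exists; else evaluate(Double, Double). */
  static Method resolve(Class<?> udf, Class<?> left, Class<?> right) throws NoSuchMethodException {
    if (left != null && right != null && left.equals(right)) {
      try {
        return udf.getMethod("evaluate", left, right);
      } catch (NoSuchMethodException ignore) {
        // fall through to the Double, Double fallback
      }
    }
    return udf.getMethod("evaluate", Double.class, Double.class);
  }

  public static void main(String[] args) throws Exception {
    System.out.println(resolve(MyCompare.class, String.class, String.class)); // evaluate(String, String)
    System.out.println(resolve(MyCompare.class, null, Double.class));         // evaluate(Double, Double)
    System.out.println(resolve(MyCompare.class, Integer.class, Long.class));  // evaluate(Double, Double)
  }
}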
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 87928ee..39a88d0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4810,13 +4810,13 @@ private void makeLocationQualified(CreateIndexDesc crtIndex, String name) throws
}
}
- /**
+ public static final String DATABASE_PATH_SUFFIX = ".db";
+ /**
* Make qualified location for a database .
*
* @param database
* Database.
*/
- public static final String DATABASE_PATH_SUFFIX = ".db";
private void makeLocationQualified(Database database) throws HiveException {
if (database.isSetLocationUri()) {
database.setLocationUri(Utilities.getQualifiedPath(conf, new Path(database.getLocationUri())));
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
index 2a1be63..9d3507d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
@@ -36,9 +36,9 @@
* Consider a query like:
*
* select * from
- * (subq1 --> has a filter)
+ * (subq1 --&gt; has a filter)
* join
- * (subq2 --> has a filter)
+ * (subq2 --&gt; has a filter)
* on some key
*
* Let us assume that subq1 is the small table (either specified by the user or inferred
@@ -50,12 +50,12 @@
*
* Therefore the following operator tree is created:
*
- * TableScan (subq1) --> Select --> Filter --> DummyStore
+ * TableScan (subq1) --&gt; Select --&gt; Filter --&gt; DummyStore
* \
* \ SMBJoin
* /
* /
- * TableScan (subq2) --> Select --> Filter
+ * TableScan (subq2) --&gt; Select --&gt; Filter
*
* In order to fetch the row with the least join key from the small table, the row from subq1
* is partially processed, and stored in DummyStore. For the actual processing of the join,
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 9795f3e..dc26264 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -697,8 +697,8 @@ static int getCommonLength(int aLen, int bLen) {
* return a TypeInfo corresponding to the common PrimitiveCategory, and with type qualifiers
* (if applicable) that match the 2 TypeInfo types.
* Examples:
- * varchar(10), varchar(20), primitive category varchar => varchar(20)
- * date, string, primitive category string => string
+ * varchar(10), varchar(20), primitive category varchar =&gt; varchar(20)
+ * date, string, primitive category string =&gt; string
* @param a TypeInfo of the first type
* @param b TypeInfo of the second type
* @param typeCategory PrimitiveCategory of the designated common type between a and b
@@ -1310,7 +1310,6 @@ public static Method getMethodInternal(Class> udfClass, List mlist, bo
/**
* A shortcut to get the "index" GenericUDF. This is used for getting elements
* out of array and getting values out of map.
- * @throws SemanticException
*/
public static GenericUDF getGenericUDFForIndex() {
try {
@@ -1322,7 +1321,6 @@ public static GenericUDF getGenericUDFForIndex() {
/**
* A shortcut to get the "and" GenericUDF.
- * @throws SemanticException
*/
public static GenericUDF getGenericUDFForAnd() {
try {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
index cf3c5f0..1698ea0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java
@@ -64,7 +64,7 @@
*
* The output of select in the left branch and output of the UDTF in the right
* branch are then sent to the lateral view join (LVJ). In most cases, the UDTF
- * will generate > 1 row for every row received from the TS, while the left
+ * will generate &gt; 1 row for every row received from the TS, while the left
* select operator will generate only one. For each row output from the TS, the
* LVJ outputs all possible rows that can be created by joining the row from the
* left select and one of the rows output from the UDTF.
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
index 48ae02f..9ee8a1a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java
@@ -264,7 +264,7 @@ public void tryStoreVectorizedKey(HiveKey key, boolean partColsIsNull, int batch
/**
* Get vectorized batch result for particular index.
* @param batchIndex index of the key in the batch.
- * @return the result, same as from {@link #tryStoreKey(HiveKey)}
+ * @return the result, same as from {@link TopNHash#tryStoreKey(HiveKey,boolean)}
*/
public int getVectorizedBatchResult(int batchIndex) {
int result = batchIndexToResult[batchIndex];
@@ -309,9 +309,8 @@ public int getVectorizedKeyHashCode(int batchIndex) {
/**
* Stores the value for the key in the heap.
* @param index The index, either from tryStoreKey or from tryStoreVectorizedKey result.
- * @param hasCode hashCode of key, used by ptfTopNHash.
+ * @param hashCode hashCode of key, used by ptfTopNHash.
* @param value The value to store.
- * @param keyHash The key hash to store.
* @param vectorized Whether the result is coming from a vectorized batch.
*/
public void storeValue(int index, int hashCode, BytesWritable value, boolean vectorized) {
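For context, the contract documented here is a two-phase insert: the caller first offers a key and learns whether it made the top-N cut, and only then pays for serializing and storing the value. Below is a toy illustration of that contract built on a plain PriorityQueue with invented names and constants; it is not the real TopNHash API.

import java.util.PriorityQueue;

public class TopNSketch {
  static final int FORWARD = 0;   // key accepted, caller should now store the value
  static final int EXCLUDE = -1;  // key did not make the top N, skip the value entirely

  private final int topN;
  // Keep the N smallest keys; the largest retained key sits at the head for cheap eviction checks.
  private final PriorityQueue<Long> keys = new PriorityQueue<>((a, b) -> Long.compare(b, a));

  TopNSketch(int topN) { this.topN = topN; }

  int tryStoreKey(long key) {
    if (keys.size() < topN) {
      keys.add(key);
      return FORWARD;
    }
    if (key >= keys.peek()) {
      return EXCLUDE;              // worse than everything already kept
    }
    keys.poll();                   // evict the current worst key
    keys.add(key);
    return FORWARD;
  }

  void storeValue(long key, String value) {
    // In the real operator this is where the serialized row bytes are attached to the key slot.
    System.out.println("kept " + key + " -> " + value);
  }

  public static void main(String[] args) {
    TopNSketch t = new TopNSketch(2);
    for (long k : new long[]{30, 10, 50, 5}) {
      if (t.tryStoreKey(k) == FORWARD) {
        t.storeValue(k, "row-" + k);
      }
    }
  }
}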
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java
index 57a2d71..3e771e1 100755
--- ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/UDAF.java
@@ -20,9 +20,9 @@
/**
* Please see the deprecation notice
- *
+ *
* Base class for all User-defined Aggregation Function (UDAF) classes.
- *
+ *
* Requirements for a UDAF class:
*
* - Implement the {@code init()} method, which resets the status of the aggregation function.
@@ -57,7 +57,7 @@
* aggregation result and returns a boolean. The method should always return
* {@code true} on valid inputs, or the framework will throw an Exception.
*
- *
+ *
* Following are some examples:
*
* - public int evaluatePartial();
@@ -65,7 +65,6 @@
* - public String evaluatePartial();
* - public boolean aggregatePartial(String partial);
*
- *
*
* @deprecated Either implement {@link org.apache.hadoop.hive.ql.udf.generic.GenericUDAFResolver2} or extend
* {@link org.apache.hadoop.hive.ql.udf.generic.AbstractGenericUDAFResolver} instead.
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index c70e1e0..44cac9f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -1928,7 +1928,7 @@ public static String formatBinaryString(byte[] array, int start, int length) {
* If there is no db name part, set the current sessions default db
* @param dbtable
* @return String array with two elements, first is db name, second is table name
- * @throws HiveException
+ * @throws SemanticException
*/
public static String[] getDbTableName(String dbtable) throws SemanticException {
return getDbTableName(SessionState.get().getCurrentDatabase(), dbtable);
@@ -3621,9 +3621,9 @@ public static boolean isDefaultNameNode(HiveConf conf) {
}
/**
- * Checks if the current HiveServer2 logging operation level is >= PERFORMANCE.
+ * Checks if the current HiveServer2 logging operation level is &gt;= PERFORMANCE.
* @param conf Hive configuration.
- * @return true if current HiveServer2 logging operation level is >= PERFORMANCE.
+ * @return true if current HiveServer2 logging operation level is &gt;= PERFORMANCE.
* Else, false.
*/
public static boolean isPerfOrAboveLogging(HiveConf conf) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java
index 2d6d1b5..3a66c23 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/errors/ScriptErrorHeuristic.java
@@ -28,7 +28,7 @@
*
* Conditions to check:
*
- * 1. "Script failed with code " is in the log
+ * 1. "Script failed with code <some number>" is in the log
*
*/
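Concretely, the heuristic's condition boils down to scanning task log lines for that phrase followed by an exit code. Below is a minimal sketch of such a match using a regular expression and an invented log line; it is an illustration of the check, not the heuristic's actual implementation.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ScriptErrorCheckSketch {
  private static final Pattern SCRIPT_ERROR =
      Pattern.compile("Script failed with code (\\d+)");

  public static void main(String[] args) {
    String logLine = "2016-01-01 12:00:00 ERROR Script failed with code 137";
    Matcher m = SCRIPT_ERROR.matcher(logLine);
    if (m.find()) {
      // A match is strong evidence that the user's TRANSFORM/streaming script itself failed.
      System.out.println("script exit code: " + m.group(1));
    }
  }
}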
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
index 360b639..782ff06 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java
@@ -491,7 +491,7 @@ public byte getValueResult(byte[] key, int offset, int length, Result hashMapRes
}
/**
- * Take the segment reference from {@link #getValueRefs(byte[], int, List)}
+ * Take the segment reference from getValueRefs(byte[],int,List)
* result and makes it self-contained - adds byte array where the value is stored, and
* updates the offset from "global" write buffers offset to offset within that array.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index b6e55c0..ad34fd1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -840,7 +840,7 @@ public Path getDefaultDestDir(Configuration conf) throws LoginException, IOExcep
* to provide on the cluster as resources for execution.
*
* @param conf
- * @return List local resources to add to execution
+ * @return List<LocalResource> local resources to add to execution
* @throws IOException when hdfs operation fails
* @throws LoginException when getDefaultDestDir fails with the same exception
*/
@@ -887,7 +887,7 @@ public Path getDefaultDestDir(Configuration conf) throws LoginException, IOExcep
* @param hdfsDirPathStr Destination directory in HDFS.
* @param conf Configuration.
* @param inputOutputJars The file names to localize.
- * @return List local resources to add to execution
+ * @return List<LocalResource> local resources to add to execution
* @throws IOException when hdfs operation fails.
* @throws LoginException when getDefaultDestDir fails with the same exception
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
index c10e53d..453e293 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HivePreWarmProcessor.java
@@ -47,7 +47,7 @@
* A simple sleep processor implementation that sleeps for the configured
* time in milliseconds.
*
- * @see Config for configuring the HivePreWarmProcessor
+ * @see Configuration for configuring the HivePreWarmProcessor
*/
public class HivePreWarmProcessor extends AbstractLogicalIOProcessor {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
index fe5c6a1..73539c9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
@@ -167,11 +167,6 @@ public boolean isOpen() {
return true;
}
-
- /**
- * Get all open sessions. Only used to clean up at shutdown.
- * @return List
- */
public static String makeSessionId() {
return UUID.randomUUID().toString();
}
@@ -189,7 +184,7 @@ public void open(HiveConf conf)
* @throws URISyntaxException
* @throws LoginException
* @throws TezException
- * @throws InterruptedException
+ * @throws IllegalArgumentException
*/
public void open(HiveConf conf, String[] additionalFiles)
throws IOException, LoginException, IllegalArgumentException, URISyntaxException, TezException {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java
index 3e1fcdd..922945a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java
@@ -38,7 +38,7 @@
* A hash map key wrapper for vectorized processing.
* It stores the key values as primitives in arrays for each supported primitive type.
* This works in conjunction with
- * {@link org.apache.hadoop.hive.ql.exec.VectorHashKeyWrapperBatch VectorHashKeyWrapperBatch}
+ * {@link org.apache.hadoop.hive.ql.exec.vector.VectorHashKeyWrapperBatch VectorHashKeyWrapperBatch}
* to hash vectorized processing units (batches).
*/
public class VectorHashKeyWrapper extends KeyWrapper {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
index e546a65..abfcaee 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
@@ -194,7 +194,6 @@ public static void getPartitionValues(VectorizedRowBatchCtx vrbCtx, PartitionDes
* Creates a Vectorized row batch and the column vectors.
*
* @return VectorizedRowBatch
- * @throws HiveException
*/
public VectorizedRowBatch createVectorizedRowBatch()
{
@@ -241,7 +240,6 @@ public VectorizedRowBatch createVectorizedRowBatch()
*
* @param batch
* @param partitionValues
- * @throws HiveException
*/
public void addPartitionColsToBatch(VectorizedRowBatch batch, Object[] partitionValues)
{
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
index 266365e..2f2bdc5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java
@@ -24,7 +24,7 @@
* A high-performance set implementation used to support fast set membership testing,
* using Cuckoo hashing. This is used to support fast tests of the form
*
- * column IN ( getRealClass() throws IOException {
return (Class) conf.getClass(SerializationSubclassKey, null,
@@ -145,8 +140,6 @@ public Configuration getConf() {
* deserialized; in this context, that assumption isn't necessarily true.
*
* @return the serialization object for this context
- * @exception does
- * not currently throw any IOException
*/
@Override
public Serialization getSerialization() throws IOException {
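Since the CuckooSetBytes description above is about fast IN-list membership, a short refresher on the underlying idea may help: cuckoo hashing keeps two arrays and two hash functions, so a lookup probes at most two slots, and an insert evicts the occupant of a full slot into its alternate position. The following is a simplified String-based sketch under those assumptions, not the byte-array implementation the class actually uses.

public class CuckooSetSketch {
  private final String[] t1;
  private final String[] t2;
  private final int n;

  CuckooSetSketch(int capacity) {
    n = capacity;
    t1 = new String[n];
    t2 = new String[n];
  }

  private int h1(String k) { return Math.floorMod(k.hashCode(), n); }
  private int h2(String k) { return Math.floorMod(k.hashCode() * 0x9E3779B1, n); }  // second, roughly independent hash

  /** Membership test: at most two probes, which is what makes IN (...) filters cheap. */
  boolean contains(String k) {
    return k.equals(t1[h1(k)]) || k.equals(t2[h2(k)]);
  }

  void insert(String k) {
    if (contains(k)) {
      return;
    }
    String cur = k;
    for (int i = 0; i < 2 * n; i++) {   // bounded number of displacements
      int p1 = h1(cur);
      String evicted = t1[p1];
      t1[p1] = cur;
      if (evicted == null) {
        return;
      }
      int p2 = h2(evicted);
      String evicted2 = t2[p2];
      t2[p2] = evicted;
      if (evicted2 == null) {
        return;
      }
      cur = evicted2;                   // keep kicking; a real implementation would grow and rehash on failure
    }
    throw new IllegalStateException("insert cycle; grow and rehash in a real implementation");
  }

  public static void main(String[] args) {
    CuckooSetSketch set = new CuckooSetSketch(8);
    for (String s : new String[]{"a", "b", "c"}) {
      set.insert(s);
    }
    System.out.println(set.contains("b") + " " + set.contains("z"));  // true false
  }
}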
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
index 46f9970..617a507 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
@@ -50,7 +50,7 @@
* data. The binary search can be used by setting the value of inputFormatSorted in the
* MapreduceWork to true, but it should only be used if the data is going to a FilterOperator,
* which filters by comparing a value in the data with a constant, using one of the comparisons
- * =, <, >, <=, >=. If the RecordReader's underlying format is an RCFile, this object can perform
+ * =, <, >, <=, >=. If the RecordReader's underlying format is an RCFile, this object can perform
* a binary search to find the block to begin reading from, and stop reading once it can be
* determined no other entries will match the filter.
*/
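The binary-search optimisation described above relies on the data being sorted on the filtered column, so the reader can jump to the first block whose key could satisfy the predicate and stop as soon as keys move past it. Here is a hedged sketch of the core lower-bound search, with a plain array of block start keys standing in for RCFile sync blocks.

import java.util.Arrays;

public class SortedScanSketch {

  /** First index whose key is >= target, or keys.length if none: where a "key >= target" scan would begin. */
  static int lowerBound(long[] keys, long target) {
    int lo = 0, hi = keys.length;
    while (lo < hi) {
      int mid = (lo + hi) >>> 1;
      if (keys[mid] < target) {
        lo = mid + 1;
      } else {
        hi = mid;
      }
    }
    return lo;
  }

  public static void main(String[] args) {
    long[] blockFirstKeys = {10, 20, 20, 35, 50, 80};   // e.g. the first key of each block
    long constant = 20;

    int start = lowerBound(blockFirstKeys, constant);   // begin reading here for "key >= 20"
    System.out.println("start at block " + start + ": " + Arrays.toString(
        Arrays.copyOfRange(blockFirstKeys, start, blockFirstKeys.length)));

    // For "key = 20" the scan can additionally stop at the first block whose key exceeds 20.
    int stop = lowerBound(blockFirstKeys, constant + 1);
    System.out.println("stop before block " + stop);
  }
}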
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
index 9ad7f37..d4fc6ea 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java
@@ -37,8 +37,8 @@
import org.apache.hadoop.util.Progressable;
/**
- * HiveIgnoreKeyTextOutputFormat replaces key with null before feeding the <key,
- * value> to TextOutputFormat.RecordWriter.
+ * HiveIgnoreKeyTextOutputFormat replaces key with null before feeding the &lt;key,
+ * value&gt; to TextOutputFormat.RecordWriter.
*
*/
public class HiveIgnoreKeyTextOutputFormat
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java
index 7eeaa3b..044ee46 100755
--- ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/IgnoreKeyTextOutputFormat.java
@@ -30,7 +30,7 @@
import org.apache.hadoop.util.Progressable;
/**
- * This class replaces key with null before feeding the <key, value> to
+ * This class replaces key with null before feeding the &lt;key, value&gt; to
* TextOutputFormat.RecordWriter.
*
* @deprecated use {@link HiveIgnoreKeyTextOutputFormat} instead}
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
index f41edc4..d089320 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java
@@ -99,10 +99,10 @@
* The {@link Reader} is used to read and explain the bytes of RCFile.
*
*
- *
+ *
*
*
- *
+ *
*
* - version - 3 bytes of magic header RCF, followed by 1 byte of
* actual version number (e.g. RCF1)
@@ -114,10 +114,10 @@
* - sync - A sync marker to denote end of the header.
*
*
- * RCFile Format
+ * RCFile Format
*
* - Header
- * - Record
+ *
- Record
* - Key part
*
* - Record length in bytes
@@ -133,7 +133,6 @@
* - ...
*
*
- *
* - Value part
*
* - Compressed or plain data of [column_1_row_1_value,
@@ -143,7 +142,6 @@
*
*
*
- *
*
* {@code
* The following is a pseudo-BNF grammar for RCFile. Comments are prefixed
@@ -336,7 +334,6 @@
* Text ::= VInt, Chars (Length prefixed UTF-8 characters)
* }
*
- *
*/
public class RCFile {
@@ -1095,7 +1092,7 @@ private void checkAndWriteSync() throws IOException {
private int columnBufferSize = 0;
/**
- * Append a row of values. Currently it only can accept <
+ * Append a row of values. Currently it only can accept <
* {@link BytesRefArrayWritable}. If its size() is less than the
* column number in the file, zero bytes are appended for the empty columns.
* If its size() is greater then the column number in the file, the exceeded
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
index 94b9431..4f20e61 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileWork.java
@@ -143,7 +143,7 @@ public void resolveDynamicPartitionStoredAsSubDirsMerge(HiveConf conf,
/**
* alter table ... concatenate
- *
+ *
* If it is skewed table, use subdirectories in inputpaths.
*/
public void resolveConcatenateMerge(HiveConf conf) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java
index a591ce8..7d9a927 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/package-info.java
@@ -43,7 +43,7 @@
* Support for additional generic compression: LZO, SNAPPY, ZLIB.
*
*
- *
+ *
* Format:
*
* {@code
@@ -54,9 +54,8 @@
* PS LENGTH (1 byte)
* }
*
- *
*
- *
+ *
* Stripe:
*
* {@code
@@ -65,6 +64,5 @@
* STRIPE-FOOTER
* }
*
- *
*/
package org.apache.hadoop.hive.ql.io.orc;
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
index f350035..31658ff 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
@@ -36,7 +36,7 @@
/**
*
- * The ArrayWritableObjectInspector will inspect an ArrayWritable, considering it as a Hive struct.
+ * The ArrayWritableObjectInspector will inspect an ArrayWritable, considering it as a Hive struct.
* It can also inspect a List if Hive decides to inspect the result of an inspection.
*
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
index 143d72e..139ed82 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/DeepParquetHiveMapInspector.java
@@ -21,8 +21,8 @@
import org.apache.hadoop.io.Writable;
/**
- * The DeepParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.
- * It can also inspect a Map if Hive decides to inspect the result of an inspection.
+ * The DeepParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.
+ * It can also inspect a Map if Hive decides to inspect the result of an inspection.
* When trying to access elements from the map it will iterate over all keys, inspecting them and comparing them to the
* desired key.
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
index 55614a3..8e55766 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveArrayInspector.java
@@ -22,7 +22,7 @@
import org.apache.hadoop.io.Writable;
/**
- * The ParquetHiveArrayInspector will inspect an ArrayWritable, considering it as an Hive array.
+ * The ParquetHiveArrayInspector will inspect an ArrayWritable, considering it as an Hive array.
* It can also inspect a List if Hive decides to inspect the result of an inspection.
*
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
index 22250b3..bd66d9c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/StandardParquetHiveMapInspector.java
@@ -20,7 +20,7 @@
import org.apache.hadoop.io.Writable;
/**
- * The StandardParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.
+ * The StandardParquetHiveMapInspector will inspect an ArrayWritable, considering it as a Hive map.
* It can also inspect a Map if Hive decides to inspect the result of an inspection.
*
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
index 30f6494..9b8d14f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/timestamp/NanoTimeUtils.java
@@ -68,7 +68,7 @@ public static NanoTime getNanoTime(Timestamp ts, boolean skipConversion) {
* If the timezone of the calendar is different from the current local
* timezone, then the timestamp value will be adjusted.
* Possible adjustments:
- * - UTC Ts -> Local Ts copied to TableTZ Calendar -> UTC Ts -> JD
+ * - UTC Ts -&gt; Local Ts copied to TableTZ Calendar -&gt; UTC Ts -&gt; JD
* @param ts floating time timestamp to store
* @param calendar timezone used to adjust the timestamp for parquet
* @return adjusted julian date
@@ -112,7 +112,7 @@ public static Timestamp getTimestamp(NanoTime nt, boolean skipConversion) {
* If the timezone of the calendar is different from the current local
* timezone, then the timestamp value will be adjusted.
* Possible adjustments:
- * - JD -> UTC Ts -> TableTZ Calendar copied to LocalTZ Calendar -> UTC Ts
+ * - JD -&gt; UTC Ts -&gt; TableTZ Calendar copied to LocalTZ Calendar -&gt; UTC Ts
* @param nt stored julian date
* @param calendar timezone used to adjust the timestamp for parquet
* @return floating time represented as a timestamp. Guaranteed to display
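To make the arrow chains above concrete: the adjustment works by reading the timestamp's wall-clock fields in one time zone and re-assembling the same fields in another, which shifts the underlying instant by the zone offset. The sketch below illustrates that "copied to another Calendar" step only; it is not NanoTimeUtils itself, and it ignores DST edge cases and sub-second Julian-day math.

import java.sql.Timestamp;
import java.util.Calendar;
import java.util.TimeZone;

public class TimestampShiftSketch {

  /** Reinterpret the wall-clock fields of ts (read in fromZone) as if they had been written in toZone. */
  static Timestamp shift(Timestamp ts, TimeZone fromZone, TimeZone toZone) {
    Calendar from = Calendar.getInstance(fromZone);
    from.setTimeInMillis(ts.getTime());

    Calendar to = Calendar.getInstance(toZone);
    to.clear();
    to.set(from.get(Calendar.YEAR), from.get(Calendar.MONTH), from.get(Calendar.DAY_OF_MONTH),
           from.get(Calendar.HOUR_OF_DAY), from.get(Calendar.MINUTE), from.get(Calendar.SECOND));

    Timestamp result = new Timestamp(to.getTimeInMillis());
    result.setNanos(ts.getNanos());   // the fractional part rides along unchanged
    return result;
  }

  public static void main(String[] args) {
    Timestamp local = Timestamp.valueOf("2015-06-01 12:00:00");
    Timestamp asUtc = shift(local, TimeZone.getDefault(), TimeZone.getTimeZone("UTC"));
    System.out.println(local + " reinterpreted in UTC -> stored instant " + asUtc);
  }
}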
diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
index 55f0b39..7a1dd7d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
+++ ql/src/java/org/apache/hadoop/hive/ql/lib/Node.java
@@ -30,7 +30,7 @@
* Gets the vector of children nodes. This is used in the graph walker
* algorithms.
*
- * @return List<? extends Node>
+ * @return List&lt;? extends Node&gt;
*/
List<? extends Node> getChildren();
diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
index 6f7962e..efabb17 100644
--- ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
+++ ql/src/java/org/apache/hadoop/hive/ql/lib/RuleExactMatch.java
@@ -36,13 +36,13 @@
* The rule specified as operator names separated by % symbols, the left side represents the
* bottom of the stack.
*
- * E.g. TS%FIL%RS -> means
+ * E.g. TS%FIL%RS -&gt; means
* TableScan Node followed by Filter followed by ReduceSink in the tree, or, in terms of the
* stack, ReduceSink on top followed by Filter followed by TableScan
*
* @param ruleName
* name of the rule
- * @param regExp
+ * @param pattern
* string specification of the rule
**/
public RuleExactMatch(String ruleName, String[] pattern) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
index 1e850d6..7b74920 100644
--- ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
+++ ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
@@ -87,7 +87,7 @@ private static boolean patternHasOnlyWildCardChar(String pattern, char wcc) {
/**
* The rule specified by the regular expression. Note that, the regular
- * expression is specified in terms of Node name. For eg: TS.*RS -> means
+ * expression is specified in terms of Node name. For eg: TS.*RS -&gt; means
* TableScan Node followed by anything any number of times followed by
* ReduceSink
*
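In other words, the walker flattens the current operator stack into a single name string and lets a regular expression like TS.*RS decide whether the rule fires. The sketch below is a hedged illustration of that matching with a made-up name-joining convention, not Hive's exact stack encoding.

import java.util.List;
import java.util.regex.Pattern;

public class RuleRegExpSketch {

  /** True when the regex matches somewhere in the stack's concatenated operator names. */
  static boolean matches(Pattern rule, List<String> stackBottomToTop) {
    String names = String.join("%", stackBottomToTop) + "%";
    return rule.matcher(names).find();
  }

  public static void main(String[] args) {
    Pattern rule = Pattern.compile("TS%.*RS%");   // TableScan ... ReduceSink
    System.out.println(matches(rule, List.of("TS", "FIL", "RS")));   // true
    System.out.println(matches(rule, List.of("TS", "FIL", "SEL")));  // false
  }
}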
diff --git ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
index 4db10bb..235e621 100644
--- ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
+++ ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
@@ -26,13 +26,13 @@
import org.apache.logging.log4j.core.pattern.ConverterKeys;
/**
- * FilePattern converter that converts %pid pattern to <process-id>@<hostname> information
+ * FilePattern converter that converts %pid pattern to &lt;process-id&gt;@&lt;hostname&gt; information
* obtained at runtime.
*
* Example usage:
- * <RollingFile name="Rolling-default" fileName="test.log" filePattern="test.log.%pid.gz">
+ * &lt;RollingFile name="Rolling-default" fileName="test.log" filePattern="test.log.%pid.gz"&gt;
*
- * Will generate output file with name containing <process-id>@<hostname> like below
+ * Will generate output file with name containing &lt;process-id&gt;@&lt;hostname&gt; like below
* test.log.95232@localhost.gz
*/
@Plugin(name = "PidFilePatternConverter", category = "FileConverter")
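The process-id@hostname token the converter emits matches the string most JVMs expose through the runtime MXBean, which is one simple way to obtain it. The snippet below is a small illustrative sketch of producing such a file name, not the converter's actual code.

import java.lang.management.ManagementFactory;

public class PidTokenSketch {
  public static void main(String[] args) {
    // On common JVMs this is already formatted as <pid>@<hostname>, e.g. 95232@localhost.
    String pidAtHost = ManagementFactory.getRuntimeMXBean().getName();

    // Substituted into a file pattern like test.log.%pid.gz it yields test.log.95232@localhost.gz.
    String fileName = "test.log." + pidAtHost + ".gz";
    System.out.println(fileName);
  }
}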
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 88c73f0..b2529c3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -607,7 +607,7 @@ public void createTable(String tableName, List columns, List par
* new name of the table. could be the old name
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
- * @throws TException
+ * @throws HiveException
*/
public void alterTable(String tblName, Table newTbl, EnvironmentContext environmentContext)
throws InvalidOperationException, HiveException {
@@ -652,7 +652,7 @@ public void alterIndex(String baseTableName, String indexName, Index newIdx)
* new name of the index. could be the old name
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
- * @throws TException
+ * @throws HiveException
*/
public void alterIndex(String dbName, String baseTblName, String idxName, Index newIdx)
throws InvalidOperationException, HiveException {
@@ -674,7 +674,7 @@ public void alterIndex(String dbName, String baseTblName, String idxName, Index
* new partition
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
- * @throws TException
+ * @throws HiveException
*/
public void alterPartition(String tblName, Partition newPart, EnvironmentContext environmentContext)
throws InvalidOperationException, HiveException {
@@ -693,7 +693,7 @@ public void alterPartition(String tblName, Partition newPart, EnvironmentContext
* new partition
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
- * @throws TException
+ * @throws HiveException
*/
public void alterPartition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
throws InvalidOperationException, HiveException {
@@ -730,7 +730,7 @@ private void validatePartition(Partition newPart) throws HiveException {
* new partitions
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
- * @throws TException
+ * @throws HiveException
*/
public void alterPartitions(String tblName, List newParts, EnvironmentContext environmentContext)
throws InvalidOperationException, HiveException {
@@ -766,9 +766,7 @@ public void alterPartitions(String tblName, List newParts, Environmen
* spec of old partition
* @param newPart
* new partition
- * @throws InvalidOperationException
- * if the changes in metadata is not acceptable
- * @throws TException
+ * @throws HiveException
*/
public void renamePartition(Table tbl, Map oldPartSpec, Partition newPart)
throws HiveException {
@@ -1626,7 +1624,6 @@ public Database getDatabaseCurrent() throws HiveException {
* @param isSrcLocal
* @param isAcid
* @param hasFollowingStatsTask
- * @return
* @throws HiveException
*/
public void loadPartition(Path loadPath, String tableName,
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
index bd8c60a..a826ecf 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java
@@ -145,7 +145,7 @@ public void configureTableJobProperties(
* Called just before submitting MapReduce job.
*
* @param tableDesc descriptor for the table being accessed
- * @param JobConf jobConf for MapReduce job
+ * @param jobConf jobConf for MapReduce job
*/
public void configureJobConf(TableDesc tableDesc, JobConf jobConf);
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
index b00044a..1f8d554 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java
@@ -26,11 +26,11 @@
/**
- * PartitionIterable - effectively a lazy Iterable
+ * PartitionIterable - effectively a lazy Iterable<Partition>
*
* Sometimes, we have a need for iterating through a list of partitions,
* but the list of partitions can be too big to fetch as a single object.
- * Thus, the goal of PartitionIterable is to act as an Iterable
+ * Thus, the goal of PartitionIterable is to act as an Iterable<Partition>
* while lazily fetching each relevant partition, one after the other as
* independent metadata calls.
*
@@ -134,7 +134,7 @@ public void remove() {
/**
* Dummy constructor, which simply acts as an iterator on an already-present
* list of partitions, allows for easy drop-in replacement for other methods
- * that already have a List
+ * that already have a List<Partition>
*/
public PartitionIterable(Collection<Partition> ptnsProvided){
this.currType = Type.LIST_PROVIDED;
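
As a usage sketch of the drop-in behaviour described above, the list-backed constructor lets code written against Iterable<Partition> run unchanged over an already-fetched list; the helper method below is hypothetical.

    import java.util.List;

    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.PartitionIterable;

    final class PartitionIterableSketch {
      // Hypothetical helper: callers already holding a List<Partition> hand it to
      // the list-backed constructor and keep iterating exactly as before.
      static long countPartitions(List<Partition> alreadyFetched) {
        long n = 0;
        for (Partition p : new PartitionIterable(alreadyFetched)) {
          n++;   // per-partition work would go here
        }
        return n;
      }
    }
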
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index a53f774..11571db 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -654,7 +654,7 @@ private boolean isField(String col) {
* Returns a list of all the columns of the table (data columns + partition
* columns in that order.
*
- * @return List
+ * @return List<FieldSchema>
*/
public List getAllCols() {
ArrayList f_list = new ArrayList();
@@ -866,7 +866,7 @@ public boolean isIndexTable() {
}
/**
- * Creates a partition name -> value spec map object
+ * Creates a partition name -> value spec map object
*
* @param tp
* Use the information from this partition.
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
index 45839ad..73765d6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
@@ -302,7 +302,7 @@ public static ColumnPrunerScriptProc getScriptProc() {
* - add column names referenced in WindowFn args and in WindowFn expressions
* to the pruned list of the child Select Op.
* - finally we set the prunedColList on the ColumnPrunerContx;
- * and update the RR & signature on the PTFOp.
+ * and update the RR & signature on the PTFOp.
*/
public static class ColumnPrunerPTFProc extends ColumnPrunerScriptProc {
@Override
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
index 5102d81..eb78471 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
@@ -47,7 +47,7 @@
/**
* This class implements the processor context for Constant Propagate.
*
- * ConstantPropagateProcCtx keeps track of propagated constants in a column->const map for each
+ * ConstantPropagateProcCtx keeps track of propagated constants in a column->const map for each
* operator, enabling constants to be resolved across operators.
*/
public class ConstantPropagateProcCtx implements NodeProcessorCtx {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
index 517ce31..4e16c80 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
@@ -121,7 +121,7 @@ private ConstantPropagateProcFactory() {
/**
* Get ColumnInfo from column expression.
*
- * @param rr
+ * @param rs
* @param desc
* @return
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
index 058a507..b202666 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java
@@ -62,14 +62,14 @@
/**
* Queries of form : select max(c), count(distinct c) from T; generates a plan
- * of form TS->mGBy->RS->rGBy->FS This plan suffers from a problem that vertex
- * containing rGBy->FS necessarily need to have 1 task. This limitation results
+ * of form TS->mGBy->RS->rGBy->FS This plan suffers from a problem that vertex
+ * containing rGBy->FS necessarily need to have 1 task. This limitation results
* in slow execution because that task gets all the data. This optimization if
* successful will rewrite above plan to mGby1-rs1-mGby2-mGby3-rs2-rGby1 This
* introduces extra vertex of mGby2-mGby3-rs2. Note this vertex can have
* multiple tasks and since we are doing aggregation, output of this must
* necessarily be smaller than its input, which results in much less data going
- * in to original rGby->FS vertex, which continues to have single task. Also
+ * in to original rGby->FS vertex, which continues to have single task. Also
* note on calcite tree we have HiveExpandDistinctAggregatesRule rule which does
* similar plan transformation but has different conditions which needs to be
* satisfied. Additionally, we don't do any costing here but this is possibly
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
index 4387c42..130e159 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java
@@ -176,8 +176,6 @@ public GenMRProcContext() {
* hive configuration
* @param opTaskMap
* reducer to task mapping
- * @param seenOps
- * operator already visited
* @param parseCtx
* current parse context
* @param rootTasks
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 88bf829..c18bd01 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -432,8 +432,8 @@ static void splitPlan(ReduceSinkOperator cRS, GenMRProcContext opProcCtx)
* current alias
* @param topOp
* the top operator of the stack
- * @param plan
- * current plan
+ * @param task
+ * current task
* @param local
* whether you need to add to map-reduce or local work
* @param opProcCtx
@@ -452,8 +452,8 @@ public static void setTaskPlan(String alias_id,
* current alias
* @param topOp
* the top operator of the stack
- * @param plan
- * current plan
+ * @param task
+ * current task
* @param local
* whether you need to add to map-reduce or local work
* @param opProcCtx
@@ -474,13 +474,11 @@ public static void setTaskPlan(String alias_id,
*
* @param alias_id
* current alias
- * @param topOp
- * the top operator of the stack
* @param plan
* map work to initialize
* @param local
* whether you need to add to map-reduce or local work
- * @param pList
+ * @param partsList
* pruned partition list. If it is null it will be computed on-the-fly.
* @param inputs
* read entities for the map work
@@ -758,7 +756,7 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set topOp, MapWork plan, boolean local,
@@ -1220,7 +1218,6 @@ public static void replaceMapWork(String sourceAlias, String targetAlias,
/**
* @param fsInput The FileSink operator.
- * @param ctx The MR processing context.
* @param finalName the final destination path the merge job should output.
* @param dependencyTask
* @param mvTasks
@@ -1245,11 +1242,11 @@ public static void replaceMapWork(String sourceAlias, String targetAlias,
* v
* FileSinkOperator (fsMerge)
*
- * Here the pathToPartitionInfo & pathToAlias will remain the same, which means the paths
+ * Here the pathToPartitionInfo & pathToAlias will remain the same, which means the paths
* do
* not contain the dynamic partitions (their parent). So after the dynamic partitions are
* created (after the first job finished before the moveTask or ConditionalTask start),
- * we need to change the pathToPartitionInfo & pathToAlias to include the dynamic
+ * we need to change the pathToPartitionInfo & pathToAlias to include the dynamic
* partition
* directories.
*
@@ -1564,8 +1561,8 @@ private static MapWork createMRWorkForMergingFiles (HiveConf conf,
*
* @param fsInputDesc
* @param finalName
+ * @param hasDynamicPartitions
* @param ctx
- * @param inputFormatClass
* @return MergeWork if table is stored as RCFile or ORCFile,
* null otherwise
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
index 4ddb545..e34f3e5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GlobalLimitOptimizer.java
@@ -50,11 +50,11 @@
/**
* This optimizer is used to reduce the input size for the query for queries which are
* specifying a limit.
- *
+ *
* For eg. for a query of type:
- *
- * select expr from T where limit 100;
- *
+ *
+ * select expr from T where <filter> limit 100;
+ *
* Most probably, the whole table T need not be scanned.
* Chances are that even if we scan the first file of T, we would get the 100 rows
* needed by this query.
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
index 48021cb..6475945 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
@@ -55,15 +55,15 @@
*
* Without this optimization:
*
- * TS -> FIL -> SEL -> RS ->
- * JOIN -> SEL -> FS
- * TS -> FIL -> SEL -> RS ->
+ * TS -> FIL -> SEL -> RS ->
+ * JOIN -> SEL -> FS
+ * TS -> FIL -> SEL -> RS ->
*
* With this optimization
*
- * TS -> FIL -> RS ->
- * JOIN -> FS
- * TS -> FIL -> RS ->
+ * TS -> FIL -> RS ->
+ * JOIN -> FS
+ * TS -> FIL -> RS ->
*
* Note absence of select operator after filter and after join operator.
* Also, see : identity_proj_remove.q
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
index 9bf197b..64f7975 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java
@@ -54,7 +54,7 @@
* If RS is only for limiting rows, RSHash counts row with same key separately.
* But if RS is for GBY, RSHash should forward all the rows with the same key.
*
- * Legend : A(a) --> key A, value a, row A(a)
+ * Legend : A(a) --> key A, value a, row A(a)
*
* If each RS in mapper tasks is forwarded rows like this
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
index d84a1e6..7adffc9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
@@ -273,11 +273,8 @@ private static void validateMapJoinTypes(Operator extends OperatorDesc> op)
/**
* convert a regular join to a a map-side join.
*
- * @param opParseCtxMap
* @param op
* join operator
- * @param joinTree
- * qb join tree
* @param mapJoinPos
* position of the source to be read as part of map-reduce framework. All other sources
* are cached in memory
@@ -408,11 +405,8 @@ private static boolean needValueIndex(int[] valueIndex) {
/**
* convert a sortmerge join to a a map-side join.
*
- * @param opParseCtxMap
* @param smbJoinOp
* join operator
- * @param joinTree
- * qb join tree
* @param bigTablePos
* position of the source to be read as part of map-reduce framework. All other sources
* are cached in memory
@@ -560,7 +554,6 @@ public MapJoinOperator generateMapJoinOperator(ParseContext pctx, JoinOperator o
* @param mapJoinPos the position of big table as determined by either hints or auto conversion.
* @param condns the join conditions
* @return if given mapjoin position is a feasible big table position return same else -1.
- * @throws SemanticException if given position is not in the big table candidates.
*/
public static int checkMapJoin(int mapJoinPos, JoinCondDesc[] condns) {
Set bigTableCandidates = MapJoinProcessor.getBigTableCandidates(condns);
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
index 51464e5..f3b4687 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/PrunerOperatorFactory.java
@@ -92,7 +92,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
/**
* Generate predicate.
*
- * Subclass should implement the function. Please refer to {@link OpProcFactory.FilterPPR}
+ * Subclass should implement the function. Please refer to {@link org.apache.hadoop.hive.ql.optimizer.ppr.OpProcFactory.FilterPPR}
*
* @param procCtx
* @param fop
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedScanOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedScanOptimizer.java
index e31119f..72e4890 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedScanOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedScanOptimizer.java
@@ -69,7 +69,7 @@
* in the query plan and merges them if they meet some preconditions.
*
* TS TS TS
- * | | -> / \
+ * | | -> / \
* Op Op Op Op
*
* Currently it only works with the Tez execution engine.
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
index 23ee3ae..75892e5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
@@ -37,9 +37,7 @@
* convert a regular join to a a map-side join.
*
* @param conf
- * @param opParseCtxMap
* @param op join operator
- * @param joinTree qb join tree
* @param bigTablePos position of the source to be read as part of
* map-reduce framework. All other sources are cached in memory
* @param noCheckOuterJoin
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index e339d0a..b525e82 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -612,7 +612,7 @@ public static boolean orderRelNode(RelNode rel) {
/**
* Get top level select starting from root. Assumption here is root can only
- * be Sort & Project. Also the top project should be at most 2 levels below
+ * be Sort & Project. Also the top project should be at most 2 levels below
* Sort; i.e Sort(Limit)-Sort(OB)-Select
*
* @param rootRel
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
index a0dfbb1..ef73487 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
@@ -120,7 +120,7 @@ public static HiveProject create(RelOptCluster cluster, RelNode child, List ex
* are projected multiple times.
*
*
- * This method could optimize the result as {@link #permute} does, but does
+ * This method could optimize the result as permute does, but does
* not at present.
*
* @param rel
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
index b63ea02..6fab3d7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
@@ -78,8 +78,8 @@
* have m+n=a, 2m+n=b where m is the #row in R1 and n is the #row in R2 then
* m=b-a, n=2a-b, m-n=2b-3a
* if it is except (distinct)
- * then R5 = Fil (b-a>0 && 2a-b=0) R6 = select only keys from R5
- * else R5 = Fil (2b-3a>0) R6 = UDTF (R5) which will explode the tuples based on 2b-3a.
+ * then R5 = Fil (b-a>0 && 2a-b=0) R6 = select only keys from R5
+ * else R5 = Fil (2b-3a > 0) R6 = UDTF (R5) which will explode the tuples based on 2b-3a.
* Note that NULLs are handled the same as other values. Please refer to the test cases.
*/
public class HiveExceptRewriteRule extends RelOptRule {
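
The bookkeeping above can be checked with concrete numbers; the following sketch (illustrative values only) recovers m and n from the two weighted counts a and b and shows which filter condition fires.

    // Illustrative numbers: a key occurs m = 3 times in R1 and n = 1 time in R2.
    public final class ExceptArithmeticSketch {
      public static void main(String[] args) {
        long m = 3, n = 1;
        long a = m + n;          // rows counted with weight 1 from both branches -> 4
        long b = 2 * m + n;      // R1 rows weighted 2, R2 rows weighted 1        -> 7

        System.out.println("m = " + (b - a));              // 3, recovers m
        System.out.println("n = " + (2 * a - b));          // 1, recovers n
        System.out.println("m - n = " + (2 * b - 3 * a));  // 2 > 0, EXCEPT ALL keeps the key twice
        // EXCEPT DISTINCT needs b - a > 0 && 2a - b == 0; here 2a - b == 1, so the
        // key is dropped because it also appears in R2.
      }
    }
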
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
index c9cf396..175bbb4 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
@@ -46,10 +46,10 @@
import com.google.common.collect.Sets;
/** Not an optimization rule.
- * Rule to aid in translation from Calcite tree -> Hive tree.
+ * Rule to aid in translation from Calcite tree -> Hive tree.
* Transforms :
* Left Right Left Right
- * \ / -> \ /
+ * \ / -> \ /
* Join HashExchange HashExchange
* \ /
* Join
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
index 0644f0c..96a2db1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
@@ -52,11 +52,11 @@
* column statistics (if available).
*
* For instance, given the following predicate:
- * a > 5
+ * a > 5
* we can infer that the predicate will evaluate to false if the max
* value for column a is 4.
*
- * Currently we support the simplification of =, >=, <=, >, <, and
+ * Currently we support the simplification of =, >=, <=, >, <, and
* IN operations.
*/
public class HiveReduceExpressionsWithStatsRule extends RelOptRule {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
index 83d3f74..e45a2ae 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
@@ -70,9 +70,9 @@
*
* Sub-queries are represented by {@link RexSubQuery} expressions.
*
*
* A sub-query may or may not be correlated. If a sub-query is correlated,
- * the wrapped {@link RelNode} will contain a {@link RexCorrelVariable} before
- * the rewrite, and the product of the rewrite will be a {@link Correlate}.
- * The Correlate can be removed using {@link RelDecorrelator}.
+ * the wrapped {@link RelNode} will contain a RexCorrelVariable before
+ * the rewrite, and the product of the rewrite will be a Correlate.
+ * The Correlate can be removed using RelDecorrelator.
*/
public abstract class HiveSubQueryRemoveRule extends RelOptRule{
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
index 0f6c5b5..fa6fb18 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
@@ -68,7 +68,7 @@
* 1. Change the output col/ExprNodeColumn names to external names.
* 2. Verify if we need to use the "KEY."/"VALUE." in RS cols; switch to
* external names if possible.
- * 3. In ExprNode & in ColumnInfo the tableAlias/VirtualColumn is specified
+ * 3. In ExprNode & in ColumnInfo the tableAlias/VirtualColumn is specified
* differently for different GB/RS in pipeline. Remove the different treatments.
* 4. VirtualColMap needs to be maintained
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
index e1927e9..a5a9d2c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
@@ -203,7 +203,7 @@ private void findPossibleAutoConvertedJoinOperators() throws SemanticException {
/**
* Detect correlations and transform the query tree.
*
- * @param pactx
+ * @param pctx
* current parse context
* @throws SemanticException
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
index 388399c..42a364e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
@@ -98,7 +98,7 @@ protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws SemanticE
* @param throwException if throw a exception when the input operator has multiple parents
* @return the single parent or null when the input operator has multiple parents and
* throwException is false;
- * @throws HiveException
+ * @throws SemanticException
*/
protected static Operator> getSingleParent(Operator> operator,
boolean throwException) throws SemanticException {
@@ -128,7 +128,7 @@ protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws SemanticE
* @param throwException if throw a exception when the input operator has multiple children
* @return the single child or null when the input operator has multiple children and
* throwException is false;
- * @throws HiveException
+ * @throws SemanticException
*/
protected static Operator> getSingleChild(Operator> operator,
boolean throwException) throws SemanticException {
@@ -432,8 +432,7 @@ protected static void isNullOperator(Operator> operator) throws SemanticExcept
* @param newOperator the operator will be inserted between child and parent
* @param child
* @param parent
- * @param context
- * @throws HiveException
+ * @throws SemanticException
*/
protected static void insertOperatorBetween(
Operator> newOperator, Operator> parent, Operator> child)
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
index d709e21..4db3e79 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteGBUsingIndex.java
@@ -83,7 +83,6 @@
* @see RewriteCanApplyProcFactory
* @see RewriteParseContextGenerator
* @see RewriteQueryUsingAggregateIndexCtx
- * @see RewriteQueryUsingAggregateIndex
* For test cases, @see ql_rewrite_gbtoidx.q
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java
index 5659a72..66ce1bf 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java
@@ -53,7 +53,7 @@
/**
* Parse the input {@link String} command and generate an operator tree.
- * @param conf
+ * @param queryState
* @param command
* @throws SemanticException
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
index dcea0e5..637fa76 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteQueryUsingAggregateIndexCtx.java
@@ -59,7 +59,7 @@
/**
* RewriteQueryUsingAggregateIndexCtx class stores the
- * context for the {@link RewriteQueryUsingAggregateIndex}
+ * context for the RewriteQueryUsingAggregateIndex
* used to rewrite operator plan with index table instead of base table.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
index 1965120..256bdfc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
@@ -119,18 +119,18 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
*
* Complete dynamic-multi-dimension collection
*
- * (0,0) (1,a) * -> T
- * (0,1) (1,b) -> T
- * (0,2) (1,c) *-> F
- * (0,3) (1,other)-> F
- * (1,0) (2,a)-> F
- * (1,1) (2,b) * -> T
- * (1,2) (2,c)-> F
- * (1,3) (2,other)-> F
- * (2,0) (other,a) -> T
- * (2,1) (other,b) -> T
- * (2,2) (other,c) -> T
- * (2,3) (other,other) -> T
+ * (0,0) (1,a) * -> T
+ * (0,1) (1,b) -> T
+ * (0,2) (1,c) *-> F
+ * (0,3) (1,other)-> F
+ * (1,0) (2,a)-> F
+ * (1,1) (2,b) * -> T
+ * (1,2) (2,c)-> F
+ * (1,3) (2,other)-> F
+ * (2,0) (other,a) -> T
+ * (2,1) (other,b) -> T
+ * (2,2) (other,c) -> T
+ * (2,3) (other,other) -> T
* * is skewed value entry
*
* Expression Tree : ((c1=1) and (c2=a)) or ( (c1=3) or (c2=b))
@@ -171,7 +171,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
*
*
* child_nd instanceof ExprNodeConstantDesc
- * && ((ExprNodeConstantDesc) child_nd).getValue() == null)
+ * && ((ExprNodeConstantDesc) child_nd).getValue() == null)
*
*
*
@@ -410,7 +410,7 @@ private static void decideDefaultDirSelection(Partition part, List selecte
* 2. all other cases, select the directory
* Use case #2:
* Multiple dimension collection represents skewed elements so that walk through tree one by one.
- * Cell is a List representing the value mapping from index path and skewed value.
+ * Cell is a List<String> representing the value mapping from index path and skewed value.
* skewed column: C1, C2, C3
* skewed value: (1,a,x), (2,b,x), (1,c,x), (2,a,y)
* Other: represent value for the column which is not part of skewed value.
@@ -428,8 +428,8 @@ private static void decideDefaultDirSelection(Partition part, List selecte
* ==============
* please see another example in {@link ListBucketingPruner#prune}
* We will use a HashMap to represent the Dynamic-Multiple-Dimension collection:
- * 1. Key is List representing the index path to the cell
- * 2. value represents the cell (Boolean for use case #1, List for case #2)
+ * 1. Key is List<Integer> representing the index path to the cell
+ * 2. value represents the cell (Boolean for use case #1, List<String> for case #2)
* For example:
* 1. skewed column (list): C1, C2, C3
* 2. skewed value (list of list): (1,a,x), (2,b,x), (1,c,x), (2,a,y)
@@ -446,7 +446,7 @@ private static void decideDefaultDirSelection(Partition part, List selecte
*
* We use the index,starting at 0. to construct hashmap representing dynamic-multi-dimension
* collection:
- * key (what skewed value key represents) -> value (Boolean for use case #1, List for case
+ * key (what skewed value key represents) -> value (Boolean for use case #1, List<String> for case
* #2).
* (0,0,0) (1,a,x)
* (0,0,1) (1,a,y)
@@ -572,18 +572,18 @@ private static void decideDefaultDirSelection(Partition part, List selecte
* Index: (0,1,2) (0,1,2,3)
*
* Complete dynamic-multi-dimension collection
- * (0,0) (1,a) * -> T
- * (0,1) (1,b) -> T
- * (0,2) (1,c) *-> F
- * (0,3) (1,other)-> F
- * (1,0) (2,a)-> F
- * (1,1) (2,b) * -> T
- * (1,2) (2,c)-> F
- * (1,3) (2,other)-> F
- * (2,0) (other,a) -> T
- * (2,1) (other,b) -> T
- * (2,2) (other,c) -> T
- * (2,3) (other,other) -> T
+ * (0,0) (1,a) * -> T
+ * (0,1) (1,b) -> T
+ * (0,2) (1,c) *-> F
+ * (0,3) (1,other)-> F
+ * (1,0) (2,a)-> F
+ * (1,1) (2,b) * -> T
+ * (1,2) (2,c)-> F
+ * (1,3) (2,other)-> F
+ * (2,0) (other,a) -> T
+ * (2,1) (other,b) -> T
+ * (2,2) (other,c) -> T
+ * (2,3) (other,other) -> T
* * is skewed value entry
*
* @param uniqSkewedElements
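
A minimal sketch of the collection described above for use case #1, assuming two skewed columns whose unique values are indexed as in the table: the key is the List<Integer> index path and the value records whether the cell satisfies the expression tree. Class name and values are illustrative only.

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public final class SkewedCellSketch {
      public static void main(String[] args) {
        // unique values for c1: {1, 2, other}    -> indexes 0, 1, 2
        // unique values for c2: {a, b, c, other} -> indexes 0, 1, 2, 3
        Map<List<Integer>, Boolean> cells = new HashMap<>();
        cells.put(Arrays.asList(0, 0), true);   // (1, a)         matches, skewed entry
        cells.put(Arrays.asList(0, 2), false);  // (1, c)         does not match
        cells.put(Arrays.asList(2, 3), true);   // (other, other) matches

        System.out.println(cells.get(Arrays.asList(0, 0)));  // true
      }
    }
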
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
index 9377563..65d64e8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
@@ -105,7 +105,7 @@ private void initialize(HiveConf hiveConf) {
* invoke all the resolvers one-by-one, and alter the physical plan.
*
* @return PhysicalContext
- * @throws HiveException
+ * @throws SemanticException
*/
public PhysicalContext optimize() throws SemanticException {
for (PhysicalPlanResolver r : resolvers) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
index 1103d35..2afcb22 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
@@ -47,7 +47,6 @@
* Evaluate expression with partition columns
*
* @param expr
- * @param partSpec
* @param rowObjectInspector
* @return value returned by the expression
* @throws HiveException
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index fc6adaf..6ed5e3e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -215,45 +215,32 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
/**
* FILTER operator does not change the average row size but it does change the number of rows
* emitted. The reduction in the number of rows emitted is dependent on the filter expression.
- *
* Notations:
+ *
* - T(S) - Number of tuples in relations S
* - V(S,A) - Number of distinct values of attribute A in relation S
*
+ * Rules:
*
- * Rules:
- * - Column equals a constant
- * T(S) = T(R) / V(R,A)
- *
- *
- * - Inequality conditions
- * T(S) = T(R) / 3
- *
- *
- * - Not equals comparison
- * - Simple formula T(S) = T(R)
- *
- * - Alternate formula T(S) = T(R) (V(R,A) - 1) / V(R,A)
- *
- *
- * - NOT condition
- * T(S) = 1 - T(S'), where T(S') is the satisfying condition
- *
- *
- * - Multiple AND conditions
- * Cascadingly apply the rules 1 to 3 (order doesn't matter)
- *
- *
- * - Multiple OR conditions
- * - Simple formula is to evaluate conditions independently
- * and sum the results T(S) = m1 + m2
- *
- * - Alternate formula T(S) = T(R) * ( 1 - ( 1 - m1/T(R) ) * ( 1 - m2/T(R) ))
- *
+ *
+ * - Column equals a constant T(S) = T(R) / V(R,A)
+ * - Inequality conditions T(S) = T(R) / 3
+ * - Not equals comparison - Simple formula T(S) = T(R)
+ * - - Alternate formula T(S) = T(R) (V(R,A) - 1) / V(R,A)
+ * - NOT condition T(S) = 1 - T(S'), where T(S') is the satisfying condition
+ * - Multiple AND conditions Cascadingly apply the rules 1 to 3 (order doesn't matter)
+ * - Multiple OR conditions - Simple formula is to evaluate conditions independently
+ * and sum the results T(S) = m1 + m2
+ * - - Alternate formula T(S) = T(R) * ( 1 - ( 1 - m1/T(R) ) * ( 1 - m2/T(R) ))
+ *
* where, m1 is the number of tuples that satisfy condition1 and m2 is the number of tuples that
- * satisfy condition2
+ * satisfy condition2
*
- *
* Worst case: If no column statistics are available, then evaluation of predicate
* expression will assume worst case (i.e; half the input rows) for each of predicate expression.
- *
+ *
* For more information, refer 'Estimating The Cost Of Operations' chapter in
* "Database Systems: The Complete Book" by Garcia-Molina et. al.
- *
+ *
*/
public static class FilterStatsRule extends DefaultStatsRule implements NodeProcessor {
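
The rules above are easiest to read with numbers plugged in. The sketch below is not Hive's evaluator; it simply applies the listed formulas to an assumed relation with T(R) = 10000 rows and V(R,A) = 50 distinct values for column A, with a second hypothetical inequality predicate used for the AND and OR cases.

    public final class FilterSelectivitySketch {
      public static void main(String[] args) {
        double tR = 10000, vRA = 50;

        double eq    = tR / vRA;                // A = const                    -> 200 rows
        double ineq  = tR / 3;                  // inequality (e.g. B < const)  -> ~3333 rows
        double neq   = tR * (vRA - 1) / vRA;    // A <> const, alternate formula -> 9800 rows
        double and   = (eq / tR) * (ineq / tR) * tR;  // equality AND inequality, rules cascaded
        double or    = eq + ineq;               // simple OR formula: m1 + m2
        double orAlt = tR * (1 - (1 - eq / tR) * (1 - ineq / tR));  // alternate OR formula

        System.out.printf("eq=%.0f ineq=%.0f neq=%.0f and=%.0f or=%.0f orAlt=%.0f%n",
            eq, ineq, neq, and, or, orAlt);
      }
    }
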
@@ -949,7 +936,7 @@ private long evaluateChildExpr(Statistics stats, ExprNodeDesc child,
* available then a better estimate can be found by taking the smaller of product of V(R,[A,B,C])
* (product of distinct cardinalities of A,B,C) and T(R)/2.
*
- * T(R) = min (T(R)/2 , V(R,[A,B,C]) ---> [1]
+ * T(R) = min (T(R)/2 , V(R,[A,B,C]) ---> [1]
*
* In the presence of grouping sets, map-side GBY will emit more rows depending on the size of
* grouping set (input rows * size of grouping set). These rows will get reduced because of
@@ -1369,12 +1356,12 @@ private boolean checkMapSideAggregation(GroupByOperator gop,
}
/**
- * JOIN operator can yield any of the following three cases - The values of join keys are
+ * JOIN operator can yield any of the following three cases - The values of join keys are
* disjoint in both relations in which case T(RXS) = 0 (we need histograms for this) - Join
* key is primary key on relation R and foreign key on relation S in which case every tuple in S
- * will have a tuple in R T(RXS) = T(S) (we need histograms for this) - Both R & S relation
+ * will have a tuple in R T(RXS) = T(S) (we need histograms for this) - Both R & S relation
* have same value for join-key. Ex: bool column with all true values T(RXS) = T(R) * T(S) (we
- * need histograms for this. counDistinct = 1 and same value)
+ * need histograms for this. counDistinct = 1 and same value)
*
* In the absence of histograms, we can use the following general case
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
index fb1a309..e728100 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
@@ -98,7 +98,7 @@ public String getName() {
/**
* For every node in this subtree, make sure it's start/stop token's
* are set. Walk depth first, visit bottom up. Only updates nodes
- * with at least one token index < 0.
+ * with at least one token index < 0.
*
* In contrast to the method in the parent class, this method is
* iterative.
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 136e951..0556881 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -804,7 +804,6 @@ private static void generateConstraintInfos(ASTNode child, List columnNa
/**
* Process the foreign keys from the AST and populate the foreign keys in the SQLForeignKey list
- * @param parent Parent of the foreign key token node
* @param child Foreign Key token node
* @param foreignKeys SQLForeignKey list
* @throws SemanticException
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
index 0a5cf00..06c58d3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java
@@ -417,10 +417,6 @@ public void analyze(ASTNode ast, Context origCtx) throws SemanticException {
/**
* @param ast
* is the original analyze ast
- * @param qb
- * is the qb that calls this function
- * @param sem
- * is the semantic analyzer that calls this function
* @return
* @throws SemanticException
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
index ecf3cfc..9b7421e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
@@ -413,7 +413,7 @@ public void setExpressions(ArrayList columns)
/**
* Add order expressions from the list of expressions in the format of ASTNode
- * @param args
+ * @param nodes
*/
public void addExpressions(ArrayList nodes) {
for (int i = 0; i < nodes.size(); i++) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 565fbef..514227e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -128,7 +128,6 @@ public ParseContext() {
}
/**
- * @param conf
* @param opToPartPruner
* map from table scan operator to partition pruner
* @param opToPartList
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
index 1ea608b..bb4bcec 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
@@ -244,7 +244,7 @@ public boolean allowEventReplacementInto(Table table) {
}
/**
- * Returns a predicate filter to filter an Iterable to return all partitions
+ * Returns a predicate filter to filter an Iterable<Partition> to return all partitions
* that the current replication event specification is allowed to replicate-replace-into
*/
public Predicate allowEventReplacementInto() {
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
index ac71565..9d68bf0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
@@ -120,7 +120,7 @@ public void setDenominator(int den) {
/**
* Gets the ON part's expression list.
*
- * @return ArrayList
+ * @return ArrayList<ASTNode>
*/
public ArrayList getExprs() {
return exprs;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 08a8f00..13d3726 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -412,9 +412,7 @@ private void patchUpAfterCTASorMaterializedView(final List cur
/**
* Join keys are expressions based on the select operator. Resolve the expressions so they
* are based on the ReduceSink operator
- * SEL -> RS -> JOIN
+ * SEL -> RS -> JOIN
* @param source
* @param reduceSinkOp
* @return
@@ -623,10 +623,10 @@ public static PrimitiveTypeInfo deriveMinArgumentCast(
* @param inputOp
* Input Hive Operator
* @param startPos
- * starting position in the input operator schema; must be >=0 and <=
+ * starting position in the input operator schema; must be >=0 and <=
* endPos
* @param endPos
- * end position in the input operator schema; must be >=0.
+ * end position in the input operator schema; must be >=0.
* @return List of ExprNodeDesc
*/
public static ArrayList genExprNodeDesc(Operator inputOp, int startPos, int endPos,
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
index 31fcaeb..ef06f48 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
@@ -184,7 +184,6 @@ public void setDefaultDirName(String defaultDirName) {
/**
* check if list bucketing is enabled.
*
- * @param ctx
* @return
*/
public boolean isSkewedStoredAsDir() {
@@ -201,7 +200,6 @@ public boolean isSkewedStoredAsDir() {
* 0: not list bucketing
* int: no. of skewed columns
*
- * @param ctx
* @return
*/
public int calculateListBucketingLevel() {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index 2120400..4c7529c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@ -195,10 +195,10 @@ public void removePathToAlias(Path path){
}
/**
- * This is used to display and verify output of "Path -> Alias" in test framework.
+ * This is used to display and verify output of "Path -> Alias" in test framework.
*
- * QTestUtil masks "Path -> Alias" and makes verification impossible.
- * By keeping "Path -> Alias" intact and adding a new display name which is not
+ * QTestUtil masks "Path -> Alias" and makes verification impossible.
+ * By keeping "Path -> Alias" intact and adding a new display name which is not
* masked by QTestUtil by removing prefix.
*
* Notes: we would still be masking for intermediate directories.
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index d82973c..222a7ad 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -450,7 +450,7 @@ public static TableDesc getTableDesc(CreateViewDesc crtViewDesc, String cols, St
* Generate the table descriptor of MetadataTypedColumnsetSerDe with the
* separatorCode. MetaDataTypedColumnsetSerDe is used because LazySimpleSerDe
* does not support a table with a single column "col" with type
- * "array".
+ * "array<string>".
*/
public static TableDesc getDefaultTableDesc(String separatorCode) {
return new TableDesc(
@@ -967,7 +967,7 @@ public static String stripQuotes(String val) {
}
/**
- * Remove prefix from "Path -> Alias"
+ * Remove prefix from "Path -> Alias"
* This is required for testing.
* In order to verify that path is right, we need to display it in expected test result.
* But, mask pattern masks path with some patterns.
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java
index 7523d01..def16a7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java
@@ -39,9 +39,7 @@
public RenamePartitionDesc() {
}
- /**
- * @param dbName
- * database to add to.
+ /**
* @param tableName
* table to add to.
* @param oldPartSpec
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
index 2b12691..1e026aa 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
@@ -85,7 +85,7 @@ public String getDatabaseName() {
}
/**
- * @param databaseName
+ * @param dbName
* the dbName to set
*/
public void setDatabaseName(String dbName) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
index 48522c5..c963211 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
@@ -77,7 +77,7 @@ public ShowFunctionsDesc(Path resFile, String pattern) {
/**
* @param pattern
* names of tables to show
- * @param like
+ * @param isLikePattern
* is like keyword used
*/
public ShowFunctionsDesc(Path resFile, String pattern, boolean isLikePattern) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
index 0fa50f1..2e0059c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
@@ -32,7 +32,7 @@
* 1. It's position in table column is 1.
* 2. It's position in skewed column list is 0.
*
- * This information will be used in {@FileSinkOperator} generateListBucketingDirName
+ * This information will be used in {@link org.apache.hadoop.hive.ql.exec.FileSinkOperator} generateListBucketingDirName
*/
public class SkewedColumnPositionPair {
private int tblColPosition;
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
index 9ca5544..91561d8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
@@ -274,7 +274,6 @@ public SparkEdgeProperty getEdgeProperty(BaseWork a, BaseWork b) {
/**
* connect adds an edge between a and b. Both nodes have
* to be added prior to calling connect.
- * @param
*/
public void connect(BaseWork a, BaseWork b, SparkEdgeProperty edgeProp) {
workGraph.get(a).add(b);
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
index a037ea3..15c0b7d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
@@ -370,7 +370,6 @@ public int compareTo(Dependency o) {
/**
* connect adds an edge between a and b. Both nodes have
* to be added prior to calling connect.
- * @param
*/
public void connect(BaseWork a, BaseWork b,
TezEdgeProperty edgeProp) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
index 68f289e..a5bb24c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
@@ -23,8 +23,8 @@
/**
- * All member variables should have a setters and getters of the form get and set or else they won't be recreated properly at run
+ * All member variables should have a setters and getters of the form get<member
+ * name> and set<member name> or else they won't be recreated properly at run
* time.
*
*/
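
A minimal sketch of the stated convention, using a made-up descriptor class and field; the point is only that each member carries a matching get<member name>/set<member name> pair so the object survives plan (de)serialization.

    // "numBuckets" is a hypothetical field, not one that UDTFDesc actually has.
    public class ExampleDesc implements java.io.Serializable {
      private static final long serialVersionUID = 1L;

      private int numBuckets;

      public int getNumBuckets() {                 // get<member name>
        return numBuckets;
      }

      public void setNumBuckets(int numBuckets) {  // set<member name>
        this.numBuckets = numBuckets;
      }
    }
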
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
index f9a8725..96c3401 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
@@ -34,17 +34,17 @@
private static final long serialVersionUID = 1L;
/**
- * GLOBAL No key. All rows --> 1 full aggregation on end of input
+ * GLOBAL No key. All rows --> 1 full aggregation on end of input
*
- * HASH Rows aggregated in to hash table on group key -->
+ * HASH Rows aggregated in to hash table on group key -->
* 1 partial aggregation per key (normally, unless there is spilling)
*
* MERGE_PARTIAL As first operator in a REDUCER, partial aggregations come grouped from
- * reduce-shuffle -->
+ * reduce-shuffle -->
* aggregate the partial aggregations and emit full aggregation on
* endGroup / closeOp
*
- * STREAMING Rows come from PARENT operator already grouped -->
+ * STREAMING Rows come from PARENT operator already grouped -->
* aggregate the rows and emit full aggregation on key change / closeOp
*
* NOTE: Hash can spill partial result rows prematurely if it runs low on memory.
@@ -115,16 +115,16 @@ public void setProjectedOutputColumns(int[] projectedOutputColumns) {
*
* Decides using GroupByDesc.Mode and whether there are keys.
*
- * Mode.COMPLETE --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.STREAMING)
+ * Mode.COMPLETE --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.STREAMING)
*
- * Mode.HASH --> ProcessingMode.HASH
+ * Mode.HASH --> ProcessingMode.HASH
*
- * Mode.MERGEPARTIAL --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.MERGE_PARTIAL)
+ * Mode.MERGEPARTIAL --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.MERGE_PARTIAL)
*
* Mode.PARTIAL1,
* Mode.PARTIAL2,
* Mode.PARTIALS,
- * Mode.FINAL --> ProcessingMode.STREAMING
+ * Mode.FINAL --> ProcessingMode.STREAMING
*
*/
public static ProcessingMode groupByDescModeToVectorProcessingMode(GroupByDesc.Mode mode,
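
Restated as code, the mapping listed in this javadoc looks roughly like the sketch below; it is an illustration of the documented table, not the body of groupByDescModeToVectorProcessingMode.

    import org.apache.hadoop.hive.ql.plan.GroupByDesc;
    import org.apache.hadoop.hive.ql.plan.VectorGroupByDesc.ProcessingMode;

    final class ProcessingModeMappingSketch {
      static ProcessingMode choose(GroupByDesc.Mode mode, boolean hasKeys) {
        switch (mode) {
          case COMPLETE:     return hasKeys ? ProcessingMode.STREAMING : ProcessingMode.GLOBAL;
          case HASH:         return ProcessingMode.HASH;
          case MERGEPARTIAL: return hasKeys ? ProcessingMode.MERGE_PARTIAL : ProcessingMode.GLOBAL;
          case PARTIAL1:
          case PARTIAL2:
          case PARTIALS:
          case FINAL:        return ProcessingMode.STREAMING;
          default: throw new IllegalArgumentException("Unexpected GROUP BY mode: " + mode);
        }
      }
    }
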
diff --git ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
index a0c4008..a6514ca 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
@@ -53,9 +53,9 @@
* plan generation adds filters where they are seen but in some instances some
* of the filter expressions can be pushed nearer to the operator that sees this
* particular data for the first time. e.g. select a.*, b.* from a join b on
- * (a.col1 = b.col1) where a.col1 > 20 and b.col2 > 40
+ * (a.col1 = b.col1) where a.col1 > 20 and b.col2 > 40
*
- * For the above query, the predicates (a.col1 > 20) and (b.col2 > 40), without
+ * For the above query, the predicates (a.col1 > 20) and (b.col2 > 40), without
* predicate pushdown, would be evaluated after the join processing has been
* done. Suppose the two predicates filter out most of the rows from a and b,
* the join is unnecessarily processing these rows. With predicate pushdown,
diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
index e78ea45..c9fd64c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
+++ ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
@@ -28,7 +28,7 @@
* CommandProcessor interface. Typically errorMessage
* and SQLState will only be set if the responseCode
* is not 0. Note that often {@code responseCode} ends up the exit value of
- * command shell process so should keep it to < 127.
+ * command shell process so should keep it to < 127.
*/
public class CommandProcessorResponse {
private final int responseCode;
diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
index 1acdc95..d0225bf 100644
--- ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
@@ -37,7 +37,7 @@
/**
* This class processes HADOOP commands used for HDFS encryption. It is meant to be run
- * only by Hive unit & queries tests.
+ * only by Hive unit & queries tests.
*/
public class CryptoProcessor implements CommandProcessor {
public static final Logger LOG = LoggerFactory.getLogger(CryptoProcessor.class.getName());
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
index 04e5565..b8d938a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
@@ -167,7 +167,6 @@ public static HivePrivilegeObject getHiveObjectRef(HiveObjectRef privObj) throws
* Convert authorization plugin principal type to thrift principal type
* @param type
* @return
- * @throws HiveException
*/
public static PrincipalType getThriftPrincipalType(HivePrincipalType type) {
if(type == null){
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
index 5c2f389..40884c4 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
@@ -63,7 +63,7 @@ public Integer getToken() {
/**
* Do case lookup of PrivilegeType associated with this antlr token
- * @param privilegeName
+ * @param token
* @return corresponding PrivilegeType
*/
public static PrivilegeType getPrivTypeByToken(int token) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
index 4814fc1..25e0a88 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
@@ -87,7 +87,7 @@ void revokePrivileges(List hivePrincipals, List hi
/**
* Create role
* @param roleName
- * @param adminGrantor - The user in "[ WITH ADMIN ]" clause of "create role"
+ * @param adminGrantor - The user in "[ WITH ADMIN <user> ]" clause of "create role"
* @throws HiveAuthzPluginException
* @throws HiveAccessControlException
*/
@@ -232,7 +232,7 @@ void checkPrivileges(HiveOperationType hiveOpType, List inp
* returned, the Object has to be of type HiveAuthorizationTranslator
*
* @return
- * @throws HiveException
+ * @throws HiveAuthzPluginException
*/
Object getHiveAuthorizationTranslator() throws HiveAuthzPluginException;
@@ -246,19 +246,19 @@ void checkPrivileges(HiveOperationType hiveOpType, List inp
* (part 1) It expects a valid filter condition to be returned. Null indicates no filtering is
* required.
*
- * Example: table foo(c int) -> "c > 0 && c % 2 = 0"
+ * Example: table foo(c int) -&gt; "c &gt; 0 &amp;&amp; c % 2 = 0"
*
* (part 2) It expects a valid expression as used in a select clause. Null
* is NOT a valid option. If no transformation is needed simply return the
* column name.
*
- * Example: column a -> "a" (no transform)
+ * Example: column a -&gt; "a" (no transform)
*
- * Example: column a -> "reverse(a)" (call the reverse function on a)
+ * Example: column a -&gt; "reverse(a)" (call the reverse function on a)
*
- * Example: column a -> "5" (replace column a with the constant 5)
+ * Example: column a -&gt; "5" (replace column a with the constant 5)
*
- * @return List
+ * @return List&lt;HivePrivilegeObject&gt;
* please return the list of HivePrivilegeObjects that need to be rewritten.
*
* @throws SemanticException
@@ -271,7 +271,6 @@ void checkPrivileges(HiveOperationType hiveOpType, List inp
* Returning false short-circuits the generation of row/column transforms.
*
* @return
- * @throws SemanticException
*/
public boolean needTransform();
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
index 41983f1..414004a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
@@ -136,7 +136,7 @@ public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String o
}
/**
- * Create HivePrivilegeObject of type {@link HivePrivilegeObjectType.COMMAND_PARAMS}
+ * Create HivePrivilegeObject of type {@link HivePrivilegeObject.HivePrivilegeObjectType#COMMAND_PARAMS}
* @param cmdParams
* @return
*/
@@ -204,7 +204,7 @@ public HivePrivObjectActionType getActionType() {
}
/**
- * Applicable columns in this object, when the type is {@link HivePrivilegeObjectType.TABLE}
+ * Applicable columns in this object, when the type is {@link HivePrivilegeObject.HivePrivilegeObjectType#TABLE_OR_VIEW}
* In case of DML read operations, this is the set of columns being used.
* Column information is not set for DDL operations and for tables being written into
* @return list of applicable columns
diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
index dacf7a9..394852f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
@@ -27,9 +27,6 @@
/**
* This method connects to the temporary storage.
*
- * @param hconf
- * HiveConf that contains the connection parameters.
- * @param sourceTask
* @return true if connection is successful, false otherwise.
*/
public boolean connect(StatsCollectionContext scc);
diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
index 3631b83..797096c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
+++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
@@ -31,15 +31,12 @@
* database (if not exist).
* This method is usually called in the Hive client side rather than by the mappers/reducers
* so that it is initialized only once.
- * @param hconf HiveConf that contains the configurations parameters used to connect to
- * intermediate stats database.
* @return true if initialization is successful, false otherwise.
*/
public boolean init(StatsCollectionContext context);
/**
* This method connects to the intermediate statistics database.
- * @param hconf HiveConf that contains the connection parameters.
* @return true if connection is successful, false otherwise.
*/
public boolean connect(StatsCollectionContext context);
diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index 76f7dae..331ec95 100644
--- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -655,7 +655,7 @@ private static boolean containsNonPositives(List vals) {
}
/**
- * Get sum of all values in the list that are >0
+ * Get sum of all values in the list that are &gt;0
* @param vals
* - list of values
* @return sum
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
index e8f7e15..621405c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
@@ -30,7 +30,6 @@
/**
* Add data to UDF prior to initialization.
* An exception may be thrown if the UDF doesn't know what to do with this data.
- * @param params UDF-specific data to add to the UDF
*/
void setTypeInfo(TypeInfo typeInfo) throws UDFArgumentException;
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
index 624afd1..e14c59a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
@@ -139,7 +139,7 @@ private void char2byte(int radix, int fromPos) {
}
/**
- * Convert numbers between different number bases. If toBase>0 the result is
+ * Convert numbers between different number bases. If toBase&gt;0 the result is
* unsigned, otherwise it is signed.
*
*/
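The signed/unsigned distinction in the comment above can be illustrated with a small standalone sketch (plain Java, not the UDF itself): a positive target base formats the two's-complement bit pattern as an unsigned number, while a negative target base keeps the sign.

    public class ConvSignednessSketch {
      public static void main(String[] args) {
        long value = -7;
        // Positive toBase: unsigned interpretation of the 64-bit pattern.
        System.out.println(Long.toUnsignedString(value, 16)); // fffffffffffffff9
        // Negative toBase: signed result, per the convention described above.
        System.out.println(Long.toString(value, 16));         // -7
      }
    }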
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
index fa0fe53..9d44001 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
@@ -36,7 +36,7 @@
* 'Ref' parse_url('http://facebook.com/path/p1.php?query=1#Ref', 'PROTOCOL')
* will return 'http' Possible values are
* HOST,PATH,QUERY,REF,PROTOCOL,AUTHORITY,FILE,USERINFO Also you can get a value
- * of particular key in QUERY, using syntax QUERY: eg: QUERY:k1.
+ * of particular key in QUERY, using syntax QUERY:&lt;KEY_NAME&gt; eg: QUERY:k1.
*/
@Description(name = "parse_url",
value = "_FUNC_(url, partToExtract[, key]) - extracts a part from a URL",
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
index 449848a..12a5b5d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
@@ -75,7 +75,7 @@ public DoubleWritable evaluate(LongWritable a) {
/**
* Get the sign of the decimal input
*
- * @param dec decimal input
+ * @param decWritable decimal input
*
* @return -1, 0, or 1 representing the sign of the input decimal
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
index 61b777a..633c7d7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
@@ -47,12 +47,12 @@
* Donald Knuth.
*
* Incremental:
- * n :
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n :
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n :
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) :
- * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)):
- * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)):
+ * n : &lt;count&gt;
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
+ * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): &lt;variance * n&gt;
+ * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): &lt;variance * n&gt;
*
* Merge:
* c_(A,B) = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/(n_A+n_B)
@@ -136,12 +136,12 @@ public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticE
* algorithm, based on work by Philippe Pébay and Donald Knuth.
*
* Incremental:
- * n :
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n :
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n :
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) :
- * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)):
- * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)):
+ * n : &lt;count&gt;
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
+ * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): &lt;variance * n&gt;
+ * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): &lt;variance * n&gt;
*
* Merge:
* c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
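Restating the recurrences quoted in this comment in conventional notation (only the notation is new): with $m_{x,n}$, $m_{y,n}$ the running means, $c_n$ the co-moment sum and $v_{x,n}$, $v_{y,n}$ the variance sums,

$$m_{x,n} = m_{x,n-1} + \frac{x_n - m_{x,n-1}}{n}, \qquad m_{y,n} = m_{y,n-1} + \frac{y_n - m_{y,n-1}}{n},$$
$$c_n = c_{n-1} + (x_n - m_{x,n-1})(y_n - m_{y,n}), \qquad v_{x,n} = v_{x,n-1} + (x_n - m_{x,n})(x_n - m_{x,n-1}),$$

two partial aggregates $A$ and $B$ merge as

$$c_{A \cup B} = c_A + c_B + (m_{x,A} - m_{x,B})(m_{y,A} - m_{y,B})\,\frac{n_A n_B}{n_A + n_B},$$

and, because $c_n$, $v_{x,n}$ and $v_{y,n}$ are all scaled by $n$, the final correlation is simply $c_n / \sqrt{v_{x,n}\, v_{y,n}}$.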
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
index 8221c1b..fb62487 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
@@ -44,10 +44,10 @@
* Arbitrary-Order Statistical Moments", Philippe Pebay, Sandia Labs):
*
* Incremental:
- * n :
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n :
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n :
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) :
+ * n : &lt;count&gt;
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
*
* Merge:
* c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
@@ -128,10 +128,10 @@ public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticE
* http://infoserve.sandia.gov/sand_doc/2008/086212.pdf
*
* Incremental:
- * n :
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n :
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n :
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) :
+ * n : &lt;count&gt;
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
*
* Merge:
* c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
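As a cross-check of the incremental and merge formulas quoted above, here is a minimal standalone sketch of the same recurrences (field and method names are illustrative, not the evaluator's actual members):

    // covar holds n times the covariance, as in the comment above.
    public class CovarianceSketch {
      long n;
      double xavg, yavg, covar;

      void iterate(double x, double y) {
        n++;
        double xavgOld = xavg;
        xavg += (x - xavg) / n;              // mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n
        yavg += (y - yavg) / n;              // my_n = my_(n-1) + [y_n - my_(n-1)]/n
        covar += (x - xavgOld) * (y - yavg); // c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n)
      }

      void merge(CovarianceSketch other) {
        if (other.n == 0) { return; }
        if (n == 0) { n = other.n; xavg = other.xavg; yavg = other.yavg; covar = other.covar; return; }
        long total = n + other.n;
        // c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
        covar += other.covar + (xavg - other.xavg) * (yavg - other.yavg) * ((double) n * other.n / total);
        xavg = (n * xavg + other.n * other.xavg) / total;
        yavg = (n * yavg + other.n * other.yavg) / total;
        n = total;
      }

      double covarPop() { return n == 0 ? Double.NaN : covar / n; }
    }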
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
index 3a98276..2acd7ff 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
@@ -44,7 +44,7 @@
* accept arguments of complex types, and return complex types. 2. It can accept
* variable length of arguments. 3. It can accept an infinite number of function
* signature - for example, it's easy to write a GenericUDAF that accepts
- * array, array> and so on (arbitrary levels of nesting).
+ * array&lt;int&gt;, array&lt;array&lt;int&gt;&gt; and so on (arbitrary levels of nesting).
*/
@UDFType(deterministic = true)
public abstract class GenericUDAFEvaluator implements Closeable {
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
index 376b73e..544f7ab 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.io.IntWritable;
/**
- * abstract class for Lead & lag UDAFs GenericUDAFLeadLag.
+ * abstract class for Lead &amp; lag UDAFs GenericUDAFLeadLag.
*
*/
public abstract class GenericUDAFLeadLag extends AbstractGenericUDAFResolver {
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
index 6b67dea..5b7acd2 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
@@ -61,7 +61,7 @@
* accept arguments of complex types, and return complex types. 2. It can accept
* variable length of arguments. 3. It can accept an infinite number of function
* signature - for example, it's easy to write a GenericUDF that accepts
- * array, array> and so on (arbitrary levels of nesting). 4. It
+ * array&lt;int&gt;, array&lt;array&lt;int&gt;&gt; and so on (arbitrary levels of nesting). 4. It
* can do short-circuit evaluations using DeferedObject.
*/
@UDFType(deterministic = true)
@@ -216,7 +216,7 @@ public void close() throws IOException {
/**
* Some functions like comparisons may be affected by appearing order of arguments.
- * This is to convert a function, such as 3 > x to x < 3. The flip function of
+ * This is to convert a function, such as 3 &gt; x to x &lt; 3. The flip function of
* GenericUDFOPGreaterThan is GenericUDFOPLessThan.
*/
public GenericUDF flip() {
@@ -227,7 +227,6 @@ public GenericUDF flip() {
* Gets the negative function of the current one. E.g., GenericUDFOPNotEqual for
* GenericUDFOPEqual, or GenericUDFOPNull for GenericUDFOPNotNull.
* @return Negative function
- * @throws UDFArgumentException
*/
public GenericUDF negative() {
throw new UnsupportedOperationException("Negative function doesn't exist for " + getFuncName());
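The flip/negative contract described above can be illustrated with a small self-contained sketch (a plain Java enum, not Hive's GenericUDF hierarchy): flip swaps the operand order of a comparison, negative returns the operator that accepts exactly the rows the original rejects.

    enum Comparison {
      LESS_THAN, GREATER_THAN, EQUAL, NOT_EQUAL;

      // flip: rewrite "3 > x" as "x < 3" by swapping operand order.
      Comparison flip() {
        switch (this) {
          case LESS_THAN:    return GREATER_THAN;
          case GREATER_THAN: return LESS_THAN;
          default:           return this; // = and <> are symmetric
        }
      }

      // negative: e.g. the negative of EQUAL is NOT_EQUAL.
      Comparison negative() {
        switch (this) {
          case EQUAL:     return NOT_EQUAL;
          case NOT_EQUAL: return EQUAL;
          default: throw new UnsupportedOperationException("no negative defined in this sketch");
        }
      }
    }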
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
index 606cb22..e415ad6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
@@ -37,7 +37,7 @@
/**
* Generic UDF for string function
- * CONCAT_WS(sep, [string | array(string)]+).
+ * CONCAT_WS(sep, [string | array(string)]+).
* This mimics the function from
* MySQL http://dev.mysql.com/doc/refman/5.0/en/string-functions.html#
* function_concat-ws
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
index e1fdc41..7667301 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
@@ -67,7 +67,7 @@
/**
* IF(expr1,expr2,expr3)
- * If expr1 is TRUE (expr1 <> 0 and expr1 <> NULL) then IF() returns expr2;
+ * If expr1 is TRUE (expr1 &lt;&gt; 0 and expr1 &lt;&gt; NULL) then IF() returns expr2;
* otherwise it returns expr3. IF() returns a numeric or string value, depending
* on the context in which it is used.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
index 4d9691e..b6de9e9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
@@ -40,7 +40,7 @@
* GenericUDFTimestamp
*
* Example usage:
- * ... CAST( as TIMESTAMP) ...
+ * ... CAST(&lt;Timestamp string&gt; as TIMESTAMP) ...
*
* Creates a TimestampWritable object using PrimitiveObjectInspectorConverter
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
index e644320..5e0a33c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
@@ -34,7 +34,7 @@
* GenericUDFIntervalDayTime
*
* Example usage:
-* ... CAST( as INTERVAL DAY TO SECOND) ...
+* ... CAST(&lt;Interval string&gt; as INTERVAL DAY TO SECOND) ...
*
* Creates a HiveIntervalDayTimeWritable object using PrimitiveObjectInspectorConverter
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
index 92a40f8..0008008 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
@@ -34,7 +34,7 @@
* GenericUDFIntervalYearMonth
*
* Example usage:
-* ... CAST( as INTERVAL YEAR TO MONTH) ...
+* ... CAST(&lt;Interval string&gt; as INTERVAL YEAR TO MONTH) ...
*
* Creates a HiveIntervalYearMonthWritable object using PrimitiveObjectInspectorConverter
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
index a93a264..987df50 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
@@ -41,7 +41,7 @@
* Additionally setup GenericUDTF with MapredContext before initializing.
* This is only called in runtime of MapRedTask.
*
- * @param context context
+ * @param mapredContext context
*/
public void configure(MapredContext mapredContext) {
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
index cc2b77b..7611404 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
@@ -70,7 +70,7 @@
* where the first occurrence was LATE, followed by zero or more EARLY flights,
* followed by a ONTIME or EARLY flight.
* symbols specify a list of name, expression pairs. For e.g.
- * 'LATE', arrival_delay > 0, 'EARLY', arrival_delay < 0 , 'ONTIME', arrival_delay == 0.
+ * 'LATE', arrival_delay &gt; 0, 'EARLY', arrival_delay &lt; 0 , 'ONTIME', arrival_delay == 0.
* These symbols can be used in the Pattern defined above.
* resultSelectList specified as a select list.
* The expressions in the selectList are evaluated in the context where all the
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
index 7b30838..0da1e19 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
@@ -62,7 +62,7 @@
* Based on Hive {@link GenericUDAFEvaluator}. Break up the responsibility of the old AbstractTableFunction
* class into a Resolver and Evaluator.
*
- * The Evaluator also holds onto the {@link TableFunctionDef}. This provides information
+ * The Evaluator also holds onto the {@link PartitionedTableFunctionDef}. This provides information
* about the arguments to the function, the shape of the Input partition and the Partitioning details.
* The Evaluator is responsible for providing the 2 execute methods:
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
index 71034d7..c953710 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
@@ -36,15 +36,15 @@
* old AbstractTableFunction class into a Resolver and Evaluator.
* The Resolver is responsible for:
*
- * - setting up the {@link tableFunctionEvaluator}
+ * - setting up the {@link TableFunctionEvaluator}
* - Setting up the The raw and output ObjectInspectors of the Evaluator.
- * - The Evaluator also holds onto the {@link TableFunctionDef}. This provides information
+ * - The Evaluator also holds onto the {@link PartitionedTableFunctionDef}. This provides information
* about the arguments to the function, the shape of the Input partition and the Partitioning details.
*
* The Resolver for a function is obtained from the {@link FunctionRegistry}. The Resolver is initialized
* by the following 4 step process:
*
- * - The initialize method is called; which is passed the {@link PTFDesc} and the {@link TableFunctionDef}.
+ * - The initialize method is called; which is passed the {@link PTFDesc} and the {@link PartitionedTableFunctionDef}.
* - The resolver is then asked to setup the Raw ObjectInspector. This is only required if the Function reshapes
* the raw input.
* - Once the Resolver has had a chance to compute the shape of the Raw Input that is fed to the partitioning
@@ -110,8 +110,6 @@ public TableFunctionEvaluator getEvaluator() {
* exist for all the Def (ArgDef, ColumnDef, WindowDef..). It is the responsibility of
* the TableFunction to construct the {@link ExprNodeEvaluator evaluators} and setup the OI.
*
- * @param tblFuncDef
- * @param ptfDesc
* @throws HiveException
*/
public abstract void initializeOutputOI() throws HiveException;