stack, NodeProcessorCtx procCtx,
/**
* Generate predicate.
*
- * Subclass should implement the function. Please refer to {@link OpProcFactory.FilterPPR}
+ * Subclass should implement the function. Please refer to {@link org.apache.hadoop.hive.ql.optimizer.ppr.OpProcFactory.FilterPPR}
*
* @param procCtx
* @param fop
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java
index c953e036f6..f70a6dcbfd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java
@@ -96,7 +96,7 @@
* in the query plan and merge them if they met some preconditions.
*
* TS TS TS
- * | | -> / \
+ * | | -&gt; / \
* Op Op Op Op
*
* Now the rule has been extended to find opportunities to other operators
@@ -105,7 +105,7 @@
* TS1 TS2 TS1 TS2 TS1 TS2
* | | | | | |
* | RS | RS | RS
- * \ / \ / -> \ /
+ * \ / \ / -&gt; \ /
* MapJoin MapJoin MapJoin
* | | / \
* Op Op Op Op
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
index a5400d6b27..e581665950 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SparkMapJoinProcessor.java
@@ -37,9 +37,7 @@
* convert a regular join to a a map-side join.
*
* @param conf
- * @param opParseCtxMap
* @param op join operator
- * @param joinTree qb join tree
* @param bigTablePos position of the source to be read as part of
* map-reduce framework. All other sources are cached in memory
* @param noCheckOuterJoin
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index 1f8a48c7ad..6ed8b92178 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -613,7 +613,7 @@ public static boolean orderRelNode(RelNode rel) {
/**
* Get top level select starting from root. Assumption here is root can only
- * be Sort & Project. Also the top project should be at most 2 levels below
+ * be Sort &amp; Project. Also the top project should be at most 2 levels below
* Sort; i.e Sort(Limit)-Sort(OB)-Select
*
* @param rootRel
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
index 67312a4ee1..f29b1f3c26 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java
@@ -120,7 +120,7 @@ public static HiveProject create(RelOptCluster cluster, RelNode child, List ex
* are projected multiple times.
*
*
- * This method could optimize the result as {@link #permute} does, but does
+ * This method could optimize the result as permute does, but does
* not at present.
*
* @param rel
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
index 600c7c0d07..1d10c60d77 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExceptRewriteRule.java
@@ -68,8 +68,8 @@
* have m+n=a, 2m+n=b where m is the #row in R1 and n is the #row in R2 then
* m=b-a, n=2a-b, m-n=2b-3a
* if it is except (distinct)
- * then R5 = Fil (b-a>0 && 2a-b=0) R6 = select only keys from R5
- * else R5 = Fil (2b-3a>0) R6 = UDTF (R5) which will explode the tuples based on 2b-3a.
+ * then R5 = Fil (b-a&gt;0 &amp;&amp; 2a-b=0) R6 = select only keys from R5
+ * else R5 = Fil (2b-3a&gt; 0) R6 = UDTF (R5) which will explode the tuples based on 2b-3a.
* Note that NULLs are handled the same as other values. Please refer to the test cases.
*/
public class HiveExceptRewriteRule extends RelOptRule {
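A worked instance of the counting trick above may help (illustration only, not part of the patch): with per-branch weights 2 for R1 and 1 for R2, the two aggregates a and b are enough to recover both multiplicities for a key.

```java
// Illustration of the EXCEPT-rewrite arithmetic described above (not Hive code):
// for a single key, a = m + n and b = 2m + n, so m and n can be recovered.
public class ExceptCountsDemo {
  public static void main(String[] args) {
    long m = 3;                  // rows with this key in R1
    long n = 1;                  // rows with this key in R2
    long a = m + n;              // count(*) over the union
    long b = 2 * m + n;          // sum of per-branch weights (2 for R1, 1 for R2)

    long mRecovered = b - a;         // = m
    long nRecovered = 2 * a - b;     // = n
    long diff       = 2 * b - 3 * a; // = m - n, used by EXCEPT ALL

    // EXCEPT DISTINCT keeps the key when b - a > 0 and 2a - b = 0
    boolean keptByExceptDistinct = mRecovered > 0 && nRecovered == 0;
    // EXCEPT ALL emits the key (m - n) times when that difference is positive
    long copiesForExceptAll = Math.max(diff, 0);

    System.out.println(mRecovered + " " + nRecovered + " "
        + keptByExceptDistinct + " " + copiesForExceptAll);
  }
}
```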
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
index c331eab37d..0c8c5e1a8e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
@@ -46,10 +46,10 @@
import com.google.common.collect.Sets;
/** Not an optimization rule.
- * Rule to aid in translation from Calcite tree -> Hive tree.
+ * Rule to aid in translation from Calcite tree -&gt; Hive tree.
* Transforms :
* Left Right Left Right
- * \ / -> \ /
+ * \ / -&gt; \ /
* Join HashExchange HashExchange
* \ /
* Join
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
index e231b1d242..1e39a1bf79 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePointLookupOptimizerRule.java
@@ -74,8 +74,8 @@
*
* Similarily
*
- * v1 <= c1 and c1 <= v2
- *
+ * v1 &lt;= c1 and c1 &lt;= v2
+ *
* is rewritten to c1 between v1 and v2
*/
public abstract class HivePointLookupOptimizerRule extends RelOptRule {
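For reference, the equivalence the rule relies on can be spot-checked with a few lines of plain Java (a sketch, not the rule's Calcite-level rewrite):

```java
// Quick check (illustration only) that "v1 <= c1 AND c1 <= v2" and
// "c1 BETWEEN v1 AND v2" accept exactly the same values.
public class BetweenRewriteDemo {
  static boolean rangeForm(int c1, int v1, int v2) {
    return v1 <= c1 && c1 <= v2;
  }

  static boolean betweenForm(int c1, int v1, int v2) {
    // SQL BETWEEN is inclusive on both ends
    return c1 >= v1 && c1 <= v2;
  }

  public static void main(String[] args) {
    for (int c1 = -2; c1 <= 12; c1++) {
      if (rangeForm(c1, 0, 10) != betweenForm(c1, 0, 10)) {
        throw new AssertionError("forms disagree at " + c1);
      }
    }
    System.out.println("range form and BETWEEN form agree on all tested values");
  }
}
```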
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
index f7712e6c33..cdc94d5629 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsWithStatsRule.java
@@ -52,12 +52,12 @@
* column statistics (if available).
*
* For instance, given the following predicate:
- * a > 5
+ * a &gt; 5
* we can infer that the predicate will evaluate to false if the max
* value for column a is 4.
*
* Currently we support the simplification of:
- * - =, >=, <=, >, <
+ * - =, &gt;=, &lt;=, &gt;, &lt;
* - IN
* - IS_NULL / IS_NOT_NULL
*/
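A simplified sketch of the min/max reasoning (the actual rule operates on RexNode predicates and Calcite metadata, so this is illustration only):

```java
// Sketch (not the rule's actual code) of how a comparison can be simplified
// from column statistics: if max(a) = 4, the predicate "a > 5" can never be true.
public class StatsFoldingDemo {
  /** Returns TRUE/FALSE when the predicate is decided by min/max, or null if unknown. */
  static Boolean foldGreaterThan(long constant, long colMin, long colMax) {
    if (colMax <= constant) {
      return Boolean.FALSE;       // no value can exceed the constant
    }
    if (colMin > constant) {
      return Boolean.TRUE;        // every value exceeds the constant
    }
    return null;                  // statistics alone cannot decide
  }

  public static void main(String[] args) {
    System.out.println(foldGreaterThan(5, 0, 4));   // FALSE, as in the javadoc example
    System.out.println(foldGreaterThan(5, 10, 99)); // TRUE
    System.out.println(foldGreaterThan(5, 0, 99));  // null -> keep the predicate
  }
}
```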
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
index 7ab4e125cc..50ed8eda89 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
@@ -69,9 +69,9 @@
* Sub-queries are represented by {@link RexSubQuery} expressions.
*
*
* A sub-query may or may not be correlated. If a sub-query is correlated,
- * the wrapped {@link RelNode} will contain a {@link RexCorrelVariable} before
- * the rewrite, and the product of the rewrite will be a {@link Correlate}.
- * The Correlate can be removed using {@link RelDecorrelator}.
+ * the wrapped {@link RelNode} will contain a {@link org.apache.calcite.rex.RexCorrelVariable} before
+ * the rewrite, and the product of the rewrite will be a {@link org.apache.calcite.rel.core.Correlate}.
+ * The Correlate can be removed using {@link org.apache.calcite.sql2rel.RelDecorrelator}.
*/
public class HiveSubQueryRemoveRule extends RelOptRule {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java
index 8f96288aa7..c51ae0d879 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCAggregationPushDownRule.java
@@ -36,7 +36,7 @@
/**
* JDBCAggregationPushDownRule convert a {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate}
- * into a {@link org.apache.calcite.adapter.jdbc.JdbcRules.JdbcAggregateRule.JdbcAggregate}
+ * into a {@link JdbcAggregate}
* and pushes it down below the {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter}
* operator so it will be sent to the external table.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java
index 5c03f87361..0e88f53817 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/jdbc/JDBCProjectPushDownRule.java
@@ -33,7 +33,7 @@
/**
* JDBCProjectPushDownRule convert a {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject}
- * into a {@link org.apache.calcite.adapter.jdbc.JdbcRules.JdbcAggregateRule.JdbcProject}
+ * into a {@link JdbcProject}
* and pushes it down below the {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter}}
* operator so it will be sent to the external table.
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java
index aabd75ee1f..a8eb070afc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateIncrementalRewritingRule.java
@@ -51,7 +51,7 @@
* SELECT a, b, SUM(x) AS s, COUNT(*) AS c --NEW DATA
* FROM TAB_A
* JOIN TAB_B ON (TAB_A.a = TAB_B.z)
- * WHERE TAB_A.ROW_ID > 5
+ * WHERE TAB_A.ROW_ID &gt; 5
* GROUP BY a, b) inner_subq
* GROUP BY a, b;
*
@@ -61,10 +61,10 @@
* SELECT a, b, SUM(x) AS s, COUNT(*) AS c --NEW DATA
* FROM TAB_A
* JOIN TAB_B ON (TAB_A.a = TAB_B.z)
- * WHERE TAB_A.ROW_ID > 5
+ * WHERE TAB_A.ROW_ID &gt; 5
* GROUP BY a, b) source
* ON (mv.a = source.a AND mv.b = source.b)
- * WHEN MATCHED AND mv.c + source.c <> 0
+ * WHEN MATCHED AND mv.c + source.c &lt;&gt; 0
* THEN UPDATE SET mv.s = mv.s + source.s, mv.c = mv.c + source.c
* WHEN NOT MATCHED
* THEN INSERT VALUES (source.a, source.b, s, c);
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
index 70f83433d8..b304e38edd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
@@ -69,7 +69,7 @@
* 1. Change the output col/ExprNodeColumn names to external names.
* 2. Verify if we need to use the "KEY."/"VALUE." in RS cols; switch to
* external names if possible.
- * 3. In ExprNode & in ColumnInfo the tableAlias/VirtualColumn is specified
+ * 3. In ExprNode &amp; in ColumnInfo the tableAlias/VirtualColumn is specified
* differently for different GB/RS in pipeline. Remove the different treatments.
* 4. VirtualColMap needs to be maintained
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
index 40cfcf5a8f..9377fd282c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java
@@ -203,7 +203,7 @@ private void findPossibleAutoConvertedJoinOperators() throws SemanticException {
/**
* Detect correlations and transform the query tree.
*
- * @param pactx
+ * @param pctx
* current parse context
* @throws SemanticException
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
index c553dcaa88..d2cf78bee5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
@@ -97,7 +97,7 @@ protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws SemanticE
* @param throwException if throw a exception when the input operator has multiple parents
* @return the single parent or null when the input operator has multiple parents and
* throwException is false;
- * @throws HiveException
+ * @throws SemanticException
*/
protected static Operator> getSingleParent(Operator> operator,
boolean throwException) throws SemanticException {
@@ -127,7 +127,7 @@ protected static boolean hasGroupingSet(ReduceSinkOperator cRS) throws SemanticE
* @param throwException if throw a exception when the input operator has multiple children
* @return the single child or null when the input operator has multiple children and
* throwException is false;
- * @throws HiveException
+ * @throws SemanticException
*/
protected static Operator> getSingleChild(Operator> operator,
boolean throwException) throws SemanticException {
@@ -477,8 +477,7 @@ protected static void isNullOperator(Operator> operator) throws SemanticExcept
* @param newOperator the operator will be inserted between child and parent
* @param child
* @param parent
- * @param context
- * @throws HiveException
+ * @throws SemanticException
*/
protected static void insertOperatorBetween(
Operator> newOperator, Operator> parent, Operator> child)
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
index 06498eb637..076a9961c7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPruner.java
@@ -119,18 +119,18 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
*
* Complete dynamic-multi-dimension collection
*
- * (0,0) (1,a) * -> T
- * (0,1) (1,b) -> T
- * (0,2) (1,c) *-> F
- * (0,3) (1,other)-> F
- * (1,0) (2,a)-> F
- * (1,1) (2,b) * -> T
- * (1,2) (2,c)-> F
- * (1,3) (2,other)-> F
- * (2,0) (other,a) -> T
- * (2,1) (other,b) -> T
- * (2,2) (other,c) -> T
- * (2,3) (other,other) -> T
+ * (0,0) (1,a) * -&gt; T
+ * (0,1) (1,b) -&gt; T
+ * (0,2) (1,c) *-&gt; F
+ * (0,3) (1,other)-&gt; F
+ * (1,0) (2,a)-&gt; F
+ * (1,1) (2,b) * -&gt; T
+ * (1,2) (2,c)-&gt; F
+ * (1,3) (2,other)-&gt; F
+ * (2,0) (other,a) -&gt; T
+ * (2,1) (other,b) -&gt; T
+ * (2,2) (other,c) -&gt; T
+ * (2,3) (other,other) -&gt; T
* * is skewed value entry
*
* Expression Tree : ((c1=1) and (c2=a)) or ( (c1=3) or (c2=b))
@@ -171,7 +171,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
*
*
* child_nd instanceof ExprNodeConstantDesc
- * && ((ExprNodeConstantDesc) child_nd).getValue() == null)
+ * &amp;&amp; ((ExprNodeConstantDesc) child_nd).getValue() == null)
*
*
*
@@ -410,7 +410,7 @@ private static void decideDefaultDirSelection(Partition part, List selecte
* 2. all other cases, select the directory
* Use case #2:
* Multiple dimension collection represents skewed elements so that walk through tree one by one.
- * Cell is a List<String> representing the value mapping from index path and skewed value.
+ * Cell is a List&lt;String&gt; representing the value mapping from index path and skewed value.
* skewed column: C1, C2, C3
* skewed value: (1,a,x), (2,b,x), (1,c,x), (2,a,y)
* Other: represent value for the column which is not part of skewed value.
@@ -428,8 +428,8 @@ private static void decideDefaultDirSelection(Partition part, List selecte
* ==============
* please see another example in {@link ListBucketingPruner#prune}
* We will use a HasMap to represent the Dynamic-Multiple-Dimension collection:
- * 1. Key is List<Integer> representing the index path to the cell
- * 2. value represents the cell (Boolean for use case #1, List<String> for case #2)
+ * 1. Key is List&lt;Integer&gt; representing the index path to the cell
+ * 2. value represents the cell (Boolean for use case #1, List&lt;String&gt; for case #2)
* For example:
* 1. skewed column (list): C1, C2, C3
* 2. skewed value (list of list): (1,a,x), (2,b,x), (1,c,x), (2,a,y)
@@ -446,7 +446,7 @@ private static void decideDefaultDirSelection(Partition part, List selecte
*
* We use the index,starting at 0. to construct hashmap representing dynamic-multi-dimension
* collection:
- * key (what skewed value key represents) -> value (Boolean for use case #1, List<String> for case
+ * key (what skewed value key represents) -&gt; value (Boolean for use case #1, List&lt;String&gt; for case
* #2).
* (0,0,0) (1,a,x)
* (0,0,1) (1,a,y)
@@ -572,18 +572,18 @@ private static void decideDefaultDirSelection(Partition part, List selecte
* Index: (0,1,2) (0,1,2,3)
*
* Complete dynamic-multi-dimension collection
- * (0,0) (1,a) * -> T
- * (0,1) (1,b) -> T
- * (0,2) (1,c) *-> F
- * (0,3) (1,other)-> F
- * (1,0) (2,a)-> F
- * (1,1) (2,b) * -> T
- * (1,2) (2,c)-> F
- * (1,3) (2,other)-> F
- * (2,0) (other,a) -> T
- * (2,1) (other,b) -> T
- * (2,2) (other,c) -> T
- * (2,3) (other,other) -> T
+ * (0,0) (1,a) * -&gt; T
+ * (0,1) (1,b) -&gt; T
+ * (0,2) (1,c) *-&gt; F
+ * (0,3) (1,other)-&gt; F
+ * (1,0) (2,a)-&gt; F
+ * (1,1) (2,b) * -&gt; T
+ * (1,2) (2,c)-&gt; F
+ * (1,3) (2,other)-&gt; F
+ * (2,0) (other,a) -&gt; T
+ * (2,1) (other,b) -&gt; T
+ * (2,2) (other,c) -&gt; T
+ * (2,3) (other,other) -&gt; T
* * is skewed value entry
*
* @param uniqSkewedElements
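To make the index-path representation above concrete, here is a small standalone sketch of the assumed map shape (hypothetical names, not the pruner's internals):

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch (assumed shape, not the pruner's actual code) of the
// dynamic-multi-dimension collection described above: the key is the index
// path into the per-column unique skewed values, the value is the decision.
public class SkewedIndexMapDemo {
  public static void main(String[] args) {
    List<String> c1Values = Arrays.asList("1", "2", "other");
    List<String> c2Values = Arrays.asList("a", "b", "c", "other");

    Map<List<Integer>, Boolean> decisions = new HashMap<>();
    // (0,0) stands for (1,a); mark it selected, as in the table above.
    decisions.put(Arrays.asList(0, 0), Boolean.TRUE);
    // (1,2) stands for (2,c); mark it pruned.
    decisions.put(Arrays.asList(1, 2), Boolean.FALSE);

    List<Integer> cell = Arrays.asList(0, 0);
    System.out.println("cell " + cell + " = ("
        + c1Values.get(cell.get(0)) + "," + c2Values.get(cell.get(1)) + ") -> "
        + decisions.get(cell));
  }
}
```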
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
index 6c6908a9e8..8903eb7381 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java
@@ -104,7 +104,7 @@ private void initialize(HiveConf hiveConf) {
* invoke all the resolvers one-by-one, and alter the physical plan.
*
* @return PhysicalContext
- * @throws HiveException
+ * @throws SemanticException
*/
public PhysicalContext optimize() throws SemanticException {
for (PhysicalPlanResolver r : resolvers) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
index 691e9428d2..03324a6a1d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java
@@ -47,7 +47,6 @@
* Evaluate expression with partition columns
*
* @param expr
- * @param partSpec
* @param rowObjectInspector
* @return value returned by the expression
* @throws HiveException
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index 6aeb2a856f..6ba3f90505 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -224,45 +224,32 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
/**
* FILTER operator does not change the average row size but it does change the number of rows
* emitted. The reduction in the number of rows emitted is dependent on the filter expression.
- *
* Notations:
+ *
* - T(S) - Number of tuples in relations S
* - V(S,A) - Number of distinct values of attribute A in relation S
*
+ * Rules:
*
- * Rules:
- * - Column equals a constant T(S) = T(R) / V(R,A)
- *
- *
- * - Inequality conditions T(S) = T(R) / 3
- *
- *
- * - Not equals comparison - Simple formula T(S) = T(R)
- *
- * - Alternate formula T(S) = T(R) (V(R,A) - 1) / V(R,A)
- *
- *
- * - NOT condition T(S) = 1 - T(S'), where T(S') is the satisfying condition
- *
- *
- * - Multiple AND conditions Cascadingly apply the rules 1 to 3 (order doesn't matter)
- *
- *
- * - Multiple OR conditions - Simple formula is to evaluate conditions independently
- * and sum the results T(S) = m1 + m2
- *
- * - Alternate formula T(S) = T(R) * ( 1 - ( 1 - m1/T(R) ) * ( 1 - m2/T(R) ))
- *
+ *
+ * - Column equals a constant T(S) = T(R) / V(R,A)
+ * - Inequality conditions T(S) = T(R) / 3
+ * - Not equals comparison - Simple formula T(S) = T(R)
+ * - - Alternate formula T(S) = T(R) (V(R,A) - 1) / V(R,A)
+ * - NOT condition T(S) = 1 - T(S'), where T(S') is the satisfying condition
+ * - Multiple AND conditions Cascadingly apply the rules 1 to 3 (order doesn't matter)
+ * - Multiple OR conditions - Simple formula is to evaluate conditions independently
+ * and sum the results T(S) = m1 + m2
+ * - - Alternate formula T(S) = T(R) * ( 1 - ( 1 - m1/T(R) ) * ( 1 - m2/T(R) ))
+ *
* where, m1 is the number of tuples that satisfy condition1 and m2 is the number of tuples that
- * satisfy condition2
+ * satisfy condition2
*
- *
* Worst case: If no column statistics are available, then evaluation of predicate
* expression will assume worst case (i.e; half the input rows) for each of predicate expression.
- *
+ *
* For more information, refer 'Estimating The Cost Of Operations' chapter in
* "Database Systems: The Complete Book" by Garcia-Molina et. al.
- *
+ *
*/
public static class FilterStatsRule extends DefaultStatsRule implements NodeProcessor {
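The rules above translate into very small arithmetic; the sketch below applies them to made-up numbers (illustration only, the real FilterStatsRule works on ExprNodeDesc trees and Statistics objects):

```java
// Back-of-the-envelope version of the selectivity rules listed above.
public class FilterSelectivityDemo {
  static long equalsConstant(long rows, long distinctVals) {
    return rows / Math.max(distinctVals, 1);        // T(S) = T(R) / V(R,A)
  }

  static long inequality(long rows) {
    return rows / 3;                                // T(S) = T(R) / 3
  }

  static long andOf(long rows, long distinctVals) {
    // cascade: apply one rule to the output of the other
    return inequality(equalsConstant(rows, distinctVals));
  }

  static long orOf(long rows, long m1, long m2) {
    // alternate formula: T(R) * (1 - (1 - m1/T(R)) * (1 - m2/T(R)))
    double p1 = (double) m1 / rows, p2 = (double) m2 / rows;
    return Math.round(rows * (1 - (1 - p1) * (1 - p2)));
  }

  public static void main(String[] args) {
    long rows = 12000, ndv = 40;
    System.out.println(equalsConstant(rows, ndv)); // 300
    System.out.println(inequality(rows));          // 4000
    System.out.println(andOf(rows, ndv));          // 100
    System.out.println(orOf(rows, 300, 4000));     // 4200
  }
}
```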
@@ -1201,7 +1188,7 @@ private long evaluateChildExpr(Statistics stats, ExprNodeDesc child,
* available then a better estimate can be found by taking the smaller of product of V(R,[A,B,C])
* (product of distinct cardinalities of A,B,C) and T(R)/2.
*
- * T(R) = min (T(R)/2 , V(R,[A,B,C]) ---> [1]
+ * T(R) = min (T(R)/2 , V(R,[A,B,C]) ---&gt; [1]
*
* In the presence of grouping sets, map-side GBY will emit more rows depending on the size of
* grouping set (input rows * size of grouping set). These rows will get reduced because of
@@ -1645,12 +1632,12 @@ private boolean checkMapSideAggregation(GroupByOperator gop,
}
/**
- * JOIN operator can yield any of the following three cases - The values of join keys are
+ * JOIN operator can yield any of the following three cases - The values of join keys are
* disjoint in both relations in which case T(RXS) = 0 (we need histograms for this) - Join
* key is primary key on relation R and foreign key on relation S in which case every tuple in S
- * will have a tuple in R T(RXS) = T(S) (we need histograms for this) - Both R & S relation
+ * will have a tuple in R T(RXS) = T(S) (we need histograms for this) - Both R &amp; S relation
* have same value for join-key. Ex: bool column with all true values T(RXS) = T(R) * T(S) (we
- * need histograms for this. counDistinct = 1 and same value)
+ * need histograms for this. counDistinct = 1 and same value)
*
* In the absence of histograms, we can use the following general case
*
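For context, the usual textbook fallback referenced here divides the cross-product size by the larger NDV of the join column; a minimal sketch of that formula, not Hive's implementation:

```java
// Standard textbook fallback (Garcia-Molina et al.) when no histograms are
// available: divide the cross product by the larger NDV of the join column.
public class JoinCardinalityDemo {
  static long estimateJoinRows(long tRows, long sRows, long ndvInT, long ndvInS) {
    return (tRows * sRows) / Math.max(Math.max(ndvInT, ndvInS), 1);
  }

  public static void main(String[] args) {
    // T(R) = 1,000,000, T(S) = 10,000, V(R,a) = 50,000, V(S,a) = 10,000
    System.out.println(estimateJoinRows(1_000_000L, 10_000L, 50_000L, 10_000L)); // 200000
  }
}
```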
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
index 7b32020f1a..746d0dc55e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
@@ -100,7 +100,7 @@ public String getName() {
/**
* For every node in this subtree, make sure it's start/stop token's
* are set. Walk depth first, visit bottom up. Only updates nodes
- * with at least one token index < 0.
+ * with at least one token index &lt; 0.
*
* In contrast to the method in the parent class, this method is
* iterative.
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
index 41e3754cdd..4b2958af2b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/AcidExportSemanticAnalyzer.java
@@ -77,7 +77,7 @@ protected void analyze(ASTNode tree) throws SemanticException {
* were generated. It may also contain insert events that belong to transactions that aborted
* where the same constraints apply.
* In order to make the export artifact free of these constraints, the export does a
- * insert into tmpTable select * from <export table> to filter/apply the events in current
+ * insert into tmpTable select * from &lt;export table&gt; to filter/apply the events in current
* context and then export the tmpTable. This export artifact can now be imported into any
* table on any cluster (subject to schema checks etc).
* See {@link #analyzeAcidExport(ASTNode)}
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
index f5d79ed5ab..e385d4e755 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java
@@ -413,7 +413,7 @@ public void setExpressions(ArrayList columns)
/**
* Add order expressions from the list of expressions in the format of ASTNode
- * @param args
+ * @param nodes
*/
public void addExpressions(ArrayList nodes) {
for (int i = 0; i < nodes.size(); i++) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
index 055d454b21..48213d1b4e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
@@ -210,7 +210,7 @@ public boolean allowEventReplacementInto(Map params){
}
/**
- * Returns a predicate filter to filter an Iterable<Partition> to return all partitions
+ * Returns a predicate filter to filter an Iterable&lt;Partition&gt; to return all partitions
* that the current replication event specification is allowed to replicate-replace-into
*/
public Predicate allowEventReplacementInto() {
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
index c31666e419..3734882e9b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
@@ -120,7 +120,7 @@ public void setDenominator(int den) {
/**
* Gets the ON part's expression list.
*
- * @return ArrayList<ASTNode>
+ * @return ArrayList&lt;ASTNode&gt;
*/
public ArrayList getExprs() {
return exprs;
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
index 93641af215..d70353e358 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
@@ -929,7 +929,7 @@ public boolean getIsCascade() {
}
/**
- * @param cascade the isCascade to set
+ * @param isCascade the isCascade to set
*/
public void setIsCascade(boolean isCascade) {
this.isCascade = isCascade;
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
index ce85d40653..b693fdb845 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
@@ -86,12 +86,11 @@ public CreateViewDesc() {
* @param tblProps
* @param partColNames
* @param ifNotExists
- * @param orReplace
+ * @param replace
* @param isAlterViewAs
* @param inputFormat
* @param outputFormat
* @param location
- * @param serName
* @param serde
* @param storageHandler
* @param serdeProps
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java
index f9d545f040..ffb81b54b9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExportWork.java
@@ -102,7 +102,7 @@ public MmContext getMmContext() {
* For exporting Acid table, change the "pointer" to the temp table.
* This has to be done after the temp table is populated and all necessary Partition objects
* exist in the metastore.
- * See {@link org.apache.hadoop.hive.ql.parse.AcidExportAnalyzer#isAcidExport(ASTNode)}
+ * See {@link org.apache.hadoop.hive.ql.parse.AcidExportSemanticAnalyzer#isAcidExport(ASTNode)}
* for more info.
*/
public void acidPostProcess(Hive db) throws HiveException {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
index 9febee4043..80ce787bba 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
@@ -416,7 +416,7 @@ private static ExprNodeDesc backtrack(ExprNodeColumnDesc column, Operator> cur
/**
* Join keys are expressions based on the select operator. Resolve the expressions so they
* are based on the ReduceSink operator
- * SEL -> RS -> JOIN
+ * SEL -&gt; RS -&gt; JOIN
* @param source
* @param reduceSinkOp
* @return
@@ -666,10 +666,10 @@ public static PrimitiveTypeInfo deriveMinArgumentCast(
* @param inputOp
* Input Hive Operator
* @param startPos
- * starting position in the input operator schema; must be >=0 and <=
+ * starting position in the input operator schema; must be &gt;=0 and &lt;=
* endPos
* @param endPos
- * end position in the input operator schema; must be >=0.
+ * end position in the input operator schema; must be &gt;=0.
* @return List of ExprNodeDesc
*/
public static ArrayList genExprNodeDesc(Operator inputOp, int startPos, int endPos,
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
index 5f8cf54d57..86d4fefb7d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ListBucketingCtx.java
@@ -184,7 +184,6 @@ public void setDefaultDirName(String defaultDirName) {
/**
* check if list bucketing is enabled.
*
- * @param ctx
* @return
*/
public boolean isSkewedStoredAsDir() {
@@ -201,7 +200,6 @@ public boolean isSkewedStoredAsDir() {
* 0: not list bucketing
* int: no. of skewed columns
*
- * @param ctx
* @return
*/
public int calculateListBucketingLevel() {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index d5a30da419..bb063c52be 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@ -214,10 +214,10 @@ public void removePathToAlias(Path path){
}
/**
- * This is used to display and verify output of "Path -> Alias" in test framework.
+ * This is used to display and verify output of "Path -&gt; Alias" in test framework.
*
- * QTestUtil masks "Path -> Alias" and makes verification impossible.
- * By keeping "Path -> Alias" intact and adding a new display name which is not
+ * QTestUtil masks "Path -&gt; Alias" and makes verification impossible.
+ * By keeping "Path -&gt; Alias" intact and adding a new display name which is not
* masked by QTestUtil by removing prefix.
*
* Notes: we would still be masking for intermediate directories.
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index 76cf54ec3f..33a5371d1e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -1008,7 +1008,7 @@ public static String stripQuotes(String val) {
}
/**
- * Remove prefix from "Path -> Alias"
+ * Remove prefix from "Path -&gt; Alias"
* This is required for testing.
* In order to verify that path is right, we need to display it in expected test result.
* But, mask pattern masks path with some patterns.
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
index d24c4ef085..ba5d06e079 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
@@ -85,7 +85,7 @@ public String getDatabaseName() {
}
/**
- * @param databaseName
+ * @param dbName
* the dbName to set
*/
public void setDatabaseName(String dbName) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
index 18cf12c968..609d1740a6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java
@@ -77,7 +77,7 @@ public ShowFunctionsDesc(Path resFile, String pattern) {
/**
* @param pattern
* names of tables to show
- * @param like
+ * @param isLikePattern
* is like keyword used
*/
public ShowFunctionsDesc(Path resFile, String pattern, boolean isLikePattern) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
index 8bb40abc8d..52a5d1b22b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/SkewedColumnPositionPair.java
@@ -32,7 +32,7 @@
* 1. It's position in table column is 1.
* 2. It's position in skewed column list is 0.
*
- * This information will be used in {@FileSinkOperator} generateListBucketingDirName
+ * This information will be used in {@link org.apache.hadoop.hive.ql.exec.FileSinkOperator} generateListBucketingDirName
*/
public class SkewedColumnPositionPair {
private int tblColPosition;
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
index 3ed5cb22f6..2f1ec27c64 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
@@ -279,7 +279,6 @@ public SparkEdgeProperty getEdgeProperty(BaseWork a, BaseWork b) {
/**
* connect adds an edge between a and b. Both nodes have
* to be added prior to calling connect.
- * @param
*/
public void connect(BaseWork a, BaseWork b, SparkEdgeProperty edgeProp) {
workGraph.get(a).add(b);
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
index 3539f0d394..ac437783bc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
@@ -370,7 +370,6 @@ public int compareTo(Dependency o) {
/**
* connect adds an edge between a and b. Both nodes have
* to be added prior to calling connect.
- * @param
*/
public void connect(BaseWork a, BaseWork b,
TezEdgeProperty edgeProp) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
index adcf7078e1..bf5bb2464f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java
@@ -27,8 +27,8 @@
/**
- * All member variables should have a setters and getters of the form get<member name> and set<member name> or else they won't be recreated properly at run
+ * All member variables should have a setters and getters of the form get&lt;member
+ * name&gt; and set&lt;member name&gt; or else they won't be recreated properly at run
* time.
*
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
index caf0c67744..a69f762235 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java
@@ -35,17 +35,17 @@
private static final long serialVersionUID = 1L;
/**
- * GLOBAL No key. All rows --> 1 full aggregation on end of input
+ * GLOBAL No key. All rows --&gt; 1 full aggregation on end of input
*
- * HASH Rows aggregated in to hash table on group key -->
+ * HASH Rows aggregated in to hash table on group key --&gt;
* 1 partial aggregation per key (normally, unless there is spilling)
*
* MERGE_PARTIAL As first operator in a REDUCER, partial aggregations come grouped from
- * reduce-shuffle -->
+ * reduce-shuffle --&gt;
* aggregate the partial aggregations and emit full aggregation on
* endGroup / closeOp
*
- * STREAMING Rows come from PARENT operator already grouped -->
+ * STREAMING Rows come from PARENT operator already grouped --&gt;
* aggregate the rows and emit full aggregation on key change / closeOp
*
* NOTE: Hash can spill partial result rows prematurely if it runs low on memory.
@@ -123,16 +123,16 @@ public boolean getIsVectorizationGroupByComplexTypesEnabled() {
*
* Decides using GroupByDesc.Mode and whether there are keys.
*
- * Mode.COMPLETE --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.STREAMING)
+ * Mode.COMPLETE --&gt; (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.STREAMING)
*
- * Mode.HASH --> ProcessingMode.HASH
+ * Mode.HASH --&gt; ProcessingMode.HASH
*
- * Mode.MERGEPARTIAL --> (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.MERGE_PARTIAL)
+ * Mode.MERGEPARTIAL --&gt; (numKeys == 0 ? ProcessingMode.GLOBAL : ProcessingMode.MERGE_PARTIAL)
*
* Mode.PARTIAL1,
* Mode.PARTIAL2,
* Mode.PARTIALS,
- * Mode.FINAL --> ProcessingMode.STREAMING
+ * Mode.FINAL --&gt; ProcessingMode.STREAMING
*
*/
public static ProcessingMode groupByDescModeToVectorProcessingMode(GroupByDesc.Mode mode,
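The mapping above can be read as a simple switch; the standalone sketch below mirrors it with local enums so it compiles on its own (it is not the real VectorGroupByDesc code):

```java
// Standalone sketch of the mode mapping documented above.
public class GroupByModeMappingDemo {
  enum Mode { COMPLETE, PARTIAL1, PARTIAL2, PARTIALS, FINAL, HASH, MERGEPARTIAL }
  enum ProcessingMode { GLOBAL, HASH, MERGE_PARTIAL, STREAMING }

  static ProcessingMode map(Mode mode, boolean hasKeys) {
    switch (mode) {
      case COMPLETE:
        return hasKeys ? ProcessingMode.STREAMING : ProcessingMode.GLOBAL;
      case HASH:
        return ProcessingMode.HASH;
      case MERGEPARTIAL:
        return hasKeys ? ProcessingMode.MERGE_PARTIAL : ProcessingMode.GLOBAL;
      default:
        // PARTIAL1, PARTIAL2, PARTIALS, FINAL
        return ProcessingMode.STREAMING;
    }
  }

  public static void main(String[] args) {
    System.out.println(map(Mode.COMPLETE, false));     // GLOBAL
    System.out.println(map(Mode.MERGEPARTIAL, true));  // MERGE_PARTIAL
  }
}
```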
diff --git ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
index f1e3267cd3..b3d59e3cd0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ppd/PredicatePushDown.java
@@ -53,9 +53,9 @@
* plan generation adds filters where they are seen but in some instances some
* of the filter expressions can be pushed nearer to the operator that sees this
* particular data for the first time. e.g. select a.*, b.* from a join b on
- * (a.col1 = b.col1) where a.col1 > 20 and b.col2 > 40
+ * (a.col1 = b.col1) where a.col1 &gt; 20 and b.col2 &gt; 40
*
- * For the above query, the predicates (a.col1 > 20) and (b.col2 > 40), without
+ * For the above query, the predicates (a.col1 &gt; 20) and (b.col2 &gt; 40), without
* predicate pushdown, would be evaluated after the join processing has been
* done. Suppose the two predicates filter out most of the rows from a and b,
* the join is unnecessarily processing these rows. With predicate pushdown,
diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
index bc473ee349..94cfa5178c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
+++ ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorResponse.java
@@ -28,7 +28,7 @@
* CommandProcessor interface. Typically errorMessage
* and SQLState will only be set if the responseCode
* is not 0. Note that often {@code responseCode} ends up the exit value of
- * command shell process so should keep it to < 127.
+ * command shell process so should keep it to &lt; 127.
*/
public class CommandProcessorResponse extends Exception {
private final int responseCode;
diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
index 77421b5bc0..a8a97a0127 100644
--- ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
@@ -36,7 +36,7 @@
/**
* This class processes HADOOP commands used for HDFS encryption. It is meant to be run
- * only by Hive unit & queries tests.
+ * only by Hive unit &amp; queries tests.
*/
public class CryptoProcessor implements CommandProcessor {
public static final Logger LOG = LoggerFactory.getLogger(CryptoProcessor.class.getName());
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
index e19c053e14..f690422bfe 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationUtils.java
@@ -169,7 +169,6 @@ public static HivePrivilegeObject getHiveObjectRef(HiveObjectRef privObj) throws
* Convert authorization plugin principal type to thrift principal type
* @param type
* @return
- * @throws HiveException
*/
public static PrincipalType getThriftPrincipalType(HivePrincipalType type) {
if(type == null){
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
index 7678e8f1f8..7037f2c0ed 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeType.java
@@ -62,7 +62,7 @@ public Integer getToken() {
/**
* Do case lookup of PrivilegeType associated with this antlr token
- * @param privilegeName
+ * @param token
* @return corresponding PrivilegeType
*/
public static PrivilegeType getPrivTypeByToken(int token) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
index a4079b892e..9352aa2e7c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java
@@ -87,7 +87,7 @@ void revokePrivileges(List hivePrincipals, List hi
/**
* Create role
* @param roleName
- * @param adminGrantor - The user in "[ WITH ADMIN <user> ]" clause of "create role"
+ * @param adminGrantor - The user in "[ WITH ADMIN &lt;user&gt; ]" clause of "create role"
* @throws HiveAuthzPluginException
* @throws HiveAccessControlException
*/
@@ -232,7 +232,7 @@ void checkPrivileges(HiveOperationType hiveOpType, List inp
* returned, the Object has to be of type HiveAuthorizationTranslator
*
* @return
- * @throws HiveException
+ * @throws HiveAuthzPluginException
*/
Object getHiveAuthorizationTranslator() throws HiveAuthzPluginException;
@@ -246,19 +246,19 @@ void checkPrivileges(HiveOperationType hiveOpType, List inp
* (part 1) It expects a valid filter condition to be returned. Null indicates no filtering is
* required.
*
- * Example: table foo(c int) -> "c > 0 && c % 2 = 0"
+ * Example: table foo(c int) -&gt; "c &gt; 0 &amp;&amp; c % 2 = 0"
*
* (part 2) It expects a valid expression as used in a select clause. Null
* is NOT a valid option. If no transformation is needed simply return the
* column name.
*
- * Example: column a -> "a" (no transform)
+ * Example: column a -&gt; "a" (no transform)
*
- * Example: column a -> "reverse(a)" (call the reverse function on a)
+ * Example: column a -&gt; "reverse(a)" (call the reverse function on a)
*
- * Example: column a -> "5" (replace column a with the constant 5)
+ * Example: column a -&gt; "5" (replace column a with the constant 5)
*
- * @return List<HivePrivilegeObject>
+ * @return List&lt;HivePrivilegeObject&gt;
* please return the list of HivePrivilegeObjects that need to be rewritten.
*
* @throws SemanticException
@@ -271,7 +271,6 @@ void checkPrivileges(HiveOperationType hiveOpType, List inp
* Returning false short-circuits the generation of row/column transforms.
*
* @return
- * @throws SemanticException
*/
boolean needTransform();
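A toy model of this contract (deliberately not the HiveAuthorizer API) shows how a row-filter condition plus one transformer per column, as in the examples above, rewrites a scan:

```java
import java.util.Arrays;
import java.util.List;

// Toy model of the contract described above: if needTransform() is false
// nothing is rewritten; otherwise each table gets an optional row-filter
// expression plus one transformer per column.
public class MaskingContractDemo {
  static class TablePolicy {
    final String rowFilter;                 // e.g. "c > 0", null = no filtering
    final List<String> columnTransformers;  // e.g. ["a", "reverse(b)", "5"], one per column
    TablePolicy(String rowFilter, List<String> columnTransformers) {
      this.rowFilter = rowFilter;
      this.columnTransformers = columnTransformers;
    }
  }

  static String rewriteSelect(String table, List<String> columns, TablePolicy policy) {
    String projection = policy == null ? String.join(", ", columns)
        : String.join(", ", policy.columnTransformers);
    String where = (policy == null || policy.rowFilter == null) ? "" : " WHERE " + policy.rowFilter;
    return "SELECT " + projection + " FROM " + table + where;
  }

  public static void main(String[] args) {
    TablePolicy policy = new TablePolicy("c > 0", Arrays.asList("a", "reverse(b)", "5"));
    System.out.println(rewriteSelect("foo", Arrays.asList("a", "b", "c"), policy));
    // -> SELECT a, reverse(b), 5 FROM foo WHERE c > 0
  }
}
```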
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
index 0b3b19b03e..87d2e68abe 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
@@ -146,7 +146,7 @@ public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String o
}
/**
- * Create HivePrivilegeObject of type {@link HivePrivilegeObjectType.COMMAND_PARAMS}
+ * Create HivePrivilegeObject of type {@link HivePrivilegeObjectType#COMMAND_PARAMS}
* @param cmdParams
* @return
*/
@@ -215,7 +215,7 @@ public HivePrivObjectActionType getActionType() {
}
/**
- * Applicable columns in this object, when the type is {@link HivePrivilegeObjectType.TABLE}
+ * Applicable columns in this object, when the type is {@link HivePrivilegeObjectType#TABLE_OR_VIEW}
* In case of DML read operations, this is the set of columns being used.
* Column information is not set for DDL operations and for tables being written into
* @return list of applicable columns
@@ -225,7 +225,7 @@ public HivePrivObjectActionType getActionType() {
}
/**
- * The class name when the type is {@link HivePrivilegeObjectType.FUNCTION}
+ * The class name when the type is {@link HivePrivilegeObjectType#FUNCTION}
* @return the class name
*/
public String getClassName() {
diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
index 988d235bb1..1d79082b4f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java
@@ -31,9 +31,6 @@
/**
* This method connects to the temporary storage.
*
- * @param hconf
- * HiveConf that contains the connection parameters.
- * @param sourceTask
* @return true if connection is successful, false otherwise.
*/
public boolean connect(StatsCollectionContext scc);
diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
index bae732ca56..1230663391 100644
--- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
+++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java
@@ -35,15 +35,12 @@
* database (if not exist).
* This method is usually called in the Hive client side rather than by the mappers/reducers
* so that it is initialized only once.
- * @param hconf HiveConf that contains the configurations parameters used to connect to
- * intermediate stats database.
* @return true if initialization is successful, false otherwise.
*/
public boolean init(StatsCollectionContext context);
/**
* This method connects to the intermediate statistics database.
- * @param hconf HiveConf that contains the connection parameters.
* @return true if connection is successful, false otherwise.
*/
public boolean connect(StatsCollectionContext context);
diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index 2a7cf8c897..f00c72027a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -787,7 +787,7 @@ public static boolean containsNonPositives(List vals) {
}
/**
- * Get sum of all values in the list that are >0
+ * Get sum of all values in the list that are &gt;0
* @param vals
* - list of values
* @return sum
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
index 675853d66b..21bde4ad0b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/SettableUDF.java
@@ -30,7 +30,6 @@
/**
* Add data to UDF prior to initialization.
* An exception may be thrown if the UDF doesn't know what to do with this data.
- * @param params UDF-specific data to add to the UDF
*/
void setTypeInfo(TypeInfo typeInfo) throws UDFArgumentException;
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
index ed5882ba39..7a590b87fc 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java
@@ -139,7 +139,7 @@ private void char2byte(int radix, int fromPos) {
}
/**
- * Convert numbers between different number bases. If toBase>0 the result is
+ * Convert numbers between different number bases. If toBase&gt;0 the result is
* unsigned, otherwise it is signed.
*
*/
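Plain-Java sketch of the base-conversion semantics described above (UDFConv itself operates on a byte buffer and handles overflow itself; this is only an illustration):

```java
// Illustration of conv()-style base conversion, not UDFConv's implementation.
public class BaseConvDemo {
  static String conv(String num, int fromBase, int toBase) {
    long value = Long.parseLong(num, fromBase);
    // A negative toBase means "treat the result as signed", mirroring the contract above.
    boolean signed = toBase < 0;
    int radix = Math.abs(toBase);
    return signed ? Long.toString(value, radix)
                  : Long.toUnsignedString(value, radix);
  }

  public static void main(String[] args) {
    System.out.println(conv("ff", 16, 10));   // 255
    System.out.println(conv("-10", 10, -16)); // signed result: -a
    System.out.println(conv("-10", 10, 16));  // unsigned 64-bit view: fffffffffffffff6
  }
}
```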
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
index cd20783797..63b18fdbc7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java
@@ -36,7 +36,7 @@
* 'Ref' parse_url('http://facebook.com/path/p1.php?query=1#Ref', 'PROTOCOL')
* will return 'http' Possible values are
* HOST,PATH,QUERY,REF,PROTOCOL,AUTHORITY,FILE,USERINFO Also you can get a value
- * of particular key in QUERY, using syntax QUERY:<KEY_NAME> eg: QUERY:k1.
+ * of particular key in QUERY, using syntax QUERY:&lt;KEY_NAME&gt; eg: QUERY:k1.
*/
@Description(name = "parse_url",
value = "_FUNC_(url, partToExtract[, key]) - extracts a part from a URL",
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
index 738fd95869..c657a604c5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
@@ -75,7 +75,7 @@ public DoubleWritable evaluate(LongWritable a) {
/**
* Get the sign of the decimal input
*
- * @param dec decimal input
+ * @param decWritable decimal input
*
* @return -1, 0, or 1 representing the sign of the input decimal
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
index d1517ab7dd..360ae46d4a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java
@@ -47,12 +47,12 @@
* Donald Knuth.
*
* Incremental:
- * n : <count>
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
- * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): <variance * n>
- * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): <variance * n>
+ * n : &lt;count&gt;
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
+ * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): &lt;variance * n&gt;
+ * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): &lt;variance * n&gt;
*
* Merge:
* c_(A,B) = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/(n_A+n_B)
@@ -136,12 +136,12 @@ public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticE
* algorithm, based on work by Philippe Pébay and Donald Knuth.
*
* Incremental:
- * n : <count>
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
- * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): <variance * n>
- * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): <variance * n>
+ * n : &lt;count&gt;
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
+ * vx_n = vx_(n-1) + (x_n - mx_n)(x_n - mx_(n-1)): &lt;variance * n&gt;
+ * vy_n = vy_(n-1) + (y_n - my_n)(y_n - my_(n-1)): &lt;variance * n&gt;
*
* Merge:
* c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
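The incremental and merge formulas above transcribe almost directly into Java; a self-contained sketch follows (correlation additionally tracks vx/vy, omitted here):

```java
// Direct transcription of the incremental and merge formulas above (sketch only).
public class CovarianceMergeDemo {
  long n; double mx, my, c;                  // count, running means, co-moment (covariance * n)

  void add(double x, double y) {
    n++;
    double oldMx = mx;
    mx += (x - mx) / n;                      // mx_n = mx_(n-1) + (x_n - mx_(n-1)) / n
    my += (y - my) / n;                      // my_n = my_(n-1) + (y_n - my_(n-1)) / n
    c  += (x - oldMx) * (y - my);            // c_n  = c_(n-1) + (x_n - mx_(n-1)) * (y_n - my_n)
  }

  static CovarianceMergeDemo merge(CovarianceMergeDemo a, CovarianceMergeDemo b) {
    CovarianceMergeDemo out = new CovarianceMergeDemo();
    out.n = a.n + b.n;
    out.mx = (a.mx * a.n + b.mx * b.n) / out.n;
    out.my = (a.my * a.n + b.my * b.n) / out.n;
    // c_X = c_A + c_B + (mx_A - mx_B) * (my_A - my_B) * n_A * n_B / n_X
    out.c = a.c + b.c + (a.mx - b.mx) * (a.my - b.my) * a.n * b.n / out.n;
    return out;
  }

  public static void main(String[] args) {
    double[][] data = { {1, 2}, {2, 4}, {3, 5}, {4, 9} };
    CovarianceMergeDemo whole = new CovarianceMergeDemo();
    CovarianceMergeDemo left = new CovarianceMergeDemo();
    CovarianceMergeDemo right = new CovarianceMergeDemo();
    for (int i = 0; i < data.length; i++) {
      whole.add(data[i][0], data[i][1]);
      (i < 2 ? left : right).add(data[i][0], data[i][1]);
    }
    CovarianceMergeDemo merged = merge(left, right);
    System.out.println(whole.c / whole.n);   // population covariance computed in one pass
    System.out.println(merged.c / merged.n); // same value from the merged partials
  }
}
```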
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
index 8b088f8405..b1de95715a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java
@@ -44,10 +44,10 @@
* Arbitrary-Order Statistical Moments", Philippe Pebay, Sandia Labs):
*
* Incremental:
- * n : <count>
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
+ * n : &lt;count&gt;
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
*
* Merge:
* c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
@@ -128,10 +128,10 @@ public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticE
* http://infoserve.sandia.gov/sand_doc/2008/086212.pdf
*
* Incremental:
- * n : <count>
- * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : <xavg>
- * my_n = my_(n-1) + [y_n - my_(n-1)]/n : <yavg>
- * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : <covariance * n>
+ * n : &lt;count&gt;
+ * mx_n = mx_(n-1) + [x_n - mx_(n-1)]/n : &lt;xavg&gt;
+ * my_n = my_(n-1) + [y_n - my_(n-1)]/n : &lt;yavg&gt;
+ * c_n = c_(n-1) + (x_n - mx_(n-1))*(y_n - my_n) : &lt;covariance * n&gt;
*
* Merge:
* c_X = c_A + c_B + (mx_A - mx_B)*(my_A - my_B)*n_A*n_B/n_X
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
index 960d8fdb89..6125977dfd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java
@@ -46,7 +46,7 @@
* accept arguments of complex types, and return complex types. 2. It can accept
* variable length of arguments. 3. It can accept an infinite number of function
* signature - for example, it's easy to write a GenericUDAF that accepts
- * array<int>, array<array<int>> and so on (arbitrary levels of nesting).
+ * array&lt;int&gt;, array&lt;array&lt;int&gt;&gt; and so on (arbitrary levels of nesting).
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
index 568a7ec0eb..53c657b06e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLeadLag.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.io.IntWritable;
/**
- * abstract class for Lead & lag UDAFs GenericUDAFLeadLag.
+ * abstract class for Lead &amp; lag UDAFs GenericUDAFLeadLag.
*
*/
public abstract class GenericUDAFLeadLag extends AbstractGenericUDAFResolver {
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
index 0d8d659ff6..6597f4b34b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java
@@ -64,7 +64,7 @@
* accept arguments of complex types, and return complex types. 2. It can accept
* variable length of arguments. 3. It can accept an infinite number of function
* signature - for example, it's easy to write a GenericUDF that accepts
- * array<int>, array<array<int>> and so on (arbitrary levels of nesting). 4. It
+ * array&lt;int&gt;, array&lt;array&lt;int&gt;&gt; and so on (arbitrary levels of nesting). 4. It
* can do short-circuit evaluations using DeferedObject.
*/
@InterfaceAudience.Public
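The points above are easiest to see in a skeleton implementation; below is a minimal, hypothetical GenericUDF (not an existing Hive function) that reverses its single string argument, with DeferredObject providing the short-circuit evaluation mentioned in point 4:

```java
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

// Minimal GenericUDF sketch (simplified: a production UDF would convert its
// argument through the supplied ObjectInspector instead of calling toString()).
public class GenericUDFReverseDemo extends GenericUDF {

  @Override
  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
    if (arguments.length != 1) {
      throw new UDFArgumentLengthException("reverse_demo expects exactly one argument");
    }
    return PrimitiveObjectInspectorFactory.javaStringObjectInspector;
  }

  @Override
  public Object evaluate(DeferredObject[] arguments) throws HiveException {
    Object value = arguments[0].get();   // DeferredObject allows short-circuit evaluation
    if (value == null) {
      return null;
    }
    return new StringBuilder(value.toString()).reverse().toString();
  }

  @Override
  public String getDisplayString(String[] children) {
    return "reverse_demo(" + children[0] + ")";
  }
}
```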
@@ -222,7 +222,7 @@ public void close() throws IOException {
/**
* Some functions like comparisons may be affected by appearing order of arguments.
- * This is to convert a function, such as 3 > x to x < 3. The flip function of
+ * This is to convert a function, such as 3 &gt; x to x &lt; 3. The flip function of
* GenericUDFOPGreaterThan is GenericUDFOPLessThan.
*/
public GenericUDF flip() {
@@ -233,7 +233,6 @@ public GenericUDF flip() {
* Gets the negative function of the current one. E.g., GenericUDFOPNotEqual for
* GenericUDFOPEqual, or GenericUDFOPNull for GenericUDFOPNotNull.
* @return Negative function
- * @throws UDFArgumentException
*/
public GenericUDF negative() {
throw new UnsupportedOperationException("Negative function doesn't exist for " + getFuncName());
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
index ea9a59eeb1..5d3f171afb 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java
@@ -37,7 +37,7 @@
/**
* Generic UDF for string function
- * CONCAT_WS(sep, [string | array(string)]+).
+ * CONCAT_WS(sep, [string | array(string)]+).
* This mimics the function from
* MySQL http://dev.mysql.com/doc/refman/5.0/en/string-functions.html#
* function_concat-ws
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
index 25c54e9155..23708dc345 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java
@@ -76,7 +76,7 @@
/**
* IF(expr1,expr2,expr3)
- * If expr1 is TRUE (expr1 <> 0 and expr1 <> NULL) then IF() returns expr2;
+ * If expr1 is TRUE (expr1 &lt;&gt; 0 and expr1 &lt;&gt; NULL) then IF() returns expr2;
* otherwise it returns expr3. IF() returns a numeric or string value, depending
* on the context in which it is used.
*/
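
Illustration only (not part of the patch): the NULL handling described above, restated as plain Java. A NULL condition behaves like FALSE and selects expr3. The helper name hiveIf is hypothetical, not a Hive API.

    // A null condition (SQL NULL) falls through to expr3, the same as FALSE.
    static <T> T hiveIf(Boolean expr1, T expr2, T expr3) {
      return (expr1 != null && expr1) ? expr2 : expr3;
    }
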
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
index ee869db12c..70f57b7727 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java
@@ -42,7 +42,7 @@
* GenericUDFTimestamp
*
* Example usage:
- * ... CAST(<Timestamp string> as TIMESTAMP) ...
+ * ... CAST(&lt;Timestamp string&gt; as TIMESTAMP) ...
*
* Creates a TimestampWritableV2 object using PrimitiveObjectInspectorConverter
*
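
As a rough Java-level analogue of the CAST shown above, for illustration only; the UDF itself converts through PrimitiveObjectInspectorConverter into a TimestampWritableV2 rather than java.sql.Timestamp.

    import java.sql.Timestamp;

    class TimestampCastSketch {
      public static void main(String[] args) {
        // Parsing a timestamp string, analogous to CAST('2024-01-15 10:30:00' as TIMESTAMP).
        Timestamp ts = Timestamp.valueOf("2024-01-15 10:30:00");
        System.out.println(ts);
      }
    }
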
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
index e5a25c3556..530794e040 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalDayTime.java
@@ -34,7 +34,7 @@
* GenericUDFIntervalDayTime
*
* Example usage:
-* ... CAST(<Interval string> as INTERVAL DAY TO SECOND) ...
+* ... CAST(&lt;Interval string&gt; as INTERVAL DAY TO SECOND) ...
*
* Creates a HiveIntervalDayTimeWritable object using PrimitiveObjectInspectorConverter
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
index 804b8e722f..8baf26c18a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToIntervalYearMonth.java
@@ -34,7 +34,7 @@
* GenericUDFIntervalYearMonth
*
* Example usage:
-* ... CAST(<Interval string> as INTERVAL YEAR TO MONTH) ...
+* ... CAST(&lt;Interval string&gt; as INTERVAL YEAR TO MONTH) ...
*
* Creates a HiveIntervalYearMonthWritable object using PrimitiveObjectInspectorConverter
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
index e187355b19..ac23e50f64 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
@@ -44,7 +44,7 @@
* Additionally setup GenericUDTF with MapredContext before initializing.
* This is only called in runtime of MapRedTask.
*
- * @param context context
+ * @param mapredContext context
*/
public void configure(MapredContext mapredContext) {
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
index 8f3dfdbe3c..1fbfa4f814 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java
@@ -70,7 +70,7 @@
* where the first occurrence was LATE, followed by zero or more EARLY flights,
* followed by a ONTIME or EARLY flight.
* symbols specify a list of name, expression pairs. For e.g.
- * 'LATE', arrival_delay > 0, 'EARLY', arrival_delay < 0 , 'ONTIME', arrival_delay == 0.
+ * 'LATE', arrival_delay &gt; 0, 'EARLY', arrival_delay &lt; 0 , 'ONTIME', arrival_delay == 0.
* These symbols can be used in the Pattern defined above.
* resultSelectList specified as a select list.
* The expressions in the selectList are evaluated in the context where all the
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
index e2b7035254..f1c4b731b8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java
@@ -62,7 +62,7 @@
* Based on Hive {@link GenericUDAFEvaluator}. Break up the responsibility of the old AbstractTableFunction
* class into a Resolver and Evaluator.
*
- * The Evaluator also holds onto the {@link TableFunctionDef}. This provides information
+ * The Evaluator also holds onto the {@link PartitionedTableFunctionDef}. This provides information
* about the arguments to the function, the shape of the Input partition and the Partitioning details.
* The Evaluator is responsible for providing the 2 execute methods:
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
index dbc7693420..bf012ddd03 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionResolver.java
@@ -37,15 +37,15 @@
* old AbstractTableFunction class into a Resolver and Evaluator.
* The Resolver is responsible for:
*
- * - setting up the {@link tableFunctionEvaluator}
+ * - setting up the {@link TableFunctionEvaluator}
* - Setting up the The raw and output ObjectInspectors of the Evaluator.
- * - The Evaluator also holds onto the {@link TableFunctionDef}. This provides information
+ * - The Evaluator also holds onto the {@link PartitionedTableFunctionDef}. This provides information
* about the arguments to the function, the shape of the Input partition and the Partitioning details.
*
* The Resolver for a function is obtained from the {@link FunctionRegistry}. The Resolver is initialized
* by the following 4 step process:
*
- * - The initialize method is called; which is passed the {@link PTFDesc} and the {@link TableFunctionDef}.
+ * - The initialize method is called; which is passed the {@link PTFDesc} and the {@link PartitionedTableFunctionDef}.
* - The resolver is then asked to setup the Raw ObjectInspector. This is only required if the Function reshapes
* the raw input.
* - Once the Resolver has had a chance to compute the shape of the Raw Input that is fed to the partitioning
@@ -113,8 +113,6 @@ public TableFunctionEvaluator getEvaluator() {
* exist for all the Def (ArgDef, ColumnDef, WindowDef..). It is the responsibility of
* the TableFunction to construct the {@link ExprNodeEvaluator evaluators} and setup the OI.
*
- * @param tblFuncDef
- * @param ptfDesc
* @throws HiveException
*/
public abstract void initializeOutputOI() throws HiveException;
diff --git ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java
index cb966a7b2e..58e6289583 100644
--- ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java
+++ ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java
@@ -83,7 +83,7 @@
* but can be made insert-only transactional tables and generate corresponding Alter Table commands.
*
* Note that to convert a table to full CRUD table requires that all files follow a naming
- * convention, namely 0000N_0 or 0000N_0_copy_M, N >= 0, M > 0. This utility can perform this
+ * convention, namely 0000N_0 or 0000N_0_copy_M, N &gt;= 0, M &gt; 0. This utility can perform this
* rename with "execute" option. It will also produce a script (with and w/o "execute" to
* perform the renames).
*
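
Illustration only (not part of the patch): the file naming convention mentioned above, encoded as a regex check. The exact pattern UpgradeTool uses may differ; this only captures "0000N_0, optionally followed by _copy_M with M > 0".

    import java.util.regex.Pattern;

    final class BucketFileNameCheck {
      // N >= 0 (zero-padded bucket number), optional _copy_M suffix with M > 0.
      private static final Pattern ORIGINAL_FILE_NAME =
          Pattern.compile("\\d+_0(_copy_[1-9]\\d*)?");

      static boolean followsConvention(String fileName) {
        return ORIGINAL_FILE_NAME.matcher(fileName).matches();
      }
    }
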
diff --git ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java
index 76753488ad..92651cd188 100644
--- ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java
+++ ql/src/java/org/apache/hadoop/hive/ql/wm/Expression.java
@@ -17,7 +17,7 @@
/**
* Expression that is defined in triggers.
- * Most expressions will get triggered only after exceeding a limit. As a result, only greater than (>) expression
+ * Most expressions will get triggered only after exceeding a limit. As a result, only greater than (&gt;) expression
* is supported.
*/
public interface Expression {
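
Illustration only (not part of the patch, and not the actual org.apache.hadoop.hive.ql.wm.Expression API): a self-contained sketch of the greater-than contract described above.

    // Hypothetical stand-in for a trigger expression; evaluates to true only
    // once the observed value exceeds the configured limit (current > limit).
    final class GreaterThanTrigger {
      private final long limit;

      GreaterThanTrigger(long limit) {
        this.limit = limit;
      }

      boolean evaluate(long current) {
        return current > limit;
      }
    }
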
@@ -43,7 +43,7 @@ public String getSymbol() {
}
/**
- * Evaluate current value against this expression. Return true if expression evaluates to true (current > limit)
+ * Evaluate current value against this expression. Return true if expression evaluates to true (current &gt; limit)
* else false otherwise
*
* @param current - current value against which expression will be evaluated