diff --git build.xml build.xml
index 72264cf..f39cf06 100644
--- build.xml
+++ build.xml
@@ -687,7 +687,10 @@
+
+
+
diff --git cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
index c6727f9..c764e7c 100644
--- cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
+++ cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
@@ -346,7 +346,7 @@ public class CliDriver {
* @param allowInterupting
* When true the function will handle SIG_INT (Ctrl+C) by interrupting the processing and
* returning -1
- * @return
+ * @return 0 if ok
*/
public int processLine(String line, boolean allowInterupting) {
SignalHandler oldSignal = null;
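
For context, a minimal, hypothetical sketch of how the return convention documented above might be consumed (it assumes a CLI session has already been initialized via SessionState):

    CliDriver cli = new CliDriver();
    // allowInterupting=true lets Ctrl+C abort the statement with a -1 result
    int ret = cli.processLine("SHOW TABLES;", true);
    if (ret != 0) {
      System.err.println("statement failed or was interrupted: " + ret);
    }
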
diff --git common/src/java/org/apache/hadoop/hive/common/cli/CommonCliOptions.java common/src/java/org/apache/hadoop/hive/common/cli/CommonCliOptions.java
index 681eb19..e229580 100644
--- common/src/java/org/apache/hadoop/hive/common/cli/CommonCliOptions.java
+++ common/src/java/org/apache/hadoop/hive/common/cli/CommonCliOptions.java
@@ -34,7 +34,9 @@ import org.apache.commons.cli.ParseException;
* all your own options or processing instructions), parse, and then use
* the resulting information.
*
- * See {@link HiveServer} or {@link HiveMetaStore} for examples of use.
+ * See {@link org.apache.hadoop.hive.service.HiveServer} or
+ * {@link org.apache.hadoop.hive.metastore.HiveMetaStore}
+ * for examples of use.
*
*/
public class CommonCliOptions {
@@ -55,10 +57,11 @@ public class CommonCliOptions {
/**
* Create an instance with common options (help, verbose, etc...).
*
+ * @param cliname the name of the command
* @param includeHiveConf include "hiveconf" as an option if true
*/
@SuppressWarnings("static-access")
- public CommonCliOptions(String cliname, boolean includeHiveconf) {
+ public CommonCliOptions(String cliname, boolean includeHiveConf) {
this.cliname = cliname;
// [-v|--verbose]
@@ -67,7 +70,7 @@ public class CommonCliOptions {
// [-h|--help]
OPTIONS.addOption(new Option("h", "help", false, "Print help information"));
- if (includeHiveconf) {
+ if (includeHiveConf) {
OPTIONS.addOption(OptionBuilder
.withValueSeparator()
.hasArgs(2)
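
A hedged usage sketch of the constructor documented above (parse(String[]) and the verbose flag are assumed from the rest of the class, not shown in this hunk):

    // build the common options (help, verbose, hiveconf) for a CLI named "hiveserver"
    CommonCliOptions cli = new CommonCliOptions("hiveserver", true);
    // --hiveconf key=value pairs are only accepted because includeHiveConf was true
    cli.parse(new String[] {"-v", "--hiveconf", "hive.root.logger=INFO,console"});
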
diff --git common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java
index 99dd08a..628a98c 100644
--- common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java
+++ common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java
@@ -43,7 +43,7 @@ public interface MetricsMBean extends DynamicMBean {
/**
*
* @param name
- * @return
+ * @return the value associated with the given name
* @throws Exception
*/
public abstract Object get(String name) throws IOException;
diff --git jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDatabaseMetaData.java jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDatabaseMetaData.java
index d20a1a6..e59a65e 100644
--- jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDatabaseMetaData.java
+++ jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDatabaseMetaData.java
@@ -389,7 +389,6 @@ public class HiveDatabaseMetaData implements java.sql.DatabaseMetaData {
/**
* Returns the value of maxColumnNameLength.
*
- * @param int
*/
public int getMaxColumnNameLength() throws SQLException {
return maxColumnNameLength;
@@ -654,7 +653,7 @@ public class HiveDatabaseMetaData implements java.sql.DatabaseMetaData {
/**
* Translate hive table types into jdbc table types.
* @param hivetabletype
- * @return
+ * @return the JDBC table type
*/
public static String toJdbcTableType(String hivetabletype) {
if (hivetabletype==null) {
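
A small sketch of how a caller might use toJdbcTableType; the null passthrough follows from the check above, while the concrete mapping in the comment is an assumption for illustration:

    String none = HiveDatabaseMetaData.toJdbcTableType(null);            // null
    String type = HiveDatabaseMetaData.toJdbcTableType("MANAGED_TABLE"); // e.g. "TABLE"
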
diff --git jdbc/src/java/org/apache/hadoop/hive/jdbc/HivePreparedStatement.java jdbc/src/java/org/apache/hadoop/hive/jdbc/HivePreparedStatement.java
index 2fc8697..b847d51 100644
--- jdbc/src/java/org/apache/hadoop/hive/jdbc/HivePreparedStatement.java
+++ jdbc/src/java/org/apache/hadoop/hive/jdbc/HivePreparedStatement.java
@@ -96,7 +96,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#addBatch()
*/
@@ -107,7 +107,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#clearParameters()
*/
@@ -121,7 +121,7 @@ public class HivePreparedStatement implements PreparedStatement {
* @return boolean Returns true if a resultSet is created, false if not.
* Note: If the result set is empty a true is returned.
*
- * @throws
+ * @throws SQLException
*/
public boolean execute() throws SQLException {
@@ -133,7 +133,7 @@ public class HivePreparedStatement implements PreparedStatement {
* Invokes executeQuery(sql) using the sql provided to the constructor.
*
* @return ResultSet
- * @throws
+ * @throws SQLException
*/
public ResultSet executeQuery() throws SQLException {
@@ -142,7 +142,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#executeUpdate()
*/
@@ -241,7 +241,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#getMetaData()
*/
@@ -252,7 +252,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#getParameterMetaData()
*/
@@ -263,7 +263,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setArray(int, java.sql.Array)
*/
@@ -274,7 +274,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setAsciiStream(int, java.io.InputStream)
*/
@@ -285,7 +285,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setAsciiStream(int, java.io.InputStream,
* int)
*/
@@ -297,7 +297,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setAsciiStream(int, java.io.InputStream,
* long)
*/
@@ -309,7 +309,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setBigDecimal(int, java.math.BigDecimal)
*/
@@ -320,7 +320,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setBinaryStream(int, java.io.InputStream)
*/
@@ -331,7 +331,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setBinaryStream(int, java.io.InputStream,
* int)
*/
@@ -343,7 +343,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setBinaryStream(int, java.io.InputStream,
* long)
*/
@@ -355,7 +355,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setBlob(int, java.sql.Blob)
*/
@@ -366,7 +366,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setBlob(int, java.io.InputStream)
*/
@@ -377,7 +377,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setBlob(int, java.io.InputStream, long)
*/
@@ -389,7 +389,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setBoolean(int, boolean)
*/
@@ -399,7 +399,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setByte(int, byte)
*/
@@ -409,7 +409,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setBytes(int, byte[])
*/
@@ -420,7 +420,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setCharacterStream(int, java.io.Reader)
*/
@@ -431,7 +431,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setCharacterStream(int, java.io.Reader,
* int)
*/
@@ -444,7 +444,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setCharacterStream(int, java.io.Reader,
* long)
*/
@@ -457,7 +457,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setClob(int, java.sql.Clob)
*/
@@ -468,7 +468,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setClob(int, java.io.Reader)
*/
@@ -479,7 +479,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setClob(int, java.io.Reader, long)
*/
@@ -490,7 +490,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setDate(int, java.sql.Date)
*/
@@ -501,7 +501,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setDate(int, java.sql.Date,
* java.util.Calendar)
*/
@@ -513,7 +513,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setDouble(int, double)
*/
@@ -523,7 +523,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setFloat(int, float)
*/
@@ -533,7 +533,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setInt(int, int)
*/
@@ -543,7 +543,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setLong(int, long)
*/
@@ -553,7 +553,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setNCharacterStream(int, java.io.Reader)
*/
@@ -564,7 +564,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setNCharacterStream(int, java.io.Reader,
* long)
*/
@@ -577,7 +577,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setNClob(int, java.sql.NClob)
*/
@@ -588,7 +588,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setNClob(int, java.io.Reader)
*/
@@ -599,7 +599,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setNClob(int, java.io.Reader, long)
*/
@@ -610,7 +610,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setNString(int, java.lang.String)
*/
@@ -621,7 +621,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setNull(int, int)
*/
@@ -632,7 +632,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setNull(int, int, java.lang.String)
*/
@@ -643,7 +643,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setObject(int, java.lang.Object)
*/
@@ -654,7 +654,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setObject(int, java.lang.Object, int)
*/
@@ -666,7 +666,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setObject(int, java.lang.Object, int, int)
*/
@@ -678,7 +678,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setRef(int, java.sql.Ref)
*/
@@ -689,7 +689,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setRowId(int, java.sql.RowId)
*/
@@ -700,7 +700,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setSQLXML(int, java.sql.SQLXML)
*/
@@ -711,7 +711,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setShort(int, short)
*/
@@ -721,7 +721,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setString(int, java.lang.String)
*/
@@ -732,7 +732,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setTime(int, java.sql.Time)
*/
@@ -743,7 +743,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setTime(int, java.sql.Time,
* java.util.Calendar)
*/
@@ -755,7 +755,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setTimestamp(int, java.sql.Timestamp)
*/
@@ -766,7 +766,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setTimestamp(int, java.sql.Timestamp,
* java.util.Calendar)
*/
@@ -779,7 +779,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setURL(int, java.net.URL)
*/
@@ -790,7 +790,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.PreparedStatement#setUnicodeStream(int, java.io.InputStream,
* int)
*/
@@ -803,7 +803,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#addBatch(java.lang.String)
*/
@@ -814,7 +814,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#cancel()
*/
@@ -825,7 +825,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#clearBatch()
*/
@@ -836,7 +836,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#clearWarnings()
*/
@@ -847,7 +847,7 @@ public class HivePreparedStatement implements PreparedStatement {
/**
* Closes the prepared statement.
*
- * @throws
+ * @throws SQLException
*/
public void close() throws SQLException {
@@ -861,7 +861,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#execute(java.lang.String)
*/
@@ -872,7 +872,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#execute(java.lang.String, int)
*/
@@ -883,7 +883,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#execute(java.lang.String, int[])
*/
@@ -894,7 +894,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#execute(java.lang.String, java.lang.String[])
*/
@@ -905,7 +905,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#executeBatch()
*/
@@ -916,7 +916,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#executeQuery(java.lang.String)
*/
@@ -927,7 +927,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#executeUpdate(java.lang.String)
*/
@@ -938,7 +938,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#executeUpdate(java.lang.String, int)
*/
@@ -949,7 +949,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#executeUpdate(java.lang.String, int[])
*/
@@ -960,7 +960,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#executeUpdate(java.lang.String, java.lang.String[])
*/
@@ -971,7 +971,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getConnection()
*/
@@ -982,7 +982,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getFetchDirection()
*/
@@ -993,7 +993,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getFetchSize()
*/
@@ -1004,7 +1004,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getGeneratedKeys()
*/
@@ -1015,7 +1015,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getMaxFieldSize()
*/
@@ -1026,7 +1026,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getMaxRows()
*/
@@ -1036,7 +1036,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getMoreResults()
*/
@@ -1047,7 +1047,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getMoreResults(int)
*/
@@ -1058,7 +1058,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getQueryTimeout()
*/
@@ -1069,7 +1069,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getResultSet()
*/
@@ -1079,7 +1079,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getResultSetConcurrency()
*/
@@ -1090,7 +1090,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getResultSetHoldability()
*/
@@ -1101,7 +1101,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getResultSetType()
*/
@@ -1112,7 +1112,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getUpdateCount()
*/
@@ -1122,7 +1122,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#getWarnings()
*/
@@ -1132,7 +1132,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#isClosed()
*/
@@ -1142,7 +1142,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#isPoolable()
*/
@@ -1153,7 +1153,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#setCursorName(java.lang.String)
*/
@@ -1164,7 +1164,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#setEscapeProcessing(boolean)
*/
@@ -1175,7 +1175,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#setFetchDirection(int)
*/
@@ -1186,7 +1186,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#setFetchSize(int)
*/
@@ -1197,7 +1197,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#setMaxFieldSize(int)
*/
@@ -1208,7 +1208,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#setMaxRows(int)
*/
@@ -1221,7 +1221,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#setPoolable(boolean)
*/
@@ -1232,7 +1232,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Statement#setQueryTimeout(int)
*/
@@ -1243,7 +1243,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Wrapper#isWrapperFor(java.lang.Class)
*/
@@ -1254,7 +1254,7 @@ public class HivePreparedStatement implements PreparedStatement {
/*
* (non-Javadoc)
- *
+ *
* @see java.sql.Wrapper#unwrap(java.lang.Class)
*/
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index 06b6566..3aeeac4 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -69,7 +69,7 @@ public interface AlterHandler extends Configurable {
* original values of the partition being altered
* @param new_part
* new partition object
- * @return
+ * @return the altered partition
* @throws InvalidOperationException
* @throws InvalidObjectException
* @throws AlreadyExistsException
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index b332b70..cdc22b8 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -290,7 +290,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
/**
* Get a cached RawStore.
*
- * @return
+ * @return the cached RawStore
* @throws MetaException
*/
@InterfaceAudience.LimitedPrivate({"HCATALOG"})
@@ -2873,7 +2873,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
*
* @param port
* @param bridge
- * @param hiveconf
+ * @param conf
* configuration overrides
* @throws Throwable
*/
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 37967d4..2c55272 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -164,8 +164,10 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
* @throws InvalidOperationException
* @throws MetaException
* @throws TException
- * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(java.lang.String,
- * java.lang.String, org.apache.hadoop.hive.metastore.api.Table)
+ * @see
+ * org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(
+ * java.lang.String, java.lang.String,
+ * org.apache.hadoop.hive.metastore.api.Table)
*/
public void alter_table(String dbname, String tbl_name, Table new_tbl)
throws InvalidOperationException, MetaException, TException {
@@ -333,7 +335,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
* @throws AlreadyExistsException
* @throws MetaException
* @throws TException
- * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List)
+ * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List)
*/
public int add_partitions(List<Partition> new_parts)
throws InvalidObjectException, AlreadyExistsException, MetaException,
@@ -369,13 +371,11 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
/**
* Create a new Database
* @param db
- * @return true or false
* @throws AlreadyExistsException
* @throws InvalidObjectException
* @throws MetaException
* @throws TException
- * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(java.lang.String,
- * java.lang.String)
+ * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database)
*/
public void createDatabase(Database db)
throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
@@ -425,12 +425,11 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
/**
* @param name
- * @return true or false
* @throws NoSuchObjectException
* @throws InvalidOperationException
* @throws MetaException
* @throws TException
- * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String)
+ * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean)
*/
public void dropDatabase(String name)
throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
@@ -842,7 +841,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
/**
* create an index
* @param index the index object
- * @param index table which stores the index data
+ * @param indexTable the table which stores the index data
* @throws InvalidObjectException
* @throws MetaException
* @throws NoSuchObjectException
@@ -873,7 +872,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
* @param dbName
* @param tblName
* @param indexName
- * @return
+ * @return the index
* @throws MetaException
* @throws UnknownTableException
* @throws NoSuchObjectException
@@ -887,10 +886,10 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
/**
* list indexes of the give base table
- * @param db_name
- * @param tbl_name
+ * @param dbName
+ * @param tblName
* @param max
- * @return
+ * @return the list of indexes
* @throws NoSuchObjectException
* @throws MetaException
* @throws TException
@@ -903,10 +902,10 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
/**
* list all the index names of the give base table.
*
- * @param db_name
- * @param tbl_name
+ * @param dbName
+ * @param tblName
* @param max
- * @return
+ * @return the list of index names
* @throws MetaException
* @throws TException
*/
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 2c0eee0..f1a6c8b 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -126,11 +126,9 @@ public interface IMetaStoreClient {
*
* @param dbName
* The name of the database from which you will retrieve the table names
- * @param filterType
- * The type of filter
* @param filter
* The filter string
- * @param max_tables
+ * @param maxTables
* The maximum number of tables returned
* @return A list of table names that match the desired filter
*/
@@ -223,7 +221,7 @@ public interface IMetaStoreClient {
/**
* Get a Database Object
* @param databaseName name of the database to fetch
- * @return
+ * @return the database
* @throws NoSuchObjectException The database does not exist
* @throws MetaException Could not fetch the database
* @throws TException A thrift communication error occurred
@@ -361,7 +359,7 @@ public interface IMetaStoreClient {
* @param pvals
* @param userName
* @param groupNames
- * @return
+ * @return the partition
* @throws MetaException
* @throws UnknownTableException
* @throws NoSuchObjectException
@@ -417,7 +415,7 @@ public interface IMetaStoreClient {
* @param s
* @param userName
* @param groupNames
- * @return
+ * @return the list of partitions
* @throws NoSuchObjectException
*/
public List<Partition> listPartitionsWithAuthInfo(String dbName,
@@ -444,7 +442,7 @@ public interface IMetaStoreClient {
* @param s
* @param userName
* @param groupNames
- * @return
+ * @return the list of partitions
* @throws NoSuchObjectException
*/
public List<Partition> listPartitionsWithAuthInfo(String dbName,
@@ -657,7 +655,7 @@ public interface IMetaStoreClient {
* @param dbName
* @param tblName
* @param indexName
- * @return
+ * @return the index
* @throws MetaException
* @throws UnknownTableException
* @throws NoSuchObjectException
@@ -673,7 +671,7 @@ public interface IMetaStoreClient {
* @param db_name
* @param tbl_name
* @param max
- * @return
+ * @return the list of indexes
* @throws NoSuchObjectException
* @throws MetaException
* @throws TException
@@ -687,7 +685,7 @@ public interface IMetaStoreClient {
* @param db_name
* @param tbl_name
* @param max
- * @return
+ * @return the list of index names
* @throws MetaException
* @throws TException
*/
@@ -699,7 +697,7 @@ public interface IMetaStoreClient {
* @param tbl_name
* @param name index name
* @param deleteData
- * @return
+ * @return true on success
* @throws NoSuchObjectException
* @throws MetaException
* @throws TException
@@ -709,9 +707,9 @@ public interface IMetaStoreClient {
MetaException, TException;
/**
- * @param Role
+ * @param role
* role object
- * @return
+ * @return true on success
* @throws MetaException
* @throws TException
*/
@@ -721,9 +719,8 @@ public interface IMetaStoreClient {
/**
* @param role_name
* role name
- * @param db_name
*
- * @return
+ * @return true on success
* @throws MetaException
* @throws TException
*/
@@ -731,7 +728,7 @@ public interface IMetaStoreClient {
/**
* list all role names
- * @return
+ * @return the list of role names
* @throws TException
* @throws MetaException
*/
@@ -745,7 +742,7 @@ public interface IMetaStoreClient {
* @param grantor
* @param grantorType
* @param grantOption
- * @return
+ * @return true on success
* @throws MetaException
* @throws TException
*/
@@ -759,9 +756,8 @@ public interface IMetaStoreClient {
* @param user_name
* user name
* @param principalType
- * @param db_name
*
- * @return
+ * @return true on success
* @throws MetaException
* @throws TException
*/
@@ -772,7 +768,7 @@ public interface IMetaStoreClient {
*
* @param principalName
* @param principalType
- * @return
+ * @return list of roles
* @throws MetaException
* @throws TException
*/
@@ -783,7 +779,7 @@ public interface IMetaStoreClient {
* @param hiveObject
* @param user_name
* @param group_names
- * @return
+ * @return the privilege set
* @throws MetaException
* @throws TException
*/
@@ -795,7 +791,7 @@ public interface IMetaStoreClient {
* @param principal_name
* @param principal_type
* @param hiveObject
- * @return
+ * @return list of privileges
* @throws MetaException
* @throws TException
*/
@@ -805,7 +801,7 @@ public interface IMetaStoreClient {
/**
* @param privileges
- * @return
+ * @return true on success
* @throws MetaException
* @throws TException
*/
@@ -814,7 +810,7 @@ public interface IMetaStoreClient {
/**
* @param privileges
- * @return
+ * @return true on success
* @throws MetaException
* @throws TException
*/
@@ -824,7 +820,7 @@ public interface IMetaStoreClient {
/**
* @param owner the intended owner for the token
* @param renewerKerberosPrincipalName
- * @return
+ * @return the token in string form
* @throws MetaException
* @throws TException
*/
@@ -833,7 +829,7 @@ public interface IMetaStoreClient {
/**
* @param tokenStrForm
- * @return
+ * @return the new expiration time
* @throws MetaException
* @throws TException
*/
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
index bd276a9..0207acf 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
@@ -47,13 +47,13 @@ public abstract class MetaStoreEventListener implements Configurable {
}
/**
- * @param create table event.
+ * @param tableEvent table event.
* @throws MetaException
*/
public abstract void onCreateTable (CreateTableEvent tableEvent) throws MetaException;
/**
- * @param drop table event.
+ * @param tableEvent table event.
* @throws MetaException
*/
public abstract void onDropTable (DropTableEvent tableEvent) throws MetaException;
@@ -73,25 +73,25 @@ public abstract class MetaStoreEventListener implements Configurable {
public abstract void onAddPartition (AddPartitionEvent partitionEvent) throws MetaException;
/**
- * @param drop partition event
+ * @param partitionEvent partition event
* @throws MetaException
*/
public abstract void onDropPartition (DropPartitionEvent partitionEvent) throws MetaException;
/**
- * @param alter partition event
+ * @param partitionEvent partition event
* @throws MetaException
*/
public abstract void onAlterPartition (AlterPartitionEvent partitionEvent) throws MetaException;
/**
- * @param create database event
+ * @param dbEvent database event
* @throws MetaException
*/
public abstract void onCreateDatabase (CreateDatabaseEvent dbEvent) throws MetaException;
/**
- * @param drop database event
+ * @param dbEvent database event
* @throws MetaException
*/
public abstract void onDropDatabase (DropDatabaseEvent dbEvent) throws MetaException;
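
Listeners like this are typically wired up through configuration rather than constructed directly; a hedged sketch (the property name is an assumption, not taken from this patch):

    HiveConf conf = new HiveConf();
    // comma-separated list of MetaStoreEventListener implementations
    conf.set("hive.metastore.event.listeners",
        "org.apache.hadoop.hive.metastore.DummyListener");
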
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
index 744bf8c..a141793 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
@@ -30,10 +30,10 @@ public interface MetaStoreFS {
/**
* delete a directory
- *
+ *
* @param f
* @param recursive
- * @return
+ * @return true on success
* @throws MetaException
*/
public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 32668bd..2eec7dc 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -219,8 +219,9 @@ public class MetaStoreUtils {
*
* @param conf
* - hadoop config
- * @param partition
+ * @param part
* the partition
+ * @param table the table
* @return the Deserializer
* @exception MetaException
* if any problems instantiating the Deserializer
@@ -487,7 +488,7 @@ public class MetaStoreUtils {
* @param tableName table name
* @param partitionKeys partition columns
* @param tblSchema The table level schema from which this partition should be copied.
- * @return
+ * @return the partition schema properties
*/
public static Properties getPartSchemaFromTableSchema(
org.apache.hadoop.hive.metastore.api.StorageDescriptor sd,
@@ -942,7 +943,7 @@ public class MetaStoreUtils {
* Given a map of partition column names to values, this creates a filter
* string that can be used to call the *byFilter methods
* @param m
- * @return
+ * @return the filter string
*/
public static String makeFilterStringFromMap(Map<String, String> m) {
StringBuilder filter = new StringBuilder();
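
A hedged sketch of the contract described in the javadoc above; the exact quoting and AND syntax of the produced filter are assumptions:

    Map<String, String> partSpec = new LinkedHashMap<String, String>();
    partSpec.put("ds", "2010-01-01");
    partSpec.put("hr", "12");
    // e.g. ds = "2010-01-01" and hr = "12", usable with the *byFilter methods
    String filter = MetaStoreUtils.makeFilterStringFromMap(partSpec);
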
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index 0f3c67d..11c7f23 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -139,8 +139,6 @@ public interface RawStore extends Configurable {
* Gets a list of tables based on a filter string and filter type.
* @param dbName
* The name of the database from which you will retrieve the table names
- * @param filterType
- * The type of filter
* @param filter
* The filter string
* @param max_tables
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
index aefb81e..d13171b 100755
--- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -283,7 +283,7 @@ public class Warehouse {
* Makes a partition name from a specification
* @param spec
* @param addTrailingSeperator if true, adds a trailing separator e.g. 'ds=1/'
- * @return
+ * @return partition name
* @throws MetaException
*/
public static String makePartName(Map<String, String> spec,
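
A minimal sketch of makePartName per its javadoc (MetaException handling omitted for brevity):

    Map<String, String> spec = new LinkedHashMap<String, String>();
    spec.put("ds", "1");
    String name = Warehouse.makePartName(spec, true); // "ds=1/"
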
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
index dd7e3ce..4fa841b 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
@@ -33,7 +33,7 @@ public interface JDOConnectionURLHook {
* attempt.
*
* @param conf The configuration used to initialize this instance of the HMS
- * @return
+ * @return the connection URL
* @throws Exception
*/
public String getJdoConnectionUrl(Configuration conf)
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MDBPrivilege.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MDBPrivilege.java
index b76bf27..4550fb2 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MDBPrivilege.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MDBPrivilege.java
@@ -19,21 +19,21 @@
package org.apache.hadoop.hive.metastore.model;
public class MDBPrivilege {
-
+
private String principalName;
-
+
private String principalType;
-
+
private MDatabase database;
-
+
private int createTime;
-
+
private String privilege;
-
+
private String grantor;
-
+
private String grantorType;
-
+
private boolean grantOption;
public MDBPrivilege() {
@@ -75,12 +75,12 @@ public class MDBPrivilege {
}
/**
- * @param dbPrivileges a set of privileges this user/role/group has
+ * @param dbPrivilege the privilege this user/role/group has
*/
public void setPrivilege(String dbPrivilege) {
this.privilege = dbPrivilege;
}
-
+
public MDatabase getDatabase() {
return database;
}
@@ -96,7 +96,7 @@ public class MDBPrivilege {
public void setCreateTime(int createTime) {
this.createTime = createTime;
}
-
+
public String getGrantor() {
return grantor;
}
@@ -104,7 +104,7 @@ public class MDBPrivilege {
public void setGrantor(String grantor) {
this.grantor = grantor;
}
-
+
public String getGrantorType() {
return grantorType;
}
@@ -120,7 +120,7 @@ public class MDBPrivilege {
public void setGrantOption(boolean grantOption) {
this.grantOption = grantOption;
}
-
+
public String getPrincipalType() {
return principalType;
}
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MGlobalPrivilege.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MGlobalPrivilege.java
index f97e879..1dbe3a4 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MGlobalPrivilege.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MGlobalPrivilege.java
@@ -19,25 +19,25 @@
package org.apache.hadoop.hive.metastore.model;
/**
- * User global level privileges
+ * User global level privileges
*/
public class MGlobalPrivilege {
//principal name, can be a user, group, or role
private String principalName;
-
+
private String principalType;
-
+
private String privilege;
-
+
private int createTime;
-
+
private String grantor;
-
+
private String grantorType;
-
+
private boolean grantOption;
-
+
public MGlobalPrivilege() {
super();
}
@@ -63,12 +63,12 @@ public class MGlobalPrivilege {
}
/**
- * @param dbPrivileges set of global privileges to user
+ * @param dbPrivilege the global privilege granted to the user
*/
public void setPrivilege(String dbPrivilege) {
this.privilege = dbPrivilege;
}
-
+
public String getPrincipalName() {
return principalName;
}
@@ -92,7 +92,7 @@ public class MGlobalPrivilege {
public void setGrantor(String grantor) {
this.grantor = grantor;
}
-
+
public boolean getGrantOption() {
return grantOption;
}
@@ -100,7 +100,7 @@ public class MGlobalPrivilege {
public void setGrantOption(boolean grantOption) {
this.grantOption = grantOption;
}
-
+
public String getPrincipalType() {
return principalType;
}
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MIndex.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MIndex.java
index 6b788b9..c9f1149 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MIndex.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MIndex.java
@@ -24,7 +24,7 @@ import java.util.Map;
* Represents hive's index definition.
*/
public class MIndex {
-
+
private String indexName;
private MTable origTable;
private int createTime;
@@ -36,15 +36,17 @@ public class MIndex {
private boolean deferredRebuild;
public MIndex() {}
-
+
/**
* @param indexName
- * @param orignialTable
+ * @param baseTable
* @param createTime
* @param lastAccessTime
* @param parameters
* @param indexTable
* @param sd
+ * @param indexHandlerClass
+ * @param deferredRebuild
*/
public MIndex(String indexName, MTable baseTable, int createTime,
int lastAccessTime, Map<String, String> parameters, MTable indexTable,
@@ -146,7 +148,7 @@ public class MIndex {
public void setIndexTable(MTable indexTable) {
this.indexTable = indexTable;
}
-
+
/**
* @return storage descriptor
*/
@@ -174,14 +176,14 @@ public class MIndex {
public void setIndexHandlerClass(String indexHandlerClass) {
this.indexHandlerClass = indexHandlerClass;
}
-
+
/**
* @return auto rebuild
*/
public boolean isDeferredRebuild() {
return deferredRebuild;
}
-
+
/**
* @return auto rebuild
*/
@@ -190,7 +192,7 @@ public class MIndex {
}
/**
- * @param autoRebuild
+ * @param deferredRebuild
*/
public void setDeferredRebuild(boolean deferredRebuild) {
this.deferredRebuild = deferredRebuild;
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnPrivilege.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnPrivilege.java
index 68936bd..555222b 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnPrivilege.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionColumnPrivilege.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hive.metastore.model;
public class MPartitionColumnPrivilege {
- private String principalName;
+ private String principalName;
private String principalType;
@@ -32,25 +32,26 @@ public class MPartitionColumnPrivilege {
private String privilege;
private int createTime;
-
+
private String grantor;
-
+
private String grantorType;
-
+
private boolean grantOption;
public MPartitionColumnPrivilege() {
}
-
+
/**
* @param principalName
- * @param isRole
- * @param isGroup
+ * @param principalType
* @param partition
* @param columnName
* @param privileges
* @param createTime
* @param grantor
+ * @param grantorType
+ * @param grantOption
*/
public MPartitionColumnPrivilege(String principalName, String principalType,
MPartition partition, String columnName, String privileges, int createTime,
@@ -66,7 +67,7 @@ public class MPartitionColumnPrivilege {
this.grantorType = grantorType;
this.grantOption = grantOption;
}
-
+
/**
* @return column name
*/
@@ -132,7 +133,7 @@ public class MPartitionColumnPrivilege {
public void setGrantor(String grantor) {
this.grantor = grantor;
}
-
+
public String getGrantorType() {
return grantorType;
}
@@ -140,7 +141,7 @@ public class MPartitionColumnPrivilege {
public void setGrantorType(String grantorType) {
this.grantorType = grantorType;
}
-
+
public boolean getGrantOption() {
return grantOption;
}
@@ -148,7 +149,7 @@ public class MPartitionColumnPrivilege {
public void setGrantOption(boolean grantOption) {
this.grantOption = grantOption;
}
-
+
public String getPrincipalType() {
return principalType;
}
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java
index d77af76..266acd0 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java
@@ -64,7 +64,7 @@ public class MPartitionEvent {
}
/**
- * @param eventTime the eventTime to set
+ * @param createTime the eventTime to set
*/
public void setEventTime(long createTime) {
this.eventTime = createTime;
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionPrivilege.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionPrivilege.java
index 08da18e..2af0d00 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionPrivilege.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MPartitionPrivilege.java
@@ -19,23 +19,23 @@
package org.apache.hadoop.hive.metastore.model;
public class MPartitionPrivilege {
-
+
private String principalName;
-
+
private String principalType;
private MPartition partition;
-
+
private String privilege;
-
+
private int createTime;
-
+
private String grantor;
-
+
private String grantorType;
private boolean grantOption;
-
+
public MPartitionPrivilege() {
}
@@ -75,7 +75,7 @@ public class MPartitionPrivilege {
public void setPrivilege(String dbPrivilege) {
this.privilege = dbPrivilege;
}
-
+
/**
* @return create time
*/
@@ -91,7 +91,7 @@ public class MPartitionPrivilege {
}
/**
- * @return
+ * @return the grantor
*/
public String getGrantor() {
return grantor;
@@ -119,7 +119,7 @@ public class MPartitionPrivilege {
public void setPartition(MPartition partition) {
this.partition = partition;
}
-
+
public boolean getGrantOption() {
return grantOption;
}
@@ -127,7 +127,7 @@ public class MPartitionPrivilege {
public void setGrantOption(boolean grantOption) {
this.grantOption = grantOption;
}
-
+
public String getGrantorType() {
return grantorType;
}
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MRegionStorageDescriptor.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MRegionStorageDescriptor.java
index 7e50d30..a7c4b73 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MRegionStorageDescriptor.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MRegionStorageDescriptor.java
@@ -48,7 +48,7 @@ public class MRegionStorageDescriptor implements Serializable {
}
/**
- * @param region
+ * @param regionName
*/
public void setRegionName(String regionName) {
this.regionName = regionName;
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnPrivilege.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnPrivilege.java
index 2d1b010..e3ce12c 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnPrivilege.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableColumnPrivilege.java
@@ -20,36 +20,37 @@ package org.apache.hadoop.hive.metastore.model;
public class MTableColumnPrivilege {
- private String principalName;
+ private String principalName;
private String principalType;
private MTable table;
-
+
private String columnName;
private String privilege;
private int createTime;
-
+
private String grantor;
-
+
private String grantorType;
-
+
private boolean grantOption;
public MTableColumnPrivilege() {
}
-
+
/**
* @param principalName
- * @param isRole
- * @param isGroup
+ * @param principalType
* @param table
* @param columnName
* @param privileges
* @param createTime
* @param grantor
+ * @param grantorType
+ * @param grantOption
*/
public MTableColumnPrivilege(String principalName, String principalType,
MTable table, String columnName, String privileges, int createTime,
@@ -65,7 +66,7 @@ public class MTableColumnPrivilege {
this.grantorType = grantorType;
this.grantOption = grantOption;
}
-
+
/**
* @return column name
*/
@@ -123,7 +124,7 @@ public class MTableColumnPrivilege {
public void setTable(MTable table) {
this.table = table;
}
-
+
public String getGrantor() {
return grantor;
}
@@ -131,7 +132,7 @@ public class MTableColumnPrivilege {
public void setGrantor(String grantor) {
this.grantor = grantor;
}
-
+
public String getGrantorType() {
return grantorType;
}
@@ -139,7 +140,7 @@ public class MTableColumnPrivilege {
public void setGrantorType(String grantorType) {
this.grantorType = grantorType;
}
-
+
public boolean getGrantOption() {
return grantOption;
}
@@ -147,7 +148,7 @@ public class MTableColumnPrivilege {
public void setGrantOption(boolean grantOption) {
this.grantOption = grantOption;
}
-
+
public String getPrincipalType() {
return principalType;
}
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MTablePrivilege.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MTablePrivilege.java
index 54d11dd..bee33ee 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MTablePrivilege.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MTablePrivilege.java
@@ -19,23 +19,23 @@
package org.apache.hadoop.hive.metastore.model;
public class MTablePrivilege {
-
+
private String principalName;
-
+
private String principalType;
private MTable table;
-
+
private String privilege;
-
+
private int createTime;
-
+
private String grantor;
-
+
private String grantorType;
private boolean grantOption;
-
+
public MTablePrivilege() {
}
@@ -75,7 +75,7 @@ public class MTablePrivilege {
public void setPrivilege(String dbPrivilege) {
this.privilege = dbPrivilege;
}
-
+
/**
* @return create time
*/
@@ -91,7 +91,7 @@ public class MTablePrivilege {
}
/**
- * @return
+ * @return the grantor
*/
public String getGrantor() {
return grantor;
@@ -127,7 +127,7 @@ public class MTablePrivilege {
public void setGrantOption(boolean grantOption) {
this.grantOption = grantOption;
}
-
+
public String getGrantorType() {
return grantorType;
}
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/DummyListener.java metastore/src/test/org/apache/hadoop/hive/metastore/DummyListener.java
index 124cc4c..a8b30da 100644
--- metastore/src/test/org/apache/hadoop/hive/metastore/DummyListener.java
+++ metastore/src/test/org/apache/hadoop/hive/metastore/DummyListener.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hive.metastore.events.ListenerEvent;
import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
/** A dummy implementation for
- * {@link org.apache.hadoop.hive.metastore.hadooorg.apache.hadoop.hive.metastore.MetaStoreEventListener}
+ * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener}
* for testing purposes.
*/
public class DummyListener extends MetaStoreEventListener{
diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java metastore/src/test/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
index 1270610..b8f9935 100644
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hive.ql.session.SessionState;
/**
* TestMetaStoreEventListener. Test case for
- * {@link org.apache.hadoop.hive.metastore.hadooorg.apache.hadoop.hive.metastore.MetaStoreEventListener}
+ * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener}
*/
public class TestMetaStoreEventListener extends TestCase {
private static final String msPort = "20001";
diff --git ql/src/java/org/apache/hadoop/hive/ql/Driver.java ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 208d5cd..4c60833 100644
--- ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -387,7 +387,7 @@ public class Driver implements CommandProcessor {
* is useful for generating re-entrant QL queries.
* @param command The HiveQL query to compile
* @param resetTaskIds Resets taskID counter if true.
- * @return
+ * @return 0 for ok
*/
public int compile(String command, boolean resetTaskIds) {
PerfLogger perfLogger = PerfLogger.getPerfLogger();
@@ -1264,9 +1264,8 @@ public class Driver implements CommandProcessor {
* name of the task, if it is a map-reduce job
* @param jobs
* number of map-reduce jobs
- * @param curJobNo
- * the sequential number of the next map-reduce job
- * @return the updated number of last the map-reduce job launched
+ * @param cxt
+ * the driver context
*/
public void launchTask(Task<? extends Serializable> tsk, String queryId, boolean noName,
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
index 5c94878..30553cd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
@@ -47,7 +47,7 @@ import org.apache.hadoop.hive.ql.metadata.Table;
@SuppressWarnings("nls")
public final class ArchiveUtils {
private static final Log LOG = LogFactory.getLog(ArchiveUtils.class.getName());
-
+
public static String ARCHIVING_LEVEL = "archiving_level";
/**
@@ -91,7 +91,7 @@ public final class ArchiveUtils {
prefixFields.add(fs);
prefixValues.add(partSpec.get(fs.getName()));
}
-
+
return new PartSpecInfo(prefixFields, prefixValues);
}
@@ -229,7 +229,7 @@ public final class ArchiveUtils {
* Determines whether a partition has been archived
*
* @param p
- * @return
+ * @return true if the partition is archived
*/
public static boolean isArchived(Partition p) {
Map<String, String> params = p.getParameters();
@@ -264,12 +264,12 @@ public final class ArchiveUtils {
* argument, level, is used for the prefix length. For example, partition
* (ds='2010-01-01', hr='00', min='00'), level 1 will reture 'ds=2010-01-01',
* and level 2 will return 'ds=2010-01-01/hr=00'.
- *
+ *
* @param p
* partition object
* @param level
* level for prefix depth
- * @return
+ * @return prefix of partition's string representation
* @throws HiveException
*/
public static String getPartialName(Partition p, int level) throws HiveException {
@@ -315,7 +315,7 @@ public final class ArchiveUtils {
public static String conflictingArchiveNameOrNull(Hive db, Table tbl,
LinkedHashMap<String, String> partSpec)
throws HiveException {
-
+
List<FieldSchema> partKeys = tbl.getPartitionKeys();
int partSpecLevel = 0;
for (FieldSchema partKey : partKeys) {
@@ -324,7 +324,7 @@ public final class ArchiveUtils {
}
partSpecLevel++;
}
-
+
if(partSpecLevel != partSpec.size()) {
throw new HiveException("partspec " + partSpec
+ " is wrong for table " + tbl.getTableName());
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapperContext.java ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapperContext.java
index 2867039..f064efd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapperContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapperContext.java
@@ -74,7 +74,7 @@ public class ExecMapperContext {
* after the input file changed. This is first introduced to process bucket
* map join.
*
- * @return
+ * @return true if the input file changed
*/
public boolean inputFileChanged() {
if (!inputFileChecked) {
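
A hedged sketch of how an operator might consume inputFileChanged() to reset per-file state, the bucket map join case mentioned above (the helper is hypothetical):

    if (execContext.inputFileChanged()) {
      // the mapper moved on to a new input file; rebuild per-bucket state
      reloadHashTableForNewFile(); // hypothetical helper
    }
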
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java
index 441b637..3a7e180 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java
@@ -170,7 +170,7 @@ public class ExprNodeGenericFuncEvaluator extends ExprNodeEvaluator {
* If the genericUDF is not a base comparison, or there is an error executing the comparison, it
* returns null.
* @param row
- * @return
+ * @return the comparison result, or null as described above
* @throws HiveException
*/
public Integer compare(Object row) throws HiveException {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index b9e698c..c8a9765 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -716,7 +716,9 @@ public final class FunctionRegistry {
*
* @param name
* the name of the UDAF
- * @param argumentTypeInfos
+ * @param argumentOIs
+ * @param isDistinct
+ * @param isAllColumns
* @return The UDAF evaluator
*/
@SuppressWarnings("deprecation")
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java
index 25f0d2b..b1e0b35 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/HadoopJobExecHelper.java
@@ -100,7 +100,7 @@ public class HadoopJobExecHelper {
* this msg pattern is used to track when a job is successfully done.
*
* @param jobId
- * @return
+ * @return the job end message
*/
public static String getJobEndMsg(String jobId) {
return "Ended Job = " + jobId;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 1a4fe9d..823d995 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -1336,8 +1336,6 @@ public final class Utilities {
/**
* Remove all temporary files and duplicate (double-committed) files from a given directory.
- *
- * @return a list of path names corresponding to should-be-created empty buckets.
*/
public static void removeTempOrDuplicateFiles(FileSystem fs, Path path) throws IOException {
removeTempOrDuplicateFiles(fs, path, null);
@@ -1645,8 +1643,8 @@ public final class Utilities {
/**
* Calculate the total size of input files.
*
- * @param job
- * the hadoop job conf.
+ * @param ctx
+ * the hadoop job context
* @param work
* map reduce job plan
* @param filter
@@ -2113,10 +2111,10 @@ public final class Utilities {
* is a SQLRecoverableException or SQLNonTransientException. For SQLRecoverableException
* the caller needs to reconnect to the database and restart the whole transaction.
*
- * @param query the prepared statement of SQL.
- * @param type either SQLCommandType.QUERY or SQLCommandType.UPDATE
+ * @param cmd the SQL command
+ * @param stmt the prepared statement of SQL.
* @param baseWindow The base time window (in milliseconds) before the next retry.
- * see {@getRandomWaitTime} for details.
+ * see {@link #getRandomWaitTime} for details.
* @param maxRetries the maximum # of retries when getting a SQLTransientException.
* @throws SQLException throws SQLRecoverableException or SQLNonTransientException the
* first time it is caught, or SQLTransientException when maxRetries has been reached.
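A self-contained sketch of the retry contract these methods share (simplified; the real helper also distinguishes query from update execution):

    import java.sql.PreparedStatement;
    import java.sql.SQLException;
    import java.sql.SQLTransientException;
    import java.util.Random;

    public final class RetrySketch {
      // Transient failures retry with a randomized, expanding wait;
      // recoverable and non-transient failures surface immediately.
      public static void executeWithRetry(PreparedStatement stmt, long baseWindow,
          int maxRetries) throws SQLException {
        Random r = new Random();
        for (int failures = 0; ; failures++) {
          try {
            stmt.execute();
            return;
          } catch (SQLTransientException e) {
            if (failures >= maxRetries) {
              throw e; // maxRetries reached
            }
            try {
              // expanding window per failure, as in getRandomWaitTime
              Thread.sleep((long) (baseWindow * (failures + 1) * r.nextDouble()));
            } catch (InterruptedException ie) {
              throw new SQLException(ie);
            }
          }
          // SQLRecoverableException / SQLNonTransientException are not caught here
        }
      }
    }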
@@ -2157,8 +2155,8 @@ public final class Utilities {
* the caller needs to reconnect to the database and restart the whole transaction.
*
* @param connectionString the JDBC connection string.
- * @param baseWindow The base time window (in milliseconds) before the next retry.
- * see {@getRandomWaitTime} for details.
+ * @param waitWindow The base time window (in milliseconds) before the next retry.
+ * see {@link #getRandomWaitTime} for details.
* @param maxRetries the maximum # of retries when getting a SQLTransientException.
* @throws SQLException throws SQLRecoverableException or SQLNonTransientException the
* first time it is caught, or SQLTransientException when maxRetries has been reached.
@@ -2199,8 +2197,8 @@ public final class Utilities {
*
* @param conn a JDBC connection.
* @param stmt the SQL statement to be prepared.
- * @param baseWindow The base time window (in milliseconds) before the next retry.
- * see {@getRandomWaitTime} for details.
+ * @param waitWindow The base time window (in milliseconds) before the next retry.
+ * see {@link #getRandomWaitTime} for details.
* @param maxRetries the maximum # of retries when getting a SQLTransientException.
* @throws SQLException throws SQLRecoverableException or SQLNonTransientException the
* first time it is caught, or SQLTransientException when maxRetries has been reached.
@@ -2252,13 +2250,13 @@ public final class Utilities {
baseWindow * (failures + 1) * r.nextDouble()); // expanding time window for each failure
}
+ public static final char sqlEscapeChar = '\\';
+
/**
* Escape the '_', '%', as well as the escape characters inside the string key.
* @param key the string that will be used for the SQL LIKE operator.
- * @param escape the escape character
* @return a string with escaped '_' and '%'.
*/
- public static final char sqlEscapeChar = '\\';
public static String escapeSqlLike(String key) {
StringBuffer sb = new StringBuffer(key.length());
for (char c: key.toCharArray()) {
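The escaping loop follows directly from the Javadoc; a sketch consistent with it (StringBuilder swapped in for brevity):

    public static String escapeSqlLike(String key) {
      StringBuilder sb = new StringBuilder(key.length());
      for (char c : key.toCharArray()) {
        switch (c) {
        case '_':
        case '%':
        case '\\': // the escape character itself must also be escaped
          sb.append(sqlEscapeChar);
          // fall through to append the character
        default:
          sb.append(c);
        }
      }
      return sb.toString();
    }

For example, escapeSqlLike("50%_off") produces 50\%\_off, suitable for use with LIKE ? ESCAPE '\'.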
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinDoubleKeys.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinDoubleKeys.java
index 6cf144b..702d947 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinDoubleKeys.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinDoubleKeys.java
@@ -40,8 +40,8 @@ public class MapJoinDoubleKeys extends AbstractMapJoinKey {
}
/**
- * @param metadataTag
- * @param obj
+ * @param obj1
+ * @param obj2
*/
public MapJoinDoubleKeys(Object obj1, Object obj2) {
this.obj1 = obj1;
@@ -148,7 +148,7 @@ public class MapJoinDoubleKeys extends AbstractMapJoinKey {
}
/**
- * @param obj
+ * @param obj1
* the obj to set
*/
public void setObj1(Object obj1) {
@@ -163,7 +163,7 @@ public class MapJoinDoubleKeys extends AbstractMapJoinKey {
}
/**
- * @param obj
+ * @param obj2
* the obj to set
*/
public void setObj2(Object obj2) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java
index a344daf..a941580 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java
@@ -42,7 +42,6 @@ public class MapJoinObjectKey extends AbstractMapJoinKey {
}
/**
- * @param metadataTag
* @param obj
*/
public MapJoinObjectKey(Object[] obj) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinSingleKey.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinSingleKey.java
index 9ac071b..6c02d7f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinSingleKey.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinSingleKey.java
@@ -39,7 +39,6 @@ public class MapJoinSingleKey extends AbstractMapJoinKey {
}
/**
- * @param metadataTag
* @param obj
*/
public MapJoinSingleKey(Object obj) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java
index 55f49ba..e2f3ea5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java
@@ -352,8 +352,9 @@ public class HiveHistory {
/**
* Serialize the task counters and set as a task property.
*
+ * @param queryId
* @param taskId
- * @param rj
+ * @param ctrs
*/
public void setTaskCounters(String queryId, String taskId, Counters ctrs) {
String id = queryId + ":" + taskId;
diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
index 7cc4763..b687a2f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
+++ ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
@@ -350,7 +350,7 @@ public class LineageInfo implements Serializable {
}
/**
- * @param basecols the baseCols to set
+ * @param baseCols the baseCols to set
*/
public void setBaseCols(List baseCols) {
this.baseCols = baseCols;
diff --git ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
index 1a42a9f..1e577da 100644
--- ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
@@ -40,7 +40,7 @@ public interface HiveIndexHandler extends Configurable {
/**
* Determines whether this handler implements indexes by creating an index
* table.
- *
+ *
* @return true if index creation implies creation of an index table in Hive;
* false if the index representation is not stored in a Hive table
*/
@@ -49,13 +49,13 @@ public interface HiveIndexHandler extends Configurable {
/**
* Requests that the handler validate an index definition and fill in
* additional information about its stored representation.
- *
+ *
* @param baseTable
* the definition of the table being indexed
- *
+ *
* @param index
* the definition of the index being created
- *
+ *
* @param indexTable
* a partial definition of the index table to be used for storing the
* index representation, or null if usesIndexTable() returns false;
@@ -63,9 +63,9 @@ public interface HiveIndexHandler extends Configurable {
* information about input/output format) and/or the index table's
* definition (typically with additional columns containing the index
* representation, e.g. pointers into HDFS).
- *
- * @throw HiveException if the index definition is invalid with respect to
- * either the base table or the supplied index table definition
+ *
+ * @throws HiveException if the index definition is invalid with respect to
+ * either the base table or the supplied index table definition
*/
void analyzeIndexDefinition(
org.apache.hadoop.hive.metastore.api.Table baseTable,
@@ -76,38 +76,32 @@ public interface HiveIndexHandler extends Configurable {
/**
* Requests that the handler generate a plan for building the index; the plan
* should read the base table and write out the index representation.
- *
- * @param outputs
- * @param inputs
- *
- * @param baseTable
+ *
+ * @param baseTbl
* the definition of the table being indexed
- *
+ *
* @param index
* the definition of the index
- *
- * @param indexTblPartitions
- * list of index partitions
- *
+ *
* @param baseTblPartitions
* list of base table partitions, where each element mirrors the
* corresponding one in indexTblPartitions
- *
- * @param indexTable
+ *
+ * @param indexTbl
* the definition of the index table, or null if usesIndexTable()
* returns false
- *
+ *
* @param inputs
- * inputs for hooks, supplemental outputs going
+ * inputs for hooks, supplemental outputs going
* along with the return value
- *
+ *
* @param outputs
- * outputs for hooks, supplemental outputs going
+ * outputs for hooks, supplemental outputs going
* along with the return value
- *
+ *
* @return list of tasks to be executed in parallel for building the index
- *
- * @throw HiveException if plan generation fails
+ *
+ * @throws HiveException if plan generation fails
*/
List<Task<?>> generateIndexBuildTaskList(
org.apache.hadoop.hive.ql.metadata.Table baseTbl,
@@ -123,11 +117,11 @@ public interface HiveIndexHandler extends Configurable {
* provided, it is up to the handler whether to use none, one, some or all of
* them. The supplied predicate may reference any of the columns from any of
* the indexes. If the handler decides to use more than one index, it is
- * responsible for generating tasks to combine their search results
+ * responsible for generating tasks to combine their search results
* (e.g. performing a JOIN on the result).
* @param indexes
* @param predicate
- * @param parseContext
+ * @param pctx
* @param queryContext contains results, such as query tasks and input configuration
*/
void generateIndexQuery(List<Index> indexes, ExprNodeDesc predicate,
diff --git ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java
index c7da20e..a22cbd4 100644
--- ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java
+++ ql/src/java/org/apache/hadoop/hive/ql/index/IndexSearchCondition.java
@@ -44,7 +44,7 @@ public class IndexSearchCondition
*
* @param constantDesc constant value to search for
*
- * @Param comparisonExpr the original comparison expression
+ * @param comparisonExpr the original comparison expression
*/
public IndexSearchCondition(
ExprNodeColumnDesc columnDesc,
@@ -61,7 +61,7 @@ public class IndexSearchCondition
public void setColumnDesc(ExprNodeColumnDesc columnDesc) {
this.columnDesc = columnDesc;
}
-
+
public ExprNodeColumnDesc getColumnDesc() {
return columnDesc;
}
@@ -89,7 +89,7 @@ public class IndexSearchCondition
public ExprNodeDesc getComparisonExpr() {
return comparisonExpr;
}
-
+
@Override
public String toString() {
return comparisonExpr.getExprString();
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
index ef6cdf7..08e127c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
@@ -333,7 +333,7 @@ public abstract class HiveContextAwareRecordReader implements RecordReader
/**
* Returns true if the current comparison is in the list of stop comparisons, i.e. we've found
* all records which won't be filtered
- * @return
+ * @return true if the current comparison is in the list of stop comparisons
*/
public boolean foundAllTargets() {
if (this.getIOContext().getComparison() == null ||
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/AuthorizationException.java ql/src/java/org/apache/hadoop/hive/ql/metadata/AuthorizationException.java
index 5b9bfaa..c8fc06f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/AuthorizationException.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/AuthorizationException.java
@@ -30,8 +30,8 @@ public class AuthorizationException extends RuntimeException {
/**
* Constructs an {@link AuthorizationException} with the specified detail
* message.
- *
- * @param s
+ *
+ * @param message
* the detail message.
*/
public AuthorizationException(String message) {
@@ -40,7 +40,7 @@ public class AuthorizationException extends RuntimeException {
/**
* Constructs an {@link AuthorizationException} with the specified cause.
- *
+ *
* @param cause
* the cause
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index ee93dbf..0531620 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -248,7 +248,6 @@ public class Hive {
* @param name
* @param deleteData
* @param ignoreUnknownDb if true, will ignore NoSuchObjectException
- * @return
* @throws HiveException
* @throws NoSuchObjectException
*/
@@ -264,7 +263,6 @@ public class Hive {
* @param ignoreUnknownDb if true, will ignore NoSuchObjectException
* @param cascade if true, delete all tables in the DB if they exist. Otherwise, the query
* will fail if any table still exists.
- * @return
* @throws HiveException
* @throws NoSuchObjectException
*/
@@ -409,8 +407,8 @@ public class Hive {
*
* @param tblName
* name of the existing table
- * @param newTbl
- * new name of the table. could be the old name
+ * @param newPart
+ * new partition
* @throws InvalidOperationException
* if the changes in metadata is not acceptable
* @throws TException
@@ -458,7 +456,7 @@ public class Hive {
throw new HiveException("Unable to rename partition to the same name: old and new partition cols don't match. ");
}
List<String> pvals = new ArrayList<String>();
-
+
for (FieldSchema field : tbl.getPartCols()) {
String val = oldPartSpec.get(field.getName());
if (val == null || val.length() == 0) {
@@ -763,16 +761,9 @@ public class Hive {
}
/**
- * Drops table along with the data in it. If the table doesn't exist
- * then it is a no-op
- * @param dbName database where the table lives
- * @param tableName table to drop
- * @throws HiveException thrown if the drop fails
* Drops table along with the data in it. If the table doesn't exist then it
* is a no-op
*
- * @param dbName
- * database where the table lives
* @param tableName
* table to drop
* @throws HiveException
@@ -784,11 +775,6 @@ public class Hive {
}
/**
- * Drops table along with the data in it. If the table doesn't exist
- * then it is a no-op
- * @param dbName database where the table lives
- * @param tableName table to drop
- * @throws HiveException thrown if the drop fails
* Drops table along with the data in it. If the table doesn't exist then it
* is a no-op
*
@@ -806,6 +792,7 @@ public class Hive {
/**
* Drops the table.
*
+ * @param dbName
* @param tableName
* @param deleteData
* deletes the underlying data along with metadata
@@ -834,7 +821,7 @@ public class Hive {
/**
* Returns metadata for the table named tableName
* @param tableName the name of the table
- * @return
+ * @return the table metadata
* @throws HiveException if there's an internal error or if the
* table doesn't exist
*/
@@ -847,7 +834,7 @@ public class Hive {
* Returns metadata for the table named tableName
* @param tableName the name of the table
* @param throwException controls whether an exception is thrown or a returns a null
- * @return
+ * @return the table metadata
* @throws HiveException if there's an internal error or if the
* table doesn't exist
*/
@@ -1052,15 +1039,9 @@ public class Hive {
}
/**
- * @param userName
- * principal name
- * @param isRole
- * is the given principal name a role
- * @param isGroup
- * is the given principal name a group
* @param privileges
* a bag of privileges
- * @return
+ * @return true on success
* @throws HiveException
*/
public boolean revokePrivileges(PrivilegeBag privileges)
@@ -1118,8 +1099,6 @@ public class Hive {
* @param holdDDLTime if true, force [re]create the partition
* @param inheritTableSpecs if true, on [re]creating the partition, take the
* location/inputformat/outputformat/serde details from table spec
- * @param tmpDirPath
- * The temporary directory.
*/
public void loadPartition(Path loadPath, String tableName,
Map<String, String> partSpec, boolean replace, boolean holdDDLTime,
@@ -1195,9 +1174,9 @@ public class Hive {
* @param tableName
* @param partSpec
* @param replace
- * @param tmpDirPath
- * @param numSp: number of static partitions in the partition spec
- * @return
+ * @param numDP number of dynamic partitions
+ * @param holdDDLTime
+ * @return a list of the full partition specs created by dynamic partitioning
* @throws HiveException
*/
public ArrayList<LinkedHashMap<String, String>> loadDynamicPartitions(Path loadPath,
@@ -1221,7 +1200,7 @@ public class Hive {
validPartitions.add(s.getPath().getParent());
}
}
-
+
if (validPartitions.size() == 0) {
LOG.warn("No partition is generated by dynamic partitioning");
}
@@ -1242,7 +1221,7 @@ public class Hive {
Path partPath = iter.next();
assert fs.getFileStatus(partPath).isDir():
"partitions " + partPath + " is not a directory !";
-
+
// generate a full partition specification
LinkedHashMap<String, String> fullPartSpec = new LinkedHashMap<String, String>(partSpec);
Warehouse.makeSpecFromName(fullPartSpec, partPath);
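Warehouse.makeSpecFromName reverses the key=value path construction; a hedged, simplified sketch of the idea (specFromPath is illustrative; no unescaping or validation):

    static LinkedHashMap<String, String> specFromPath(String partPath) {
      LinkedHashMap<String, String> spec = new LinkedHashMap<String, String>();
      for (String dir : partPath.split("/")) {
        int eq = dir.indexOf('=');
        if (eq > 0) {
          spec.put(dir.substring(0, eq), dir.substring(eq + 1));
        }
      }
      return spec;
    }

For example, specFromPath("tbl/ds=2010-01-01/hr=00") yields {ds=2010-01-01, hr=00}.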
@@ -1270,8 +1249,7 @@ public class Hive {
* name of table to be loaded.
* @param replace
* if true - replace files in the table, otherwise add files to table
- * @param tmpDirPath
- * The temporary directory.
+ * @param holdDDLTime
*/
public void loadTable(Path loadPath, String tableName, boolean replace,
boolean holdDDLTime) throws HiveException {
@@ -1321,7 +1299,7 @@ public class Hive {
* @param partParams
* partition parameters
* @param inputFormat the inputformat class
- * @param outputformat the outputformat class
+ * @param outputFormat the outputformat class
* @param numBuckets the number of buckets
* @param cols the column schema
* @param serializationLib the serde class
@@ -1641,7 +1619,7 @@ public class Hive {
if (!tbl.isPartitioned()) {
throw new HiveException("Partition spec should only be supplied for a " +
- "partitioned table");
+ "partitioned table");
}
List<String> names = getPartitionNames(tbl.getDbName(), tbl.getTableName(),
@@ -1733,7 +1711,7 @@ public class Hive {
/**
* Get the name of the current database
- * @return
+ * @return the current database name
*/
public String getCurrentDatabase() {
if (null == currentDatabase) {
@@ -1833,7 +1811,7 @@ public class Hive {
* user name
* @param group_names
* group names
- * @return
+ * @return the privilege set
* @throws HiveException
*/
public PrincipalPrivilegeSet get_privilege_set(HiveObjectType objectType,
@@ -1858,7 +1836,7 @@ public class Hive {
* @param tableName
* @param partValues
* @param columnName
- * @return
+ * @return list of privileges
* @throws HiveException
*/
public List<HiveObjectPrivilege> showPrivilegeGrant(
@@ -2046,20 +2024,20 @@ public class Hive {
// rename src directory to destf
if (srcs.length == 1 && srcs[0].isDir()) {
- // rename can fail if the parent doesn't exist
- if (!fs.exists(destf.getParent())) {
- fs.mkdirs(destf.getParent());
- }
- if (fs.exists(destf)) {
- fs.delete(destf, true);
- }
-
- boolean b = fs.rename(srcs[0].getPath(), destf);
- if (!b) {
- throw new HiveException("Unable to move results from " + srcs[0].getPath()
- + " to destination directory: " + destf);
- }
- LOG.debug("Renaming:" + srcf.toString() + " to " + destf.toString() + ",Status:" + b);
+ // rename can fail if the parent doesn't exist
+ if (!fs.exists(destf.getParent())) {
+ fs.mkdirs(destf.getParent());
+ }
+ if (fs.exists(destf)) {
+ fs.delete(destf, true);
+ }
+
+ boolean b = fs.rename(srcs[0].getPath(), destf);
+ if (!b) {
+ throw new HiveException("Unable to move results from " + srcs[0].getPath()
+ + " to destination directory: " + destf);
+ }
+ LOG.debug("Renaming:" + srcf.toString() + " to " + destf.toString() + ",Status:" + b);
} else { // srcf is a file or pattern containing wildcards
if (!fs.exists(destf)) {
fs.mkdirs(destf);
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
index d0ff67e..14c82fa 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
@@ -290,7 +290,7 @@ public class Partition implements Serializable {
}
/**
- * @param class1
+ * @param outputFormatClass
*/
public void setOutputFormatClass(Class<? extends HiveOutputFormat> outputFormatClass) {
this.outputFormatClass = outputFormatClass;
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index bf2ab34..8aa23aa 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -526,8 +526,6 @@ public class Table implements Serializable {
*
* @param srcf
* Source directory
- * @param tmpd
- * Temporary directory
*/
protected void replaceFiles(Path srcf) throws HiveException {
Path tableDest = new Path(getDataLocation().getPath());
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
index 60f7f7c..b4b558b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
@@ -244,13 +244,15 @@ public class MapJoinProcessor implements Transform {
/**
* Convert a regular join to a map-side join.
*
+ * @param opParseCtxMap
* @param op
* join operator
- * @param qbJoin
+ * @param joinTree
* qb join tree
* @param mapJoinPos
* position of the source to be read as part of map-reduce framework. All other sources
* are cached in memory
+ * @param noCheckOuterJoin
*/
public static MapJoinOperator convertMapJoin(
LinkedHashMap<Operator<? extends Serializable>, OpParseContext> opParseCtxMap,
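For context, the conversion exploits the asymmetry the Javadoc describes: every input except the one at mapJoinPos is built into an in-memory hash table, and the remaining (big) input streams past it. A hedged, schematic illustration (the row and collection types here are stand-ins, not Hive operator code):

    Map<String, List<String[]>> cache = new HashMap<String, List<String[]>>();
    for (String[] row : smallTableRows) {        // cached sides
      List<String[]> bucket = cache.get(row[0]); // row[0] = join key
      if (bucket == null) {
        bucket = new ArrayList<String[]>();
        cache.put(row[0], bucket);
      }
      bucket.add(row);
    }
    for (String[] row : bigTableRows) {          // streamed side (mapJoinPos)
      List<String[]> matches = cache.get(row[0]);
      if (matches != null) {
        // emit joined rows
      }
    }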
@@ -506,7 +508,7 @@ public class MapJoinProcessor implements Transform {
*
*
* @param condns
- * @return
+ * @return list of big table candidates
*/
public static HashSet<Integer> getBigTableCandidates(JoinCondDesc[] condns) {
HashSet<Integer> bigTableCandidates = new HashSet<Integer>();
@@ -928,7 +930,7 @@ public class MapJoinProcessor implements Transform {
/**
* @param listMapJoinsNoRed
- * @param pGraphContext2
+ * @param pGraphContext
*/
public MapJoinWalkerCtx(List<AbstractMapJoinOperator<? extends MapJoinDesc>> listMapJoinsNoRed,
ParseContext pGraphContext) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java
index c6e82e5..0b55ac4 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteParseContextGenerator.java
@@ -53,7 +53,7 @@ public final class RewriteParseContextGenerator {
* Parse the input {@link String} command and generate an ASTNode tree.
* @param conf
* @param command
- * @return
+ * @return the parse context
* @throws SemanticException
*/
public static ParseContext generateOperatorTree(HiveConf conf,
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
index 3a5c3c4..fe403c4 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
@@ -100,7 +100,7 @@ public class LineageCtx implements NodeProcessorCtx {
*
* @param op The operator of the column whose dependency is being modified.
* @param ci The column info of the associated column.
- * @param dependency The new dependency.
+ * @param dep The new dependency.
*/
public void mergeDependency(Operator<? extends Serializable> op,
ColumnInfo ci, Dependency dep) {
@@ -173,7 +173,7 @@ public class LineageCtx implements NodeProcessorCtx {
*
* @param old_type The old dependency type.
* @param curr_type The current operator's dependency type.
- * @return
+ * @return the dependency type
*/
public static LineageInfo.DependencyType getNewDependencyType(
LineageInfo.DependencyType old_type, LineageInfo.DependencyType curr_type) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalPlanResolver.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalPlanResolver.java
index ab614e0..f1d4218 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalPlanResolver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalPlanResolver.java
@@ -28,9 +28,9 @@ public interface PhysicalPlanResolver {
/**
* All physical plan resolvers have to implement this entry method.
- *
+ *
* @param pctx
- * @return
+ * @return the resolved physical context
*/
PhysicalContext resolve(PhysicalContext pctx) throws SemanticException;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 23a011d..0c4dac0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -319,7 +319,8 @@ public abstract class BaseSemanticAnalyzer {
}
/**
- * @param Get the name from a table node
+ * Get the name from a table node.
+ * @param tableNameNode the table node
* @return if DB name is given, db.tab is returned. Otherwise, tab.
*/
public static String getUnescapedName(ASTNode tableNameNode) {
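A hedged sketch of the stated @return behavior (backtick stripping stands in for the real identifier unescaping):

    static String unescapedName(String dbName, String tabName) {
      String tab = tabName.replace("`", "");
      if (dbName == null) {
        return tab;                                // "tab"
      }
      return dbName.replace("`", "") + "." + tab;  // "db.tab"
    }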
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHookContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHookContext.java
index 0ad9076..ae371f3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHookContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHookContext.java
@@ -48,7 +48,7 @@ public interface HiveSemanticAnalyzerHookContext extends Configurable{
/**
* The following methods will only be available to hooks executing postAnalyze. If called in a
* preAnalyze method, they should return null.
- * @return
+ * @return the set of read entities
*/
public Set<ReadEntity> getInputs();
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 4c5a5f3..8157bcd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -104,12 +104,14 @@ public class ParseContext {
}
/**
+ * @param conf
* @param qb
* current QB
* @param ast
* current parse tree
* @param opToPartPruner
* map from table scan operator to partition pruner
+ * @param opToPartList
* @param topOps
* list of operators for the top query
* @param topSelOps
@@ -121,7 +123,6 @@ public class ParseContext {
* context needed join processing (map join specifically)
* @param topToTable
* the top tables being processed
- * @param fopToTable the table schemas that are being inserted into
* @param loadTableWork
* list of destination tables being loaded
* @param loadFileWork
@@ -129,13 +130,16 @@ public class ParseContext {
* @param ctx
* parse context
* @param idToTableNameMap
- * @param destTableId
* @param uCtx
+ * @param destTableId
* @param listMapJoinOpsNoReducer
* list of map join operators with no reducer
+ * @param groupOpToInputTables
+ * @param prunedPartitions
* @param opToSamplePruner
* operator to sample pruner map
- * @param semanticInputs
+ * @param globalLimitCtx
+ * @param nameToSplitSample
* @param rootTasks
*/
public ParseContext(
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java
index 854a8d6..4332776 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java
@@ -260,7 +260,7 @@ public class QBJoinTree implements Serializable{
* Remember the mapping of table alias to set of columns.
*
* @param alias
- * @param columns
+ * @param column
*/
public void addRHSSemijoinColumns(String alias, ASTNode column) {
ArrayList<ASTNode> cols = rhsSemijoin.get(alias);
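The body presumably continues with the standard multimap-insert pattern; a sketch, not a quote of the source:

    ArrayList<ASTNode> cols = rhsSemijoin.get(alias);
    if (cols == null) {
      cols = new ArrayList<ASTNode>();
      rhsSemijoin.put(alias, cols);
    }
    cols.add(column);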
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
index 28ed827..b865f04 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
@@ -108,8 +108,9 @@ public class DDLWork implements Serializable {
}
/**
- * @param dropDatabaseDesc
- * Drop Database descriptor
+ * @param inputs
+ * @param outputs
+ * @param descDatabaseDesc Database descriptor
*/
public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
DescDatabaseDesc descDatabaseDesc) {
@@ -324,8 +325,9 @@ public class DDLWork implements Serializable {
}
/**
- * @param touchDesc
- * information about the table/partitions that we want to touch
+ * @param inputs
+ * @param outputs
+ * @param simpleDesc
*/
public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
AlterTableSimpleDesc simpleDesc) {
@@ -484,8 +486,8 @@ public class DDLWork implements Serializable {
}
/**
- * @param alterTblDesc
- * the alterTblDesc to set
+ * @param alterIndexDesc
+ * the alterIndexDesc to set
*/
public void setAlterIndexDesc(AlterIndexDesc alterIndexDesc) {
this.alterIndexDesc = alterIndexDesc;
@@ -872,9 +874,6 @@ public class DDLWork implements Serializable {
this.revokeDesc = revokeDesc;
}
- /**
- * @return
- */
public GrantRevokeRoleDDL getGrantRevokeRoleDDL() {
return grantRevokeRoleDDL;
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DescDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DescDatabaseDesc.java
index f7854e6..99e1fa8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/DescDatabaseDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/DescDatabaseDesc.java
@@ -44,9 +44,9 @@ public class DescDatabaseDesc extends DDLDesc implements Serializable {
}
/**
- * @param partSpec
* @param resFile
- * @param tableName
+ * @param dbName
+ * @param isExt
*/
public DescDatabaseDesc(Path resFile, String dbName, boolean isExt) {
this.isExt = isExt;
@@ -82,8 +82,8 @@ public class DescDatabaseDesc extends DDLDesc implements Serializable {
}
/**
- * @param tableName
- * the tableName to set
+ * @param db
+ * the database name to set
*/
public void setDatabaseName(String db) {
this.dbName = db;
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java
index 1a346f2..ae6487d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java
@@ -97,7 +97,7 @@ public class DescTableDesc extends DDLDesc implements Serializable {
}
/**
- * @param isFormatted
+ * @param isFormat
* the isFormat to set
*/
public void setFormatted(boolean isFormat) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
index df9097d..9ce4039 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
@@ -312,7 +312,7 @@ public class JoinDesc implements Serializable {
/**
* set the mapping from tbl to dir for small keys.
*
- * @param bigKeysDirMap
+ * @param smallKeysDirMap
*/
public void setSmallKeysDirMap(Map<Byte, Map<Byte, String>> smallKeysDirMap) {
this.smallKeysDirMap = smallKeysDirMap;
@@ -343,7 +343,7 @@ public class JoinDesc implements Serializable {
}
/**
- * @param skewKeysValuesTable
+ * @param skewKeysValuesTables
* set the table desc for storing skew keys and their corresponding
* value;
*/
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/MsckDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/MsckDesc.java
index 799307d..b7a7e4b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/MsckDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/MsckDesc.java
@@ -41,10 +41,10 @@ public class MsckDesc extends DDLWork implements Serializable {
*/
public MsckDesc() {
}
-
+
/**
* Description of a msck command.
- *
+ *
* @param tableName
* Table to check, can be null.
* @param partSpecs
@@ -89,7 +89,7 @@ public class MsckDesc extends DDLWork implements Serializable {
}
/**
- * @param partitionSpec
+ * @param partSpecs
* partitions to check.
*/
public void setPartSpecs(ArrayList<LinkedHashMap<String, String>> partSpecs) {
@@ -119,7 +119,7 @@ public class MsckDesc extends DDLWork implements Serializable {
}
/**
- * @param remove
- * stale / add new partitions found during the check
+ * @param repairPartitions
+ * remove stale / add new partitions found during the check
*/
public void setRepairPartitions(boolean repairPartitions) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java
index 82ead2e..4059b92 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java
@@ -93,8 +93,7 @@ public class ShowPartitionsDesc extends DDLDesc implements Serializable {
}
/**
- * @param tabName
- * the table whose partitions have to be listed
+ * @param partSpec the partSpec to set.
*/
public void setPartSpec(Map<String, String> partSpec) {
this.partSpec = partSpec;
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java
index af0ccc0..15613ed 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java
@@ -57,7 +57,7 @@ public class ShowTableStatusDesc extends DDLDesc implements Serializable {
*/
public ShowTableStatusDesc() {
}
-
+
/**
* @param pattern
* names of tables to show
@@ -74,7 +74,7 @@ public class ShowTableStatusDesc extends DDLDesc implements Serializable {
* database name
* @param pattern
* names of tables to show
- * @param part
+ * @param partSpec
* partition specification
*/
public ShowTableStatusDesc(String resFile, String dbName, String pattern,
diff --git ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java
index 7e57716..5e5e22f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java
@@ -283,7 +283,7 @@ public class ExprWalkerInfo implements NodeProcessorCtx {
/**
* Returns the list of non-final candidate predicates for each map.
*
- * @return
+ * @return list of non-final candidate predicates
*/
public Map<String, List<ExprNodeDesc>> getNonFinalCandidates() {
return nonFinalPreds;
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProvider.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProvider.java
index 66f7698..55a2e79 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProvider.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProvider.java
@@ -42,12 +42,11 @@ public interface HiveAuthorizationProvider extends Configurable{
/**
* Authorization user level privileges.
- *
+ *
* @param readRequiredPriv
* a list of privileges needed for inputs.
* @param writeRequiredPriv
* a list of privileges needed for outputs.
- * @return
* @throws HiveException
* @throws AuthorizationException
*/
@@ -57,14 +56,13 @@ public interface HiveAuthorizationProvider extends Configurable{
/**
* Authorization privileges against a database object.
- *
+ *
* @param db
* database
* @param readRequiredPriv
* a list of privileges needed for inputs.
* @param writeRequiredPriv
* a list of privileges needed for outputs.
- * @return
* @throws HiveException
* @throws AuthorizationException
*/
@@ -74,14 +72,13 @@ public interface HiveAuthorizationProvider extends Configurable{
/**
* Authorization privileges against a hive table object.
- *
+ *
* @param table
* table object
* @param readRequiredPriv
* a list of privileges needed for inputs.
* @param writeRequiredPriv
* a list of privileges needed for outputs.
- * @return
* @throws HiveException
* @throws AuthorizationException
*/
@@ -91,14 +88,13 @@ public interface HiveAuthorizationProvider extends Configurable{
/**
* Authorization privileges against a hive partition object.
- *
+ *
* @param part
* partition object
* @param readRequiredPriv
* a list of privileges needed for inputs.
* @param writeRequiredPriv
* a list of privileges needed for outputs.
- * @return
* @throws HiveException
* @throws AuthorizationException
*/
@@ -110,7 +106,7 @@ public interface HiveAuthorizationProvider extends Configurable{
* Authorization privileges against a list of columns. If the partition object
* is not null, look at the column grants for the given partition. Otherwise
* look at the table column grants.
- *
+ *
* @param table
* table object
* @param part
@@ -121,7 +117,6 @@ public interface HiveAuthorizationProvider extends Configurable{
* a list of privileges needed for inputs.
* @param writeRequiredPriv
* a list of privileges needed for outputs.
- * @return
* @throws HiveException
* @throws AuthorizationException
*/
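A hedged usage example of the interface: read privileges go in the first array, write privileges in the second, and null means nothing is required on that side. The chosen Privilege constants are illustrative and exception handling is elided.

    provider.authorize(table, new Privilege[] { Privilege.SELECT }, null);      // read check
    provider.authorize(table, null, new Privilege[] { Privilege.ALTER_DATA }); // write check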
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDAFPercentile.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDAFPercentile.java
index 2b91b8b..e380eee 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/UDAFPercentile.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDAFPercentile.java
@@ -36,7 +36,8 @@ import org.apache.hadoop.io.LongWritable;
* UDAF for calculating the percentile values.
* There are several definitions of percentile, and we take the method recommended by
* NIST.
- * @see http://en.wikipedia.org/wiki/Percentile#Alternative_methods
+ * @see <a href="http://en.wikipedia.org/wiki/Percentile#Alternative_methods">
+ * Percentile references</a>
*/
@Description(name = "percentile",
value = "_FUNC_(expr, pc) - Returns the percentile(s) of expr at pc (range: [0,1])."
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
index aee448b..1471be7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTF.java
@@ -36,7 +36,7 @@ public abstract class GenericUDTF {
/**
* Initialize this GenericUDTF. This will be called only once per instance.
*
- * @param args
+ * @param argOIs
* An array of ObjectInspectors for the arguments
* @return A StructObjectInspector for output. The output struct represents a
* row of the table where the fields of the stuct are the columns. The
@@ -49,7 +49,7 @@ public abstract class GenericUDTF {
/**
* Give a set of arguments for the UDTF to process.
*
- * @param o
+ * @param args
* object array of arguments
*/
public abstract void process(Object[] args) throws HiveException;
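To make the lifecycle concrete, a minimal hypothetical UDTF that emits one string row per argument: initialize declares the output shape once, process forwards rows, close has nothing to flush.

    import java.util.Arrays;

    import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
    import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

    public class ExplodeArgsUDTF extends GenericUDTF {
      @Override
      public StructObjectInspector initialize(ObjectInspector[] argOIs)
          throws UDFArgumentException {
        // One output column named "col" of type string.
        return ObjectInspectorFactory.getStandardStructObjectInspector(
            Arrays.asList("col"),
            Arrays.<ObjectInspector>asList(
                PrimitiveObjectInspectorFactory.javaStringObjectInspector));
      }

      @Override
      public void process(Object[] args) throws HiveException {
        for (Object a : args) {
          forward(new Object[] { a == null ? null : a.toString() });
        }
      }

      @Override
      public void close() throws HiveException {
        // nothing buffered, nothing to flush
      }
    }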
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NGramEstimator.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NGramEstimator.java
index 3d0289d..0ef8fca 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NGramEstimator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NGramEstimator.java
@@ -34,7 +34,7 @@ import org.apache.commons.logging.LogFactory;
* A generic, re-usable n-gram estimation class that supports partial aggregations.
* The algorithm is based on the heuristic from the following paper:
* Yael Ben-Haim and Elad Tom-Tov, "A streaming parallel decision tree algorithm",
- * J. Machine Learning Research 11 (2010), pp. 849--872.
+ * J. Machine Learning Research 11 (2010), pp. 849--872.
*
* In particular, it is guaranteed that frequencies will be under-counted. With large
* data and a reasonable precision factor, this undercounting appears to be on the order
@@ -46,11 +46,11 @@ public class NGramEstimator {
private int pf;
private int n;
private HashMap<ArrayList<String>, Double> ngrams;
-
+
/**
* Creates a new n-gram estimator object. The 'n' for n-grams is computed dynamically
- * when data is fed to the object.
+ * when data is fed to the object.
*/
public NGramEstimator() {
k = 0;
@@ -82,7 +82,7 @@ public class NGramEstimator {
}
/**
- * Resets an n-gram estimator object to its initial state.
+ * Resets an n-gram estimator object to its initial state.
*/
public void reset() {
ngrams.clear();
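The guaranteed under-counting mentioned in the class comment comes from trimming: when the map outgrows its budget (k * pf), the lowest-frequency n-grams are dropped, and a dropped n-gram seen again restarts its count from zero. A hedged sketch of that step (imports from java.util assumed; not the actual trim implementation):

    private void trim(HashMap<ArrayList<String>, Double> ngrams, int budget) {
      if (ngrams.size() <= budget) {
        return;
      }
      List<Map.Entry<ArrayList<String>, Double>> entries =
          new ArrayList<Map.Entry<ArrayList<String>, Double>>(ngrams.entrySet());
      // Sort ascending by frequency so the rarest n-grams come first.
      Collections.sort(entries, new Comparator<Map.Entry<ArrayList<String>, Double>>() {
        public int compare(Map.Entry<ArrayList<String>, Double> a,
                           Map.Entry<ArrayList<String>, Double> b) {
          return a.getValue().compareTo(b.getValue());
        }
      });
      // Remove the lowest-frequency entries until the budget is met.
      for (int i = 0; i < entries.size() - budget; i++) {
        ngrams.remove(entries.get(i).getKey());
      }
    }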
@@ -96,13 +96,13 @@ public class NGramEstimator {
trim(true);
if(ngrams.size() < 1) { // SQL standard - return null for zero elements
return null;
- }
+ }
// Sort the n-gram list by frequencies in descending order
ArrayList