diff --git llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
index 7d0d6d2..3b7faad 100644
--- llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
+++ llap-client/src/java/org/apache/hadoop/hive/llap/ext/LlapTaskUmbilicalExternalClient.java
@@ -152,7 +152,6 @@ public InetSocketAddress getAddress() {
   /**
    * Submit the work for actual execution.
-   * @throws InvalidProtocolBufferException
    */
   public void submitWork(SubmitWorkRequestProto request, String llapHost, int llapPort) {
     // Register the pending events to be sent for this spec.
diff --git llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
index 081995c..70515c4 100644
--- llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
+++ llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
@@ -20,7 +20,7 @@ public interface ServiceInstance {
   /**
-   * Worker identity is a UUID (unique across restarts), to identify a node which died & was brought
+   * Worker identity is a UUID (unique across restarts), to identify a node which died &amp; was brought
    * back on the same host/port
    */
   public String getWorkerIdentity();
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
index 33abbb2..7634852 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java
@@ -27,7 +27,7 @@
   /**
    * Determine the database product type
-   * @param conn database connection
+   * @param productName string to defer database connection
    * @return database product type
    */
   public static DatabaseProduct determineDatabaseProduct(String productName) throws SQLException {
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java
index 6149224..99bd7b0 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java
@@ -82,7 +82,7 @@ public static void registerIfNot(long timeout) {
   /**
    * reset the timeout value of this timer.
-   * @param timeout
+   * @param timeoutMs
    */
   public static void resetTimeout(long timeoutMs) throws MetaException {
     if (timeoutMs <= 0) {
@@ -139,7 +139,7 @@ public static void clear() {
   /**
    * Check whether the long running method timeout.
-   * @throws DeadlineException when the method timeout
+   * @throws MetaException when the method timeout
    */
   public static void checkTimeout() throws MetaException {
     Deadline deadline = getCurrentDeadline();
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 53f8118..288888c 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -663,7 +663,7 @@ public Partition appendPartition(String dbName, String tableName, String partNam
    * @param partitionSpecs partitions specs of the parent partition to be exchanged
    * @param destDb the db of the destination table
    * @param destinationTableName the destination table name
-   @ @return new partition after exchanging
+   * @return new partition after exchanging
    */
   @Override
   public Partition exchange_partition(Map<String, String> partitionSpecs,
@@ -679,7 +679,7 @@ public Partition exchange_partition(Map<String, String> partitionSpecs,
    * @param partitionSpecs partitions specs of the parent partition to be exchanged
    * @param destDb the db of the destination table
    * @param destinationTableName the destination table name
-   @ @return new partitions after exchanging
+   * @return new partitions after exchanging
    */
   @Override
   public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
@@ -1217,7 +1217,7 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in
    * @param db_name the database name
    * @param tbl_name the table name
    * @param filter the filter string,
-   *    for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can
+   *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". Filtering can
    *    be done only on string partition keys.
    * @param max_parts the maximum number of partitions to return,
    *    all partitions are returned if -1 is passed
@@ -1490,7 +1490,7 @@ public boolean tableExists(String tableName) throws MetaException,
    * @param db_name the database name
    * @param tbl_name the table name
    * @param filter the filter string,
-   *    for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can
+   *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". Filtering can
    *    be done only on string partition keys.
    * @return number of partitions
    * @throws MetaException
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 023a289..90eebca 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -206,7 +206,7 @@
   /**
    * Get a list of table names that match a filter.
-   * The filter operators are LIKE, <, <=, >, >=, =, <>
+   * The filter operators are LIKE, &lt;, &lt;=, &gt;, &gt;=, =, &lt;&gt;
    *
    * In the filter statement, values interpreted as strings must be enclosed in quotes,
    * while values interpreted as integers should not be. Strings and integers are the only
@@ -218,12 +218,12 @@
    * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
    * and supports all filter operators except LIKE
    * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
-   * and only supports the filter operators = and <>.
+   * and only supports the filter operators = and &lt;&gt;.
    * Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
    * For example, to filter on parameter keys called "retention", the key name in the filter
    * statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
-   * Also, = and <> only work for keys that exist in the tables.
-   * E.g., filtering on tables where key1 <> value will only
+   * Also, = and &lt;&gt; only work for keys that exist in the tables.
+   * E.g., filtering on tables where key1 &lt;&gt; value will only
    * return tables that have a value for the parameter key1.
    * Some example filter statements include:
    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
@@ -575,7 +575,7 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in
    * @param dbName the database name
    * @param tableName the table name
    * @param filter the filter string,
-   *    for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can
+   *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". Filtering can
    *    be done only on string partition keys.
    * @return number of partitions
    * @throws MetaException
@@ -591,7 +591,7 @@ public int getNumPartitionsByFilter(String dbName, String tableName,
    * @param db_name the database name
    * @param tbl_name the table name
    * @param filter the filter string,
-   *    for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can
+   *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". Filtering can
    *    be done only on string partition keys.
    * @param max_parts the maximum number of partitions to return,
    *    all partitions are returned if -1 is passed
@@ -1549,7 +1549,7 @@ void addDynamicPartitions(long txnId, String dbName, String tableName, List<String> partNames)
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java
   public List<String> filterTableNames(String dbName, List<String> tableList) throws MetaException;
@@ -72,9 +72,8 @@
   /**
    * Filter given list of tables
-   * @param dbName
    * @param tableList
-   * @returnList of filtered table names
+   * @return List of filtered table names
    */
   public List<Table> filterTables(List<Table> tableList) throws MetaException;
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
index a0c8d3b..b62c45f 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
@@ -55,7 +55,7 @@
   /**
    * Run the thread in the background. This must not be called until
-   * {@link ##init(java.util.concurrent.atomic.AtomicBoolean, java.util.concurrent.atomic.AtomicBoolean)} has
+   * {@link MetaStoreThread#init(java.util.concurrent.atomic.AtomicBoolean,java.util.concurrent.atomic.AtomicBoolean)} has
    * been called.
    */
   void start();
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 870896c..544fd9f 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -576,7 +576,6 @@ static public void deleteWHDirectory(Path path, Configuration conf,
    * @param conf
    *          hive configuration
    * @return true or false depending on conformance
-   * @exception MetaException
    *              if it doesn't match the pattern.
    */
   static public boolean validateName(String name, Configuration conf) {
@@ -692,7 +691,7 @@ static private boolean areColTypesCompatible(String oldType, String newType) {
    * validate column type
    *
    * if it is predefined, yes. otherwise no
-   * @param name
+   * @param type
    * @return
    */
   static public String validateColumnType(String type) {
@@ -850,7 +849,7 @@ public static String typeToThriftType(String type) {
    * @return String containing "Thrift
    *         DDL#comma-separated-column-names#colon-separated-columntypes
    *         Example:
-   *         "struct result { a string, map<int,string> b}#a,b#string:map<int,string>"
+   *         "struct result { a string, map&lt;int,string&gt; b}#a,b#string:map&lt;int,string&gt;"
    */
   public static String getFullDDLFromFieldSchema(String structName, List<FieldSchema> fieldSchemas) {
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
index e38e8dd..41d7e81 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
@@ -62,7 +62,7 @@ public static ExpressionTree makeExpressionTree(PartitionExpressionProxy express
   /**
    * Creates the proxy used to evaluate expressions. This is here to prevent circular
-   * dependency - ql -> metastore client <-> metastore server -> ql. If server and
+   * dependency - ql -&gt; metastore client &lt;-&gt; metastore server -&gt; ql. If server and
    * client are split, this can be removed.
    * @param conf Configuration.
    * @return The partition expression proxy.
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index c22a1db..0378b84 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -405,7 +405,6 @@ public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsOb
    * @return Relevant column statistics for the column for the given table
    * @throws NoSuchObjectException
    * @throws MetaException
-   * @throws InvalidInputException
    *
    */
   public abstract ColumnStatistics getTableColumnStatistics(String dbName, String tableName,
@@ -541,7 +540,6 @@ public void alterFunction(String dbName, String funcName, Function newFunction)
    * Drop a function definition.
    * @param dbName
    * @param funcName
-   * @return
    * @throws MetaException
    * @throws NoSuchObjectException
    * @throws InvalidObjectException
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java metastore/src/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
index 51e4627..bb3fb2e 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
@@ -200,7 +200,7 @@ static public void setCmRoot(Path cmRoot) {
    * with the original location plus checksum.
    * @param path original path inside partition or table
    * @param conf
-   * @param chksum checksum of the file, can be retrieved by {@link getCksumString}
+   * @param chksum checksum of the file, can be retrieved by {@link getChksumString}
    * @return
    * @throws IOException
    * @throws MetaException
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
index dff1195..b8f2802 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java
@@ -47,7 +47,7 @@
    * @param db name of the database the table is in
    * @param table name of the table being inserted into
    * @param partVals list of partition values, can be null
-   * @param insertData the inserted files & their checksums
+   * @param insertData the inserted files and their checksums
    * @param status status of insert, true = success, false = failure
    * @param handler handler that is firing the event
    */
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
index e687a69..4998633 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
@@ -2293,7 +2293,6 @@ private ColumnStatistics buildColStats(byte[] key, boolean fromTable) throws IOE
   /**
    * @param fileIds file ID list.
-   * @return Serialized file metadata.
    */
   @Override
   public void getFileMetadata(List<Long> fileIds, ByteBuffer[] result) throws IOException {
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java
index 0382e8a..d427fef 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java
@@ -41,7 +41,7 @@ void storeFileMetadata(List<Long> fileIds, List<ByteBuffer> metadataBuffers,
   /**
    * @param fileId The file ID.
-   * @param metadataBuffers Serialized file metadata.
+   * @param metadata Serialized file metadata.
    * @param addedCols The column names for additional columns created by file-format-specific
    *                  metadata handler, to be stored in the cache.
    * @param addedVals The values for addedCols; one value per added column.
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
index 4a7ca6d..490d3b4 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java
@@ -23,7 +23,7 @@
  * Utility function that constructs a notification filter to match a given db name and/or table name.
  * If dbName == null, fetches all warehouse events.
  * If dnName != null, but tableName == null, fetches all events for the db
- * If dbName != null && tableName != null, fetches all events for the specified table
+ * If dbName != null &amp;&amp; tableName != null, fetches all events for the specified table
  */
 public class DatabaseAndTableFilter extends BasicFilter {
   private final String databaseName, tableName;
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
index 10fcbea..8b12899 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
@@ -230,8 +230,7 @@ protected void accept(TreeVisitor visitor) throws MetaException {
    *        tables that match the filter.
    * @param params
    *        A map of parameter key to values for the filter statement.
-   * @param filterBuilder The filter builder that is used to build filter.
-   * @return a JDO filter statement
+   * @param filterBuffer The filter builder that is used to build filter.
    * @throws MetaException
    */
   public void generateJDOFilter(Configuration conf, Table table,
@@ -385,7 +384,6 @@ private void generateJDOFilterOverPartitions(Configuration conf, Table table,
   }

   /**
-   * @param operator operator
    * @return true iff filter pushdown for this operator can be done for integral types.
    */
   public boolean canJdoUseStringsWithIntegral() {
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
index 0b0df85..3eb3827 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
@@ -424,7 +424,7 @@ public void cleanupRecords(HiveObjectType type, Database db, Table table,
   }

   /**
-   * Once a {@link java.util.concurrent.ThreadPoolExecutor.Worker} submits a job to the cluster,
+   * Once a {@link java.util.concurrent.ThreadPoolExecutor} Worker submits a job to the cluster,
    * it calls this to update the metadata.
    * @param id {@link CompactionInfo#id}
    */
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index 6e0070b..ac2f88a 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -75,7 +75,7 @@ public static ValidTxnList createValidReadTxnList(GetOpenTxnsResponse txns, long
    * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse} to a
    * {@link org.apache.hadoop.hive.common.ValidTxnList}. This assumes that the caller intends to
    * compact the files, and thus treats only open transactions as invalid. Additionally any
-   * txnId > highestOpenTxnId is also invalid. This is to avoid creating something like
+   * txnId &gt; highestOpenTxnId is also invalid. This is to avoid creating something like
    * delta_17_120 where txnId 80, for example, is still open.
    * @param txns txn list from the metastore
    * @return a valid txn list.
diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java
index 9da3071..2e021af 100644
--- metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java
+++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MStorageDescriptor.java
@@ -252,7 +252,7 @@ public void setSkewedColValues(List<MStringList> skewedColValues) {
   }

   /**
-   * @param skewedColValueLocationMaps the skewedColValueLocationMaps to set
+   * @param listBucketColValuesMapping the skewedColValueLocationMaps to set
    */
   public void setSkewedColValueLocationMaps(Map<MStringList, String> listBucketColValuesMapping) {
     this.skewedColValueLocationMaps = listBucketColValuesMapping;
diff --git spark-client/src/main/java/org/apache/hive/spark/client/SparkClient.java spark-client/src/main/java/org/apache/hive/spark/client/SparkClient.java
index e952f27..1922e41 100644
--- spark-client/src/main/java/org/apache/hive/spark/client/SparkClient.java
+++ spark-client/src/main/java/org/apache/hive/spark/client/SparkClient.java
@@ -49,15 +49,16 @@
   /**
    * Asks the remote context to run a job immediately.
-   * <p/>
+   * <p>
    * Normally, the remote context will queue jobs and execute them based on how many worker
    * threads have been configured. This method will run the submitted job in the same thread
    * processing the RPC message, so that queueing does not apply.
-   * <p/>
+   * <p>
+   * <p>
    * It's recommended that this method only be used to run code that finishes quickly. This
    * avoids interfering with the normal operation of the context.
-   * <p/>
+   * <p>
-   * Note: the {@link JobContext#monitor()} functionality is not available when using this method.
+   * Note: the JobContext#monitor() functionality is not available when using this method.
    *
    * @param job The job to execute.
    * @return A future to monitor the result of the job.
diff --git spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcDispatcher.java spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcDispatcher.java
index 2b6ab29..00f5a17 100644
--- spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcDispatcher.java
+++ spark-client/src/main/java/org/apache/hive/spark/client/rpc/RpcDispatcher.java
@@ -37,14 +37,15 @@
 /**
  * An implementation of ChannelInboundHandler that dispatches incoming messages to an instance
  * method based on the method signature.
- * <p/>
+ * <p>
  * A handler's signature must be of the form:
- * <p/>
+ * <p>
  * <blockquote><tt>protected void handle(ChannelHandlerContext, MessageType)</tt></blockquote>
- * <p/>
+ * <p>
  * Where "MessageType" must match exactly the type of the message to handle. Polymorphism is not
  * supported. Handlers can return a value, which becomes the RPC reply; if a null is returned, then
  * a reply is still sent, with an empty payload.
+ * <p>
  */
 @InterfaceAudience.Private
 public abstract class RpcDispatcher extends SimpleChannelInboundHandler<Object> {