recordItr)
* This method should be called at master node. Primary purpose of this is to
* do cleanups in case of failures.
*
- * @throws {@link HCatException} *
+ * @throws HCatException
*/
public abstract void abort(final WriterContext context) throws HCatException;
diff --git hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
index 1406e5a192..efafe0c641 100644
--- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
+++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
@@ -150,7 +150,7 @@ public void onAlterPartition(AlterPartitionEvent ape) throws MetaException {
* particular table by listening on a topic named "dbName.tableName" with message selector
* string {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_EVENT} =
* {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_DROP_PARTITION_EVENT}.
- *
+ *
* TODO: DataNucleus 2.0.3, currently used by the HiveMetaStore for persistence, has been
* found to throw NPE when serializing objects that contain null. For this reason we override
* some fields in the StorageDescriptor of this notification. This should be fixed after
@@ -264,7 +264,7 @@ public void onAlterTable(AlterTableEvent tableEvent) throws MetaException {
* dropped tables by listening on topic "HCAT" with message selector string
* {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_EVENT} =
* {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_DROP_TABLE_EVENT}
- *
+ *
* TODO: DataNucleus 2.0.3, currently used by the HiveMetaStore for persistence, has been
* found to throw NPE when serializing objects that contain null. For this reason we override
* some fields in the StorageDescriptor of this notification. This should be fixed after
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
index 5e122543a4..bc99b6c824 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
@@ -54,7 +54,7 @@
import java.util.Properties;
/**
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.AbstractRecordWriter}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.AbstractRecordWriter
*/
@Deprecated
public abstract class AbstractRecordWriter implements RecordWriter {
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
index 32dae458d4..85c3429329 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
@@ -44,9 +44,9 @@
/**
* Streaming Writer handles delimited input (eg. CSV).
- * Delimited input is parsed & reordered to match column order in table
+ * Delimited input is parsed &amp;amp; reordered to match column order in table
* Uses Lazy Simple Serde to process delimited input
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.StrictDelimitedInputWriter}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.StrictDelimitedInputWriter
*/
@Deprecated
public class DelimitedInputWriter extends AbstractRecordWriter {
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
index 3604630c43..66a1737839 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java
@@ -67,7 +67,7 @@
* Information about the hive end point (i.e. table or partition) to write to.
* A light weight object that does NOT internally hold on to resources such as
* network connections. It can be stored in Hashed containers such as sets and hash tables.
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.HiveStreamingConnection}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.HiveStreamingConnection
*/
@Deprecated
public class HiveEndPoint {
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java
index 19078d2b31..0f3c0bcfea 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java
@@ -20,7 +20,7 @@
/**
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.RecordWriter}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.RecordWriter
*/
@Deprecated
public interface RecordWriter {
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java
index 0de8abc277..3af9aed36b 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java
@@ -24,7 +24,7 @@
* Represents a connection to a HiveEndPoint. Used to acquire transaction batches.
* Note: the expectation is that there is at most 1 TransactionBatch outstanding for any given
* StreamingConnection. Violating this may result in "out of sequence response".
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.HiveStreamingConnection}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.HiveStreamingConnection
*/
@Deprecated
public interface StreamingConnection {
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java
index 48e7e49552..d588f71a5c 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictJsonWriter.java
@@ -37,7 +37,7 @@
/**
* Streaming Writer handles utf8 encoded Json (Strict syntax).
* Uses org.apache.hive.hcatalog.data.JsonSerDe to process Json input
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.StrictJsonWriter}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.StrictJsonWriter
*/
@Deprecated
public class StrictJsonWriter extends AbstractRecordWriter {
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
index f0540e088f..6a9a47e85c 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StrictRegexWriter.java
@@ -41,7 +41,7 @@
/**
* Streaming Writer handles text input data with regex. Uses
* org.apache.hadoop.hive.serde2.RegexSerDe
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.StrictRegexWriter}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.StrictRegexWriter
*/
@Deprecated
public class StrictRegexWriter extends AbstractRecordWriter {
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
index 400fd49dd7..96aae02170 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
@@ -30,7 +30,7 @@
* Note on thread safety: At most 2 threads can run through a given TransactionBatch at the same
* time. One thread may call {@link #heartbeat()} and the other all other methods.
* Violating this may result in "out of sequence response".
- * @deprecated as of Hive 3.0.0, replaced by {@link org.apache.hive.streaming.HiveStreamingConnection}
+ * @deprecated as of Hive 3.0.0, replaced by org.apache.hive.streaming.HiveStreamingConnection
*/
@Deprecated
public interface TransactionBatch {
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java
index 67785d0623..a90d5d3187 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java
@@ -47,7 +47,7 @@
* previously closed. The {@link MutatorCoordinator} will seamlessly handle transitions between groups, creating and
* closing {@link Mutator Mutators} as needed to write to the appropriate partition and bucket. New partitions will be
* created in the meta store if {@link AcidTable#createPartitions()} is set.
- *
+ *
* {@link #insert(List, Object) Insert} events must be artificially assigned appropriate bucket ids in the preceding
* grouping phase so that they are grouped correctly. Note that any write id or row id assigned to the
* {@link RecordIdentifier RecordIdentifier} of such events will be ignored by both the coordinator and the underlying
diff --git hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
index 8455a3f181..943a5a8ce0 100644
--- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
+++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
@@ -188,7 +188,7 @@ public abstract void updateTableSchema(String dbName, String tableName, HCatTabl
* Serializer for HCatTable.
* @param hcatTable The HCatTable to be serialized into string form
* @return String representation of the HCatTable.
- * @throws HCatException, on failure to serialize.
+ * @throws HCatException on failure to serialize.
*/
public abstract String serializeTable(HCatTable hcatTable) throws HCatException;
@@ -204,7 +204,7 @@ public abstract void updateTableSchema(String dbName, String tableName, HCatTabl
* Serializer for HCatPartition.
* @param hcatPartition The HCatPartition instance to be serialized.
* @return String representation of the HCatPartition.
- * @throws HCatException, on failure to serialize.
+ * @throws HCatException on failure to serialize.
*/
public abstract String serializePartition(HCatPartition hcatPartition) throws HCatException;
@@ -212,7 +212,7 @@ public abstract void updateTableSchema(String dbName, String tableName, HCatTabl
* Serializer for a list of HCatPartition.
* @param hcatPartitions The HCatPartitions to be serialized.
* @return A list of Strings, each representing an HCatPartition.
- * @throws HCatException, on failure to serialize.
+ * @throws HCatException on failure to serialize.
*/
public abstract List<String> serializePartitions(List<HCatPartition> hcatPartitions) throws HCatException;
@@ -220,7 +220,7 @@ public abstract void updateTableSchema(String dbName, String tableName, HCatTabl
* Deserializer for an HCatPartition.
* @param hcatPartitionStringRep The String representation of the HCatPartition, presumably retrieved from {@link #serializePartition(HCatPartition)}
* @return HCatPartition instance reconstructed from the string.
- * @throws HCatException, on failure to deserialze.
+ * @throws HCatException on failure to deserialize.
*/
public abstract HCatPartition deserializePartition(String hcatPartitionStringRep) throws HCatException;
@@ -228,7 +228,7 @@ public abstract void updateTableSchema(String dbName, String tableName, HCatTabl
* Deserializer for a list of HCatPartition strings.
* @param hcatPartitionStringReps The list of HCatPartition strings to be deserialized.
* @return A list of HCatPartition instances, each reconstructed from an entry in the string-list.
- * @throws HCatException, on failure to deserialize.
+ * @throws HCatException on failure to deserialize.
*/
public abstract List<HCatPartition> deserializePartitions(List<String> hcatPartitionStringReps) throws HCatException;
@@ -389,7 +389,8 @@ public abstract int addPartitionSpec(HCatPartitionSpec partitionSpec)
* @param tableName The table name.
* @param partitionSpec The partition specification, {[col_name,value],[col_name2,value2]}.
* @param ifExists Hive returns an error if the partition specified does not exist, unless ifExists is set to true.
- * @throws HCatException,ConnectionFailureException
+ * @throws HCatException
+ * @throws ConnectionFailureException
*/
public abstract void dropPartitions(String dbName, String tableName,
Map<String, String> partitionSpec, boolean ifExists)
@@ -408,7 +409,8 @@ public abstract void dropPartitions(String dbName, String tableName,
* @param partitionSpec The partition specification, {[col_name,value],[col_name2,value2]}.
* @param ifExists Hive returns an error if the partition specified does not exist, unless ifExists is set to true.
* @param deleteData Whether to delete the underlying data.
- * @throws HCatException,ConnectionFailureException
+ * @throws HCatException
+ * @throws ConnectionFailureException
*/
public abstract void dropPartitions(String dbName, String tableName,
Map<String, String> partitionSpec, boolean ifExists, boolean deleteData)
@@ -419,7 +421,7 @@ public abstract void dropPartitions(String dbName, String tableName,
* @param dbName The database name.
* @param tblName The table name.
* @param filter The filter string,
- * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can
+ * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can
* be done only on string partition keys.
* @return list of partitions
* @throws HCatException
@@ -468,7 +470,8 @@ public abstract boolean isPartitionMarkedForEvent(String dbName, String tblName,
* @param owner the owner
* @param renewerKerberosPrincipalName the renewer kerberos principal name
* @return the delegation token
- * @throws HCatException,ConnectionFailureException
+ * @throws HCatException
+ * @throws ConnectionFailureException
*/
public abstract String getDelegationToken(String owner,
String renewerKerberosPrincipalName) throws HCatException;
@@ -498,7 +501,7 @@ public abstract void cancelDelegationToken(String tokenStrForm)
* @param dbName The name of the DB.
* @param tableName The name of the table.
* @return Topic-name for the message-bus on which messages will be sent for the specified table.
- * By default, this is set to <db-name>.<table-name>. Returns null when not set.
+ * By default, this is set to &amp;lt;db-name&amp;gt;.&amp;lt;table-name&amp;gt;. Returns null when not set.
*/
public abstract String getMessageBusTopicName(String dbName, String tableName) throws HCatException;
@@ -509,7 +512,7 @@ public abstract void cancelDelegationToken(String tokenStrForm)
* @param lastEventId : The last event id that was processed for this reader. The returned
* replication tasks will start from this point forward
* @param maxEvents : Maximum number of events to consider for generating the
- * replication tasks. If < 1, then all available events will be considered.
+ * replication tasks. If &amp;lt; 1, then all available events will be considered.
* @param dbName : The database name for which we're interested in the events for.
* @param tableName : The table name for which we're interested in the events for - if null,
* then this function will behave as if it were running at a db level.
@@ -525,7 +528,7 @@ public abstract void cancelDelegationToken(String tokenStrForm)
* @param lastEventId The last event id that was consumed by this reader. The returned
* notifications will start at the next eventId available this eventId that
* matches the filter.
- * @param maxEvents Maximum number of events to return. If < 1, then all available events will
+ * @param maxEvents Maximum number of events to return. If &amp;lt; 1, then all available events will
* be returned.
* @param filter Filter to determine if message should be accepted. If null, then all
* available events up to maxEvents will be returned.
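For readers of this API: the partition specification taken by dropPartitions above is just a column-name to value map. A minimal sketch (the database, table, column names and values here are invented):

import java.util.HashMap;
import java.util.Map;

class DropPartitionsSpecExample {
  static Map<String, String> exampleSpec() {
    // Builds {[ds,2024-01-01],[region,us]} in the shape the javadoc describes.
    Map<String, String> spec = new HashMap<>();
    spec.put("ds", "2024-01-01");
    spec.put("region", "us");
    // usage, assuming an HCatClient instance named client:
    // client.dropPartitions("mydb", "mytable", spec, true);
    return spec;
  }
}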
diff --git hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java
index 7aa8744eba..7c9c5a55a9 100644
--- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java
+++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java
@@ -180,7 +180,7 @@ public ReplicationTask withDstStagingDirProvider(StagingDirectoryProvider dstSta
* throws an IllegalArgumentException as well, a ReplicationTask will use the same key sent in.
* That way, the default will then be that the destination db name is the same as the src db name
*
- * If you want to use a Map<String,String> mapping instead of a Function<String,String>,
+ * If you want to use a Map&amp;lt;String,String&amp;gt; mapping instead of a Function&amp;lt;String,String&amp;gt;,
* simply call this function as .withTableNameMapping(ReplicationUtils.mapBasedFunction(tableMap))
* @param tableNameMapping
* @return this replication task
@@ -197,7 +197,7 @@ public ReplicationTask withTableNameMapping(Function<String, String> tableNameMap
* throws an IllegalArgumentException as well, a ReplicationTask will use the same key sent in.
* That way, the default will then be that the destination db name is the same as the src db name
*
- * If you want to use a Map<String,String> mapping instead of a Function<String,String>,
+ * If you want to use a Map&amp;lt;String,String&amp;gt; mapping instead of a Function&amp;lt;String,String&amp;gt;,
* simply call this function as .withDbNameMapping(ReplicationUtils.mapBasedFunction(dbMap))
* @param dbNameMapping
* @return this replication task
@@ -214,9 +214,9 @@ protected void verifyActionable() {
}
/**
- * Returns a Iterable<Command> to send to a hive driver on the source warehouse
+ * Returns a Iterable&amp;lt;Command&amp;gt; to send to a hive driver on the source warehouse
*
- * If you *need* a List<Command> instead, you can use guava's
+ * If you *need* a List&amp;lt;Command&amp;gt; instead, you can use guava's
* ImmutableList.copyOf(iterable) or Lists.newArrayList(iterable) to
* get the underlying list, but this defeats the purpose of making this
* interface an Iterable rather than a List, since it is very likely
@@ -226,9 +226,9 @@ protected void verifyActionable() {
abstract public Iterable<? extends Command> getSrcWhCommands();
/**
- * Returns a Iterable<Command> to send to a hive driver on the source warehouse
+ * Returns a Iterable&amp;lt;Command&amp;gt; to send to a hive driver on the source warehouse
*
- * If you *need* a List<Command> instead, you can use guava's
+ * If you *need* a List&amp;lt;Command&amp;gt; instead, you can use guava's
* ImmutableList.copyOf(iterable) or Lists.newArrayList(iterable) to
* get the underlying list, but this defeats the purpose of making this
* interface an Iterable rather than a List, since it is very likely
diff --git hplsql/src/main/java/org/apache/hive/hplsql/Meta.java hplsql/src/main/java/org/apache/hive/hplsql/Meta.java
index 52a702e20e..e56e8e6f4e 100644
--- hplsql/src/main/java/org/apache/hive/hplsql/Meta.java
+++ hplsql/src/main/java/org/apache/hive/hplsql/Meta.java
@@ -259,7 +259,7 @@ public String normalizeIdentifierPart(String name) {
}
/**
- * Split qualified object to 2 parts: schema.tab.col -> schema.tab|col; tab.col -> tab|col
+ * Split qualified object to 2 parts: schema.tab.col -&amp;gt; schema.tab|col; tab.col -&amp;gt; tab|col
*/
public ArrayList<String> splitIdentifierToTwoParts(String name) {
ArrayList<String> parts = splitIdentifier(name);
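The split rule documented above, as a stand-alone sketch; this helper is hypothetical and only mirrors the behaviour for simple dotted names, the real logic lives in Meta.splitIdentifier:

class IdentifierSplitSketch {
  // schema.tab.col -> [schema.tab, col]; tab.col -> [tab, col]
  static String[] splitToTwoParts(String name) {
    int lastDot = name.lastIndexOf('.');
    if (lastDot < 0) {
      return new String[] { null, name }; // unqualified name, column only
    }
    return new String[] { name.substring(0, lastDot), name.substring(lastDot + 1) };
  }
}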
diff --git hplsql/src/main/java/org/apache/hive/hplsql/Utils.java hplsql/src/main/java/org/apache/hive/hplsql/Utils.java
index 6bc0568023..2a86f55a3e 100644
--- hplsql/src/main/java/org/apache/hive/hplsql/Utils.java
+++ hplsql/src/main/java/org/apache/hive/hplsql/Utils.java
@@ -52,7 +52,7 @@ public static String unquoteString(String s) {
}
/**
- * Quote string and escape characters - ab'c -> 'ab''c'
+ * Quote string and escape characters - ab'c -&amp;gt; 'ab''c'
*/
public static String quoteString(String s) {
if(s == null) {
@@ -73,7 +73,7 @@ public static String quoteString(String s) {
}
/**
- * Merge quoted strings: 'a' 'b' -> 'ab'; 'a''b' 'c' -> 'a''bc'
+ * Merge quoted strings: 'a' 'b' -&amp;gt; 'ab'; 'a''b' 'c' -&amp;gt; 'a''bc'
*/
public static String mergeQuotedStrings(String s1, String s2) {
if(s1 == null || s2 == null) {
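The quoting and escaping rule above in miniature; an illustrative re-implementation, not the hplsql code itself:

class QuoteSketch {
  // ab'c -> 'ab''c': double embedded single quotes, then wrap in quotes.
  static String quote(String s) {
    return s == null ? null : "'" + s.replace("'", "''") + "'";
  }

  public static void main(String[] args) {
    System.out.println(quote("ab'c")); // prints 'ab''c'
  }
}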
diff --git hplsql/src/main/java/org/apache/hive/hplsql/Var.java hplsql/src/main/java/org/apache/hive/hplsql/Var.java
index d1151e716c..0a5aa9eb3f 100644
--- hplsql/src/main/java/org/apache/hive/hplsql/Var.java
+++ hplsql/src/main/java/org/apache/hive/hplsql/Var.java
@@ -624,7 +624,7 @@ else if (type == Type.TIMESTAMP) {
}
/**
- * Convert value to SQL string - string literals are quoted and escaped, ab'c -> 'ab''c'
+ * Convert value to SQL string - string literals are quoted and escaped, ab'c -&amp;gt; 'ab''c'
*/
public String toSqlString() {
if (value == null) {
diff --git jdbc/src/java/org/apache/hive/jdbc/Utils.java jdbc/src/java/org/apache/hive/jdbc/Utils.java
index 852942e6a2..e0200b76b5 100644
--- jdbc/src/java/org/apache/hive/jdbc/Utils.java
+++ jdbc/src/java/org/apache/hive/jdbc/Utils.java
@@ -307,8 +307,8 @@ public static JdbcConnectionParams parseURL(String uri) throws JdbcUriParseExcep
/**
* Parse JDBC connection URL
* The new format of the URL is:
- * jdbc:hive2://<host1>:<port1>,<host2>:<port2>/dbName;sess_var_list?hive_conf_list#hive_var_list
- * where the optional sess, conf and var lists are semicolon separated <key>=<val> pairs.
+ * jdbc:hive2://&amp;lt;host1&amp;gt;:&amp;lt;port1&amp;gt;,&amp;lt;host2&amp;gt;:&amp;lt;port2&amp;gt;/dbName;sess_var_list?hive_conf_list#hive_var_list
+ * where the optional sess, conf and var lists are semicolon separated &amp;lt;key&amp;gt;=&amp;lt;val&amp;gt; pairs.
* For utilizing dynamic service discovery with HiveServer2 multiple comma separated host:port pairs can
* be specified as shown above.
* The JDBC driver resolves the list of uris and picks a specific server instance to connect to.
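A connection string in the documented format, for reference; hosts, ports and settings are placeholders, and the snippet assumes the Hive JDBC driver is on the classpath:

import java.sql.Connection;
import java.sql.DriverManager;

class HiveJdbcUrlExample {
  public static void main(String[] args) throws Exception {
    String url = "jdbc:hive2://host1:10000,host2:10000/default"
        + ";transportMode=binary"      // sess_var_list
        + "?hive.exec.parallel=true"   // hive_conf_list
        + "#myvar=myvalue";            // hive_var_list
    try (Connection conn = DriverManager.getConnection(url, "user", "")) {
      System.out.println("connected: " + !conn.isClosed());
    }
  }
}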
diff --git llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java
index 479e053a1c..b2e4d78487 100644
--- llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java
+++ llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java
@@ -36,7 +36,8 @@
/**
* Utility to test query and data retrieval via the LLAP input format.
- * llapdump --hiveconf hive.zookeeper.quorum=localhost --hiveconf hive.zookeeper.client.port=2181 --hiveconf hive.llap.daemon.service.hosts=@llap_MiniLlapCluster 'select * from employee where employee_id < 10'
+ * llapdump --hiveconf hive.zookeeper.quorum=localhost --hiveconf hive.zookeeper.client.port=2181\
+ * --hiveconf hive.llap.daemon.service.hosts=@llap_MiniLlapCluster 'select * from employee where employee_id &amp;lt; 10'
*
*/
public class LlapDump {
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/ConsumerFeedback.java llap-server/src/java/org/apache/hadoop/hive/llap/ConsumerFeedback.java
index b71a358d60..82c581e3e8 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/ConsumerFeedback.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/ConsumerFeedback.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.hive.llap;
/**
- * Consumer feedback typically used by Consumer<T>;
+ * Consumer feedback typically used by Consumer&amp;lt;T&amp;gt;;
* allows consumer to influence production of data.
*/
public interface ConsumerFeedback<T> {
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
index 9591e48ce0..4dd3826a67 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
@@ -47,7 +47,7 @@
* can be thrown away, the reader will never touch it; but we need code in the reader to
* handle such cases to avoid disk reads for these "tails" vs real unmatched ranges.
* Some sort of InvalidCacheChunk could be placed to avoid them. TODO
- * @param base base offset for the ranges (stripe/stream offset in case of ORC).
+ * @param baseOffset base offset for the ranges (stripe/stream offset in case of ORC).
*/
DiskRangeList getFileData(Object fileKey, DiskRangeList range, long baseOffset,
DiskRangeListFactory factory, LowLevelCacheCounters qfCounters, BooleanRef gotAllData);
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/PriorityBlockingDeque.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/PriorityBlockingDeque.java
index 1ac8ec6b48..925bd9f245 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/PriorityBlockingDeque.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/PriorityBlockingDeque.java
@@ -26,15 +26,15 @@
/**
* An optionally-bounded {@linkplain BlockingDeque blocking deque} based on
* a navigable set.
- *
+ *
* The optional capacity bound constructor argument serves as a
* way to prevent excessive expansion. The capacity, if unspecified,
* is equal to {@link Integer#MAX_VALUE}.
- *
+ *
* This class and its iterator implement all of the
* optional methods of the {@link Collection} and {@link
* Iterator} interfaces.
- *
+ *
* This code is loosely based on the {@linkplain java.util.concurrent.LinkedBlockingDeque linked blocking deque} code.
*/
public class PriorityBlockingDeque<E>
@@ -439,8 +439,8 @@ public boolean removeLastOccurrence(Object o) {
* Inserts the specified element to the deque unless it would
* violate capacity restrictions. When using a capacity-restricted deque,
* it is generally preferable to use method {@link #offer(Object) offer}.
- *
- * This method is equivalent to {@link #addLast}.
+ *
+ * This method is equivalent to {@link #addLast}.
*
* @throws IllegalStateException if the element cannot be added at this
* time due to capacity restrictions
@@ -481,8 +481,8 @@ public boolean offer(E e, long timeout, TimeUnit unit)
* Retrieves and removes the head of the queue represented by this deque.
* This method differs from {@link #poll poll} only in that it throws an
* exception if this deque is empty.
- *
- * This method is equivalent to {@link #removeFirst() removeFirst}.
+ *
+ * This method is equivalent to {@link #removeFirst() removeFirst}.
*
* @return the head of the queue represented by this deque
* @throws NoSuchElementException if this deque is empty
@@ -508,8 +508,8 @@ public E poll(long timeout, TimeUnit unit) throws InterruptedException {
* Retrieves, but does not remove, the head of the queue represented by
* this deque. This method differs from {@link #peek peek} only in that
* it throws an exception if this deque is empty.
- *
- * This method is equivalent to {@link #getFirst() getFirst}.
+ *
+ * This method is equivalent to {@link #getFirst() getFirst}.
*
* @return the head of the queue represented by this deque
* @throws NoSuchElementException if this deque is empty
@@ -528,8 +528,8 @@ public E peek() {
* (in the absence of memory or resource constraints) accept without
* blocking. This is always equal to the initial capacity of this deque
* less the current size of this deque.
- *
- * Note that you cannot always tell if an attempt to insert
+ *
+ * Note that you cannot always tell if an attempt to insert
* an element will succeed by inspecting remainingCapacity
* because it may be the case that another thread is about to
* insert or remove an element.
@@ -622,8 +622,8 @@ public E pop() {
* o.equals(e) (if such an element exists).
* Returns true if this deque contained the specified element
* (or equivalently, if this deque changed as a result of the call).
- *
- * This method is equivalent to
+ *
+ * This method is equivalent to
* {@link #removeFirstOccurrence(Object) removeFirstOccurrence}.
*
* @param o element to be removed from this deque, if present
@@ -671,12 +671,12 @@ public boolean contains(Object o) {
/**
* Returns an array containing all of the elements in this deque, in
* proper sequence (from first to last element).
- *
+ *
* The returned array will be "safe" in that no references to it are
* maintained by this deque. (In other words, this method must allocate
* a new array). The caller is thus free to modify the returned array.
- *
- * This method acts as bridge between array-based and collection-based
+ *
+ * This method acts as bridge between array-based and collection-based
* APIs.
*
* @return an array containing all of the elements in this deque
@@ -697,24 +697,24 @@ public boolean contains(Object o) {
* the specified array. If the deque fits in the specified array, it
* is returned therein. Otherwise, a new array is allocated with the
* runtime type of the specified array and the size of this deque.
- *
+ *
* If this deque fits in the specified array with room to spare
* (i.e., the array has more elements than this deque), the element in
* the array immediately following the end of the deque is set to
* null.
- *
+ *
* Like the {@link #toArray()} method, this method acts as bridge between
* array-based and collection-based APIs. Further, this method allows
* precise control over the runtime type of the output array, and may,
* under certain circumstances, be used to save allocation costs.
- *
+ *
* Suppose x is a deque known to contain only strings.
* The following code can be used to dump the deque into a newly
* allocated array of String:
- *
+ *
*
* String[] y = x.toArray(new String[0]);
- *
+ *
* Note that toArray(new Object[0]) is identical in function to
* toArray().
*
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
index 047a55ccae..aaf9674621 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskExecutorService.java
@@ -76,15 +76,15 @@
* are available or when a higher priority task arrives and will schedule it for execution.
* When pre-emption is enabled, the tasks from wait queue can replace(pre-empt) a running task.
* The pre-empted task is reported back to the Application Master(AM) for it to be rescheduled.
- *
+ *
* Because of the concurrent nature of task submission, the position of the task in wait queue is
* held as long the scheduling of the task from wait queue (with or without pre-emption) is complete.
* The order of pre-emption is based on the ordering in the pre-emption queue. All tasks that cannot
* run to completion immediately (canFinish = false) are added to pre-emption queue.
- *
+ *
* When all the executor threads are occupied and wait queue is full, the task scheduler will
* return SubmissionState.REJECTED response
- *
+ *
* Task executor service can be shut down which will terminated all running tasks and reject all
* new tasks. Shutting down of the task executor service can be done gracefully or immediately.
*/
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index 7f436e2326..e86a96c1e6 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -344,7 +344,7 @@ private String constructThreadNameSuffix(TezTaskAttemptID taskAttemptId) {
/**
* Attempt to kill a running task. If the task has not started running, it will not start.
* If it's already running, a kill request will be sent to it.
- *
+ *
* The AM will be informed about the task kill.
*/
public void killTask() {
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
index e14f3141d4..54b61358d1 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/metrics/LlapDaemonJvmInfo.java
@@ -24,7 +24,7 @@
/**
* Llap daemon JVM info. These are some additional metrics that are not exposed via
- * {@link org.apache.hadoop.metrics.jvm.JvmMetrics}
+ * {@link org.apache.hadoop.hive.common.JvmMetrics}
*
* NOTE: These metrics are for sinks supported by hadoop-metrics2. There is already a /jmx endpoint
* that gives all these info.
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
index 2b653a5d21..85f9f4355d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
@@ -139,10 +139,6 @@ public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
this.createVwDesc = createVwDesc;
}
- /**
- * @param dropTblDesc
- * drop table descriptor
- */
public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
DropPartitionDesc dropPartitionDesc) {
this(inputs, outputs);
diff --git serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataInputStream.java serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataInputStream.java
index b26d3422f9..3cb21b7c3e 100644
--- serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataInputStream.java
+++ serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataInputStream.java
@@ -108,8 +108,8 @@ public Timestamp readTimestamp(Integer byteNum) throws IOException {
* Read DATE.
* The representation of date in Teradata binary format is:
* The Date D is a int with 4 bytes using little endian,
- * The representation is (D+19000000).ToString -> YYYYMMDD,
- * eg: Date 07 b2 01 00 -> 111111 in little endian -> 19111111 - > 1911.11.11.
+ * The representation is (D+19000000).ToString -&amp;gt; YYYYMMDD,
+ * eg: Date 07 b2 01 00 -&amp;gt; 111111 in little endian -&amp;gt; 19111111 - &amp;gt; 1911.11.11.
* the null date will use 0 to pad.
*
* @return the date
@@ -135,7 +135,7 @@ public Date readDate() throws IOException, ParseException {
/**
* Read CHAR(N).
* The representation of char in Teradata binary format is
- * the byte number to read is based on the [charLength] * [bytePerChar] <- totalLength,
+ * the byte number to read is based on the [charLength] * [bytePerChar] &amp;lt;- totalLength,
* bytePerChar is decided by the charset: LATAIN charset is 2 bytes per char and UNICODE charset is 3 bytes per char.
* the null char will use space to pad.
*
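The decode rule above, worked through on the javadoc's own example bytes (07 b2 01 00); a self-contained sketch, not the SerDe code:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

class TeradataDateDecodeSketch {
  public static void main(String[] args) {
    byte[] raw = { 0x07, (byte) 0xb2, 0x01, 0x00 };
    int d = ByteBuffer.wrap(raw).order(ByteOrder.LITTLE_ENDIAN).getInt(); // 111111
    int yyyymmdd = d + 19000000;                                          // 19111111
    System.out.printf("%04d-%02d-%02d%n",
        yyyymmdd / 10000, (yyyymmdd / 100) % 100, yyyymmdd % 100);        // 1911-11-11
  }
}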
diff --git serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataOutputStream.java serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataOutputStream.java
index f2f801dc8f..3799aa21b7 100644
--- serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataOutputStream.java
+++ serde/src/java/org/apache/hadoop/hive/serde2/teradata/TeradataBinaryDataOutputStream.java
@@ -138,8 +138,8 @@ public void writeDouble(double d) throws IOException {
* Write DATE.
* The representation of date in Teradata binary format is:
* The Date D is a int with 4 bytes using little endian.
- * The representation is (YYYYMMDD - 19000000).toInt -> D
- * eg. 1911.11.11 -> 19111111 -> 111111 -> 07 b2 01 00 in little endian.
+ * The representation is (YYYYMMDD - 19000000).toInt -&amp;gt; D
+ * eg. 1911.11.11 -&amp;gt; 19111111 -&amp;gt; 111111 -&amp;gt; 07 b2 01 00 in little endian.
* the null date will use 0 to pad.
*
* @param date the date
@@ -168,7 +168,7 @@ public void writeLong(long l) throws IOException {
/**
* Write CHAR(N).
* The representation of char in Teradata binary format is:
- * the byte number to read is based on the [charLength] * [bytePerChar] <- totalLength,
+ * the byte number to read is based on the [charLength] * [bytePerChar] &amp;lt;- totalLength,
* bytePerChar is decided by the charset: LATAIN charset is 2 bytes per char and UNICODE charset is 3 bytes per char.
* the null char will use space to pad.
*
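And the encode direction of the same rule, again as an illustrative sketch:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

class TeradataDateEncodeSketch {
  // 1911-11-11 -> 19111111 -> 111111 -> 07 b2 01 00 in little endian.
  static byte[] encode(int year, int month, int day) {
    int d = (year * 10000 + month * 100 + day) - 19000000;
    return ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN).putInt(d).array();
  }
}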
diff --git service/src/java/org/apache/hive/service/Service.java service/src/java/org/apache/hive/service/Service.java
index 51ff7c184f..f98964188a 100644
--- service/src/java/org/apache/hive/service/Service.java
+++ service/src/java/org/apache/hive/service/Service.java
@@ -49,7 +49,7 @@
* The transition must be from {@link STATE#NOTINITED} to {@link STATE#INITED} unless the
* operation failed and an exception was raised.
*
- * @param config
+ * @param conf
* the configuration of the service
*/
void init(HiveConf conf);
diff --git service/src/java/org/apache/hive/service/ServiceOperations.java service/src/java/org/apache/hive/service/ServiceOperations.java
index 093bcab914..1b68ea48b4 100644
--- service/src/java/org/apache/hive/service/ServiceOperations.java
+++ service/src/java/org/apache/hive/service/ServiceOperations.java
@@ -51,7 +51,7 @@ public static void ensureCurrentState(Service.STATE state,
/**
* Initialize a service.
- *
+ *
* The service state is checked before the operation begins.
* This process is not thread safe.
* @param service a service that must be in the state
@@ -69,7 +69,7 @@ public static void init(Service service, HiveConf configuration) {
/**
* Start a service.
- *
+ *
* The service state is checked before the operation begins.
* This process is not thread safe.
* @param service a service that must be in the state
@@ -86,7 +86,7 @@ public static void start(Service service) {
/**
* Initialize then start a service.
- *
+ *
* The service state is checked before the operation begins.
* This process is not thread safe.
* @param service a service that must be in the state
@@ -102,9 +102,9 @@ public static void deploy(Service service, HiveConf configuration) {
/**
* Stop a service.
- * Do nothing if the service is null or not
+ *
+ * Do nothing if the service is null or not
* in a state in which it can be/needs to be stopped.
- *
+ *
* The service state is checked before the operation begins.
* This process is not thread safe.
* @param service a service or null
diff --git service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java
index d18ac87a69..7dc11b27f9 100644
--- service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java
+++ service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java
@@ -78,7 +78,7 @@ public static String getKerberosServiceTicket(String principal, String host, Str
* @param clientUserName Client User name.
* @return An unsigned cookie token generated from input parameters.
* The final cookie generated is of the following format :
- * cu=<username>&rn=<randomNumber>&s=<cookieSignature>
+ * cu=&amp;lt;username&amp;gt;&amp;amp;rn=&amp;lt;randomNumber&amp;gt;&amp;amp;s=&amp;lt;cookieSignature&amp;gt;
*/
public static String createCookieToken(String clientUserName) {
StringBuilder sb = new StringBuilder();
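The unsigned token layout documented above, sketched; the random source is an assumption, and the s=<cookieSignature> attribute is appended separately after signing:

import java.security.SecureRandom;

class CookieTokenSketch {
  // cu=<username>&rn=<randomNumber>
  static String unsignedToken(String clientUserName) {
    return "cu=" + clientUserName + "&rn=" + new SecureRandom().nextLong();
  }
}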
diff --git service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java
index 60e35b406d..fdc6857a92 100644
--- service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java
+++ service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java
@@ -26,7 +26,7 @@
* to authenticate users for their requests.
* If a user is to be granted, return nothing/throw nothing.
* When a user is to be disallowed, throw an appropriate {@link AuthenticationException}.
- *
+ *
* For an example implementation, see {@link LdapAuthenticationProviderImpl}.
*
* @param user The username received over the connection request
diff --git service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java
index eb59642072..8e4659b895 100644
--- service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java
+++ service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java
@@ -31,12 +31,12 @@
/**
* This class is responsible for setting the ipAddress for operations executed via HiveServer2.
- *
+ *
*
* - IP address is only set for operations that calls listeners with hookContext
* - IP address is only set if the underlying transport mechanism is socket
*
- *
+ *
*
* @see org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext
*/
diff --git service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java
index 200cb0172a..30ce1a6b6f 100644
--- service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java
+++ service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java
@@ -30,7 +30,7 @@
*
* The produced filter object filters out all users that are not found in the search result
* of the query provided in Hive configuration.
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY
+ * @see org.apache.hadoop.hive.conf.HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY
*/
public class CustomQueryFilterFactory implements FilterFactory {
diff --git service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java
index 9165227ae7..5470ad790b 100644
--- service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java
+++ service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java
@@ -35,7 +35,7 @@
*
* The produced filter object filters out all users that are not members of at least one of
* the groups provided in Hive configuration.
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER
*/
public final class GroupFilterFactory implements FilterFactory {
diff --git service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java
index 5336c10fd7..d3caaefc6d 100644
--- service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java
+++ service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java
@@ -145,10 +145,10 @@ public static boolean isDn(String name) {
* @param conf Hive configuration
* @param var variable to be read
* @return a list of DN patterns
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_BASEDN
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GUIDKEY
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_BASEDN
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GUIDKEY
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
*/
public static List<String> parseDnPatterns(HiveConf conf, HiveConf.ConfVars var) {
String patternsString = conf.getVar(var);
@@ -183,8 +183,8 @@ private static String patternToBaseDn(String pattern) {
* Converts a collection of Distinguished Name patterns to a collection of base DNs.
* @param patterns Distinguished Name patterns
* @return a list of base DNs
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
*/
public static List<String> patternsToBaseDns(Collection<String> patterns) {
List<String> result = new ArrayList<>();
diff --git service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java
index aac1160aa6..5f0f3b6f19 100644
--- service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java
+++ service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java
@@ -147,7 +147,7 @@ public void handle(RecordProcessor processor) throws NamingException {
* Implementations of this interface perform the actual work of processing each record,
* but don't need to worry about exception handling, closing underlying data structures,
* and combining results from several search requests.
- * {@see SearchResultHandler}
+ * @see SearchResultHandler
*/
public interface RecordProcessor {
diff --git service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java
index cb00aa9b16..c3bcfd9b4d 100644
--- service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java
+++ service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java
@@ -30,7 +30,7 @@
*
* The produced filter object filters out all users that are not on the provided in
* Hive configuration list.
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERFILTER
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_USERFILTER
*/
public final class UserFilterFactory implements FilterFactory {
diff --git service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
index 56ee54ca04..199e9025a7 100644
--- service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
+++ service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
@@ -31,7 +31,7 @@
* Convert a SQL search pattern into an equivalent Java Regex.
*
* @param pattern input which may contain '%' or '_' wildcard characters, or
- * these characters escaped using {@link #getSearchStringEscape()}.
+ * these characters escaped using getSearchStringEscape().
* @return replace %/_ with regex search characters, also handle escaped
* characters.
*/
diff --git service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
index ae7be23b81..aca169eb07 100644
--- service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
+++ service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
@@ -36,9 +36,9 @@
/**
* ClassicTableTypeMapping.
* Classic table type mapping :
- * Managed Table ==> Table
- * External Table ==> Table
- * Virtual View ==> View
+ * Managed Table ==&amp;gt; Table
+ * External Table ==&amp;gt; Table
+ * Virtual View ==&amp;gt; View
*/
public class ClassicTableTypeMapping implements TableTypeMapping {
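The classic collapse listed above, spelled out; the map below is illustrative (the key strings follow the metastore TableType names) and is not the class's actual field:

import java.util.HashMap;
import java.util.Map;

class ClassicMappingSketch {
  static final Map<String, String> CLASSIC = new HashMap<>();
  static {
    CLASSIC.put("MANAGED_TABLE", "TABLE");
    CLASSIC.put("EXTERNAL_TABLE", "TABLE");
    CLASSIC.put("VIRTUAL_VIEW", "VIEW");
  }
}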
diff --git service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java
index eb8c712522..3f2a89b9dc 100644
--- service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java
+++ service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java
@@ -35,7 +35,7 @@
/**
* Map hive's table type name to client's table type
- * @param clientTypeName
+ * @param hiveTypeName
* @return
*/
public String mapToClientType (String hiveTypeName);
diff --git service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java
index 805934f046..633bf89630 100644
--- service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java
+++ service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java
@@ -30,12 +30,12 @@
* in custom cleanup code to be called before this thread is GC-ed.
* Currently cleans up the following:
* 1. ThreadLocal RawStore object:
- * In case of an embedded metastore, HiveServer2 threads (foreground & background)
+ * In case of an embedded metastore, HiveServer2 threads (foreground &amp;amp; background)
* end up caching a ThreadLocal RawStore object. The ThreadLocal RawStore object has
- * an instance of PersistenceManagerFactory & PersistenceManager.
+ * an instance of PersistenceManagerFactory &amp;amp; PersistenceManager.
* The PersistenceManagerFactory keeps a cache of PersistenceManager objects,
* which are only removed when PersistenceManager#close method is called.
- * HiveServer2 uses ExecutorService for managing thread pools for foreground & background threads.
+ * HiveServer2 uses ExecutorService for managing thread pools for foreground &amp;amp; background threads.
* ExecutorService unfortunately does not provide any hooks to be called,
* when a thread from the pool is terminated.
* As a solution, we're using this ThreadFactory to keep a cache of RawStore objects per thread.
diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
index 3a827f7cde..76c97d594b 100644
--- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
+++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
@@ -35,7 +35,7 @@
*
*
*
- * Implementations can use {@link MetaStoreUtils#isExternalTable} to
+ * Implementations can use {@link org.apache.hadoop.hive.metastore.utils.MetaStoreUtils#isExternalTable} to
* distinguish external tables from managed tables.
*/
@InterfaceAudience.Public
diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index f67761eb6e..f7919e268d 100644
--- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -2708,7 +2708,6 @@ boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption)
throws MetaException, TException;
/**
- * @param revokePrivileges
* @param authorizer
* @param objToRefresh
* @return true on success
@@ -2894,7 +2893,7 @@ Function getFunction(String catName, String dbName, String funcName)
/**
* Get a structure that details valid write ids.
- * @param fullTableName full table name of format <db_name>.<table_name>
+ * @param fullTableName full table name of format &amp;lt;db_name&amp;gt;.&amp;lt;table_name&amp;gt;
* @return list of valid write ids for the given table
* @throws TException
*/
@@ -2902,7 +2901,7 @@ Function getFunction(String catName, String dbName, String funcName)
/**
* Get a structure that details valid write ids.
- * @param fullTableName full table name of format <db_name>.<table_name>
+ * @param fullTableName full table name of format &amp;lt;db_name&amp;gt;.&amp;lt;table_name&amp;gt;
* @param writeId The write id to get the corresponding txn
* @return list of valid write ids for the given table
* @throws TException
@@ -2911,7 +2910,7 @@ Function getFunction(String catName, String dbName, String funcName)
/**
* Get a structure that details valid write ids list for all tables read by current txn.
- * @param tablesList list of tables (format: <db_name>.<table_name>) read from the current transaction
+ * @param tablesList list of tables (format: &amp;lt;db_name&amp;gt;.&amp;lt;table_name&amp;gt;) read from the current transaction
* for which needs to populate the valid write ids
* @param validTxnList snapshot of valid txns for the current txn
* @return list of valid write ids for the given list of tables.
diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
index ecd59961a9..6c17c86106 100755
--- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -590,7 +590,7 @@ public static boolean makeSpecFromName(Map<String, String> partSpec, Path currPa
* pairs to create the Path for the partition directory
*
* @param db - parent database which is used to get the base location of the partition directory
- * @param tableName - table name for the partitions
+ * @param table - table for the partitions
* @param pm - Partition key value pairs
* @return
* @throws MetaException
diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FilterUtils.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FilterUtils.java
index da70dbcede..2ed314b4d7 100644
--- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FilterUtils.java
+++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/FilterUtils.java
@@ -325,7 +325,6 @@ public static Catalog filterCatalogIfEnabled(
* could improve performance when filtering partitions.
* @param dbName the database name
* @param tblName the table name contained in the database
- * @return if the
* @throws NoSuchObjectException if the database or table is filtered out
*/
public static void checkDbAndTableFilters(boolean isFilterEnabled,
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index f4e0c41b7c..d903a912b9 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -35,7 +35,7 @@
/**
* @deprecated As of release 2.2.0. Replaced by {@link #alterTable(RawStore, Warehouse, String,
- * String, String, Table, EnvironmentContext, IHMSHandler)}
+ * String, String, Table, EnvironmentContext, IHMSHandler, String)}
*
* handles alter table, the changes could be cascaded to partitions if applicable
*
@@ -92,8 +92,8 @@ void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
throws InvalidOperationException, MetaException;
/**
- * @deprecated As of release 2.2.0. Replaced by {@link #alterPartition(RawStore, Warehouse, String,
- * String, List, Partition, EnvironmentContext, IHMSHandler)}
+ * @deprecated As of release 2.2.0. Replaced by {@link #alterPartitions(RawStore, Warehouse, String,
+ * String, String, List, EnvironmentContext, String, long, IHMSHandler)}
*
* handles alter partition
*
@@ -151,7 +151,7 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName
/**
* @deprecated As of release 3.0.0. Replaced by {@link #alterPartitions(RawStore, Warehouse, String,
- * String, String, List, EnvironmentContext, IHMSHandler)}
+ * String, String, List, EnvironmentContext, String, long, IHMSHandler)}
*
* handles alter partitions
*
@@ -201,4 +201,4 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName
EnvironmentContext environmentContext, String writeIdList, long writeId,
IHMSHandler handler)
throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
-}
\ No newline at end of file
+}
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java
index dd31226dca..da7478f13f 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockHandler.java
@@ -32,9 +32,9 @@
* This is a lock handler implementation for the materializations rebuild.
* It is lightweight: it does not persist any information to metastore db.
* Its states are as follows:
- * 1) request lock -> 2) ACQUIRED -> 4) COMMIT_READY -> 6) release lock
- * -> 5) EXPIRED ->
- * -> 3) NOT_ACQUIRED
+ * 1) request lock -&amp;gt; 2) ACQUIRED -&amp;gt; 4) COMMIT_READY -&amp;gt; 6) release lock
+ * -&amp;gt; 5) EXPIRED -&amp;gt;
+ * -&amp;gt; 3) NOT_ACQUIRED
* First, the rebuild operation will ACQUIRE the lock. If other rebuild
* operation for the same operation is already running, we lock status
* will be NOT_ACQUIRED.
@@ -107,7 +107,6 @@ public boolean readyToCommitResource(String dbName, String tableName, long txnId
* @param dbName the db name of the materialization
* @param tableName the table name of the materialization
* @param txnId the transaction id for the rebuild
- * @throws MetaException
*/
public boolean refreshLockResource(String dbName, String tableName, long txnId) {
final ResourceLock prevResourceLock = locks.get(Warehouse.getQualifiedName(dbName, tableName));
@@ -127,7 +126,6 @@ public boolean refreshLockResource(String dbName, String tableName, long txnId)
* @param tableName the table name of the materialization
* @param txnId the transaction id for the rebuild
* @return true if the lock could be released properly, false otherwise
- * @throws MetaException
*/
public boolean unlockResource(String dbName, String tableName, long txnId) {
final String fullyQualifiedName = Warehouse.getQualifiedName(dbName, tableName);
@@ -141,7 +139,6 @@ public boolean unlockResource(String dbName, String tableName, long txnId) {
/**
* Method that removes from the handler those locks that have expired.
* @param timeout time after which we consider the locks to have expired
- * @throws MetaException
*/
public long cleanupResourceLocks(long timeout) {
long removed = 0L;
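A condensed sketch of the lifecycle in the state diagram above, using only the keep-alive, commit-ready, release, and cleanup calls that appear in these hunks. Acquiring the lock in the first place is elided, since that method is not shown in this patch, and obtaining the handler instance is assumed to happen elsewhere.

import org.apache.hadoop.hive.metastore.MaterializationsRebuildLockHandler;

class RebuildLockLifecycle {
  // Walks states 2) ACQUIRED -> 4) COMMIT_READY -> 6) release lock; expired
  // entries (state 5) are swept separately by cleanupResourceLocks.
  static void finishRebuild(MaterializationsRebuildLockHandler handler,
      String dbName, String tableName, long txnId, long timeoutMs) {
    if (!handler.refreshLockResource(dbName, tableName, txnId)) {
      return; // lock no longer held (e.g. it EXPIRED); abort the rebuild
    }
    if (handler.readyToCommitResource(dbName, tableName, txnId)) {
      // commit the rebuild here, then release the lock
    }
    handler.unlockResource(dbName, tableName, txnId);
    long removed = handler.cleanupResourceLocks(timeoutMs); // drop expired locks
    System.out.println("expired locks removed: " + removed);
  }
}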
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java
index 2837ff4a47..5f02a40746 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java
@@ -29,10 +29,10 @@
/**
- * PartitionIterable - effectively a lazy Iterable<Partition>
+ * PartitionIterable - effectively a lazy Iterable&lt;Partition&gt;
* Sometimes, we have a need for iterating through a list of partitions,
* but the list of partitions can be too big to fetch as a single object.
- * Thus, the goal of PartitionIterable is to act as an Iterable<Partition>
+ * Thus, the goal of PartitionIterable is to act as an Iterable&lt;Partition&gt;
* while lazily fetching each relevant partition, one after the other as
* independent metadata calls.
* It is very likely that any calls to PartitionIterable are going to result
@@ -133,7 +133,7 @@ public void remove() {
/**
* Dummy constructor, which simply acts as an iterator on an already-present
* list of partitions, allows for easy drop-in replacement for other methods
- * that already have a List<Partition>
+ * that already have a List&lt;Partition&gt;
*/
public PartitionIterable(Collection<Partition> ptnsProvided) {
this.currType = Type.LIST_PROVIDED;
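To illustrate the lazy-Iterable contract documented above: code written against Iterable<Partition> works the same whether partitions were pre-fetched or are fetched lazily, and the list-provided constructor from the last hunk makes the pre-fetched case a drop-in replacement. A minimal sketch; the generic parameter on the constructor argument is assumed from the restored signature.

import java.util.List;

import org.apache.hadoop.hive.metastore.PartitionIterable;
import org.apache.hadoop.hive.metastore.api.Partition;

class PartitionCount {
  // Counts partitions without materializing anything beyond the iterator;
  // with the lazy variant, each step may translate into a metadata call.
  static long count(List<Partition> preFetched) {
    PartitionIterable partitions = new PartitionIterable(preFetched);
    long n = 0;
    for (Partition p : partitions) {
      n++;
    }
    return n;
  }
}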
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 03a116aa1d..8c1ab739de 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -280,7 +280,6 @@ Partition getPartition(String catName, String dbName, String tableName,
* @param dbName database name.
* @param tableName table name.
* @param part_vals partition values for this table.
- * @param txnId transaction id of the calling transaction
* @param writeIdList string format of valid writeId transaction list
* @return the partition.
* @throws MetaException error reading from RDBMS.
@@ -517,9 +516,8 @@ Partition alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
- * @param txnId transaction id of the calling transaction
 Map<String, String> updatePartitionColumnStatistics(ColumnStatistics statsObj,
     List<String> partVals, String validWriteIds, long writeId)
@@ -936,7 +933,6 @@ ColumnStatistics getTableColumnStatistics(String catName, String dbName, String
* @param dbName name of the database, defaults to current database
* @param tableName name of the table
* @param colName names of the columns for which statistics is requested
- * @param txnId transaction id of the calling transaction
* @param writeIdList string format of valid writeId transaction list
* @return Relevant column statistics for the column for the given table
* @throws NoSuchObjectException No such table
@@ -970,7 +966,6 @@ ColumnStatistics getTableColumnStatistics(
* @param tblName table name.
* @param partNames list of partition names. These are names so must be key1=val1[/key2=val2...]
* @param colNames list of columns to get stats for
- * @param txnId transaction id of the calling transaction
* @param writeIdList string format of valid writeId transaction list
* @return list of statistics objects
* @throws MetaException error accessing the RDBMS
@@ -1233,7 +1228,6 @@ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
* @param partNames list of partition names. These are the names of the partitions, not
* values.
* @param colNames list of column names
- * @param txnId transaction id of the calling transaction
* @param writeIdList string format of valid writeId transaction list
* @return aggregated stats
* @throws MetaException error accessing RDBMS
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/OpenTxnEvent.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/OpenTxnEvent.java
index 547c43e388..d935ed1af4 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/OpenTxnEvent.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/OpenTxnEvent.java
@@ -43,7 +43,7 @@ public OpenTxnEvent(List<Long> txnIds, IHMSHandler handler) {
}
/**
- * @return List<Long> txnIds
+ * @return List&lt;Long&gt; txnIds
*/
public List<Long> getTxnIds() {
return txnIds;
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
index ea70503988..ba45f39452 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java
@@ -51,8 +51,8 @@
public String properties;
public boolean tooManyAborts = false;
/**
+ * The highest write id that the compaction job will pay attention to.
* {@code 0} means it wasn't set (e.g. in case of upgrades, since ResultSet.getLong() will return 0 if field is NULL)
- * See {@link TxnStore#setCompactionHighestWriteId(CompactionInfo, long)} for precise definition.
* See also {@link TxnUtils#createValidCompactWriteIdList(TableValidWriteIds)} and
* {@link ValidCompactorWriteIdList#highWatermark}.
*/
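A small sketch of the sentinel convention documented in the rewritten comment: since ResultSet.getLong() yields 0 for SQL NULL, 0 doubles as "wasn't set". The field name highestWriteId is assumed from the surrounding class, as it does not appear in this hunk.

import org.apache.hadoop.hive.metastore.txn.CompactionInfo;

class HighestWriteIdSentinel {
  // 0 means the value was never set (e.g. rows created before an upgrade),
  // so only positive values bound what the compaction job looks at.
  static boolean hasHighestWriteId(CompactionInfo ci) {
    return ci.highestWriteId > 0;
  }
}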
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index cd77b4e39d..da38a6bbd3 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -124,7 +124,7 @@ public static TxnStore getTxnStore(Configuration conf) {
* Note, users are responsible for using the correct TxnManager. We do not look at
* SessionState.get().getTxnMgr().supportsAcid() here
* Should produce the same result as
- * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isTransactionalTable(org.apache.hadoop.hive.ql.metadata.Table)}.
+ * org.apache.hadoop.hive.ql.io.AcidUtils#isTransactionalTable.
* @return true if table is a transactional table, false otherwise
*/
public static boolean isTransactionalTable(Table table) {
@@ -147,7 +147,7 @@ public static boolean isTransactionalTable(Map<String, String> parameters) {
/**
* Should produce the same result as
- * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)}.
+ * org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable.
*/
public static boolean isAcidTable(Table table) {
return TxnUtils.isTransactionalTable(table) &&
@@ -156,7 +156,7 @@ public static boolean isAcidTable(Table table) {
}
/**
- * Should produce the result as <dbName>.<tableName>.
+ * Should produce the result as &lt;dbName&gt;.&lt;tableName&gt;.
*/
public static String getFullTableName(String dbName, String tableName) {
return dbName.toLowerCase() + "." + tableName.toLowerCase();
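The getFullTableName body shown above lowercases both parts, so the <dbName>.<tableName> result is a canonical, case-insensitive key. For example:

import org.apache.hadoop.hive.metastore.txn.TxnUtils;

class FullTableNameExample {
  public static void main(String[] args) {
    // Mixed-case input normalizes to "sales.q1_orders".
    System.out.println(TxnUtils.getFullTableName("Sales", "Q1_Orders"));
  }
}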
diff --git storage-api/src/java/org/apache/hadoop/hive/common/io/FileMetadataCache.java storage-api/src/java/org/apache/hadoop/hive/common/io/FileMetadataCache.java
index e4aa888f67..d7de361938 100644
--- storage-api/src/java/org/apache/hadoop/hive/common/io/FileMetadataCache.java
+++ storage-api/src/java/org/apache/hadoop/hive/common/io/FileMetadataCache.java
@@ -57,8 +57,6 @@ MemoryBufferOrBuffers putFileMetadata(
/**
* Puts the metadata for a given file (e.g. a footer buffer into cache).
* @param fileKey The file key.
- * @param length The footer length.
- * @param is The stream to read the footer from.
* @return The buffer or buffers representing the cached footer.
* The caller must decref this buffer when done.
*/
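The @return note above puts the release obligation on the caller. Here is a sketch of that decref contract; the getFileMetadata/decRefBuffer companions and the import path for MemoryBufferOrBuffers are assumed from the same storage-api package, as neither appears in this hunk.

import org.apache.hadoop.hive.common.io.FileMetadataCache;
import org.apache.hadoop.hive.common.io.MemoryBufferOrBuffers;

class CachedFooterRead {
  // Whoever obtains the cached footer must decref it when done, or the
  // buffers stay pinned in cache memory indefinitely.
  static void readFooter(FileMetadataCache cache, Object fileKey) {
    MemoryBufferOrBuffers footer = cache.getFileMetadata(fileKey);
    if (footer == null) {
      return; // not cached
    }
    try {
      // parse the footer buffers here
    } finally {
      cache.decRefBuffer(footer);
    }
  }
}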
diff --git streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java
index a32aa62bbc..fa7e079331 100644
--- streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java
+++ streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java
@@ -63,12 +63,12 @@
* To bind to the correct metastore, HiveConf object has to be created from hive-site.xml or HIVE_CONF_DIR.
* If hive conf is manually created, metastore uri has to be set correctly. If hive conf object is not specified,
* "thrift://localhost:9083" will be used as default.
- *
+ *
* NOTE: The streaming connection APIs and record writer APIs are not thread-safe. Streaming connection creation,
* begin/commit/abort transactions, write and close has to be called in the same thread. If close() or
* abortTransaction() has to be triggered from a separate thread it has to be co-ordinated via external variables or
* synchronization mechanism
- *
+ *
* Example usage:
* {@code
* // create delimited record writer whose schema exactly matches table schema
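Condensing the javadoc's usage notes into code: every call from connect through close stays on one thread, per the NOTE above. The builder and method names follow the javadoc example this hunk touches; treat this as a sketch to check against your release, and the database, table, and agent names are placeholders.

import org.apache.hive.streaming.HiveStreamingConnection;
import org.apache.hive.streaming.StreamingConnection;
import org.apache.hive.streaming.StrictDelimitedInputWriter;

class SingleThreadedStreaming {
  public static void main(String[] args) throws Exception {
    // Writer whose delimited schema matches the target table.
    StrictDelimitedInputWriter writer = StrictDelimitedInputWriter.newBuilder()
        .withFieldDelimiter(',')
        .build();
    // Connection, transactions, writes, and close all on this one thread.
    StreamingConnection connection = HiveStreamingConnection.newBuilder()
        .withDatabase("default")
        .withTable("alerts")
        .withAgentInfo("example-agent")
        .withRecordWriter(writer)
        .connect();
    try {
      connection.beginTransaction();
      connection.write("1,val1".getBytes());
      connection.write("2,val2".getBytes());
      connection.commitTransaction();
    } finally {
      connection.close();
    }
  }
}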
diff --git streaming/src/java/org/apache/hive/streaming/StreamingTransaction.java streaming/src/java/org/apache/hive/streaming/StreamingTransaction.java
index c0ee03426e..01c1164d58 100644
--- streaming/src/java/org/apache/hive/streaming/StreamingTransaction.java
+++ streaming/src/java/org/apache/hive/streaming/StreamingTransaction.java
@@ -119,7 +119,7 @@ void commit(@Nullable Set<String> partitions, @Nullable String key,
Set<String> getPartitions();
/**
- * @return get the paris for transaction ids <--> write ids
+ * @return get the pairs for transaction ids &lt;--&gt; write ids
*/
List<TxnToWriteId> getTxnToWriteIds();
}
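For the txn-id/write-id pairing in the fixed @return tag, a tiny consumer sketch; TxnToWriteId and its accessors are assumed from the thrift-generated metastore API rather than taken from this hunk.

import java.util.List;

import org.apache.hadoop.hive.metastore.api.TxnToWriteId;

class TxnToWriteIdLog {
  // Each element pairs one transaction id with the write id allocated to it.
  static void log(List<TxnToWriteId> pairs) {
    for (TxnToWriteId p : pairs) {
      System.out.println(p.getTxnId() + " <--> " + p.getWriteId());
    }
  }
}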
diff --git testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
index cd6cce70c7..005119ae2f 100644
--- testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
+++ testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
@@ -34,7 +34,7 @@
/**
* Creates 2 tables to query from
*
- * @param num
+ * @param con
*/
public static void createTables(Connection con) throws SQLException {
Statement stmt = con.createStatement();
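Finally, a usage sketch for the corrected @param: createTables takes the JDBC connection. The driver class and URL below are the stock HiveServer2 JDBC defaults, assumed rather than taken from this patch; adjust host, port, and credentials for the target instance.

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.hive.testutils.jdbc.HiveBurnInClient;

class BurnInSetup {
  public static void main(String[] args) throws Exception {
    Class.forName("org.apache.hive.jdbc.HiveDriver");
    // Placeholder host/port; adjust for the target HiveServer2 instance.
    try (Connection con = DriverManager.getConnection(
        "jdbc:hive2://localhost:10000/default", "hive", "")) {
      HiveBurnInClient.createTables(con);
    }
  }
}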