diff --git accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
index 718a5c5..7730783 100644
--- accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
+++ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java
@@ -134,7 +134,7 @@ public static AccumuloPredicateHandler getInstance() {
*
* @param udfType
* GenericUDF classname to lookup matching CompareOpt
- * @return Class extends CompareOpt/>
+ * @return Class<? extends CompareOp>
*/
 public Class<? extends CompareOp> getCompareOpClass(String udfType)
throws NoSuchCompareOpException {
@@ -166,7 +166,7 @@ public CompareOp getCompareOp(String udfType, IndexSearchCondition sc)
*
* @param type
* String hive column lookup matching PrimitiveCompare
- * @return Class extends >?>
+ * @return Class<? extends PrimitiveComparison>
*/
 public Class<? extends PrimitiveComparison> getPrimitiveComparisonClass(String type)
throws NoSuchPrimitiveComparisonException {
diff --git accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloCompositeRowId.java accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloCompositeRowId.java
index f3ebbd1..2367ca2 100644
--- accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloCompositeRowId.java
+++ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloCompositeRowId.java
@@ -33,21 +33,21 @@
* AccumuloCompositeKey extension of LazyStruct. All complex composite keys should extend this class
* and override the {@link LazyStruct#getField(int)} method where fieldID corresponds to the ID of a
* key in the composite key.
- *
+ *
* For example, for a composite key "/part1/part2/part3", part1 will have an id
* 0, part2 will have an id 1 and part3 will have an id 2. Custom
* implementations of getField(fieldID) should return the value corresponding to that fieldID. So,
- * for the above example, the value returned for getField(0) should be part1,
+ * for the above example, the value returned for getField(0) should be part1,
* getField(1) should be part2 and getField(2) should be part3.
- *
+ *
*
- *
+ *
* All custom implementation are expected to have a constructor of the form:
*
*
* MyCustomCompositeKey(LazySimpleStructObjectInspector oi, Properties tbl, Configuration conf)
*
- *
+ *
*
*/
public class AccumuloCompositeRowId extends LazyStruct {
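For illustration, a custom row-id class following the contract spelled out in this Javadoc could look like the sketch below. Only the constructor shape and the getField(int) override come from the comment; the parts array, its population, and the assumption that AccumuloCompositeRowId itself exposes a matching (oi, tbl, conf) constructor are hypothetical.

import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.accumulo.serde.AccumuloCompositeRowId;
import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector;

public class SlashDelimitedRowId extends AccumuloCompositeRowId {
  // hypothetical: the parsed parts of a row id such as "/part1/part2/part3"
  private String[] parts;

  public SlashDelimitedRowId(LazySimpleStructObjectInspector oi, Properties tbl, Configuration conf) {
    super(oi, tbl, conf);
  }

  // getField(0) returns part1, getField(1) returns part2, getField(2) returns part3
  @Override
  public Object getField(int fieldID) {
    return parts[fieldID];
  }
}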
diff --git beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index 4ff4beb..029508b 100644
--- beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -168,7 +168,7 @@ private NestedScriptParser getDbCommandParser(String dbType) {
/***
* Print Hive version and schema version
- * @throws MetaException
+ * @throws HiveMetaException
*/
public void showInfo() throws HiveMetaException {
String hiveVersion = metaStoreSchemaInfo.getHiveSchemaVersion();
@@ -463,7 +463,7 @@ private void testConnectionToMetastore() throws HiveMetaException {
/**
* check if the current schema version in metastore matches the Hive version
- * @throws MetaException
+ * @throws HiveMetaException
*/
public void verifySchemaVersion() throws HiveMetaException {
// don't check version if its a dry run
@@ -485,7 +485,7 @@ private void assertCompatibleVersion(String hiveSchemaVersion, String dbSchemaVe
/**
* Perform metastore schema upgrade. extract the current schema version from metastore
- * @throws MetaException
+ * @throws HiveMetaException
*/
public void doUpgrade() throws HiveMetaException {
String fromVersion =
@@ -506,7 +506,7 @@ private MetaStoreConnectionInfo getConnectionInfo(boolean printInfo) {
*
* @param fromSchemaVer
* Existing version of the metastore. If null, then read from the metastore
- * @throws MetaException
+ * @throws HiveMetaException
*/
public void doUpgrade(String fromSchemaVer) throws HiveMetaException {
if (metaStoreSchemaInfo.getHiveSchemaVersion().equals(fromSchemaVer)) {
@@ -541,7 +541,7 @@ public void doUpgrade(String fromSchemaVer) throws HiveMetaException {
/**
* Initialize the metastore schema to current version
*
- * @throws MetaException
+ * @throws HiveMetaException
*/
public void doInit() throws HiveMetaException {
doInit(metaStoreSchemaInfo.getHiveSchemaVersion());
@@ -555,7 +555,7 @@ public void doInit() throws HiveMetaException {
*
* @param toVersion
* If null then current hive version is used
- * @throws MetaException
+ * @throws HiveMetaException
*/
public void doInit(String toVersion) throws HiveMetaException {
testConnectionToMetastore();
diff --git contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java
index 0c13f5e..d76aa7d 100644
--- contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java
+++ contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java
@@ -41,7 +41,7 @@
* As an example, here's the wordcount reduce:
*
* new GenericMR().reduce(System.in, System.out, new Reducer() { public void
- * reduce(String key, Iterator records, Output output) throws
+ * reduce(String key, Iterator<String[]> records, Output output) throws
* Exception { int count = 0;
*
* while (records.hasNext()) { count += Integer.parseInt(records.next()[1]); }
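Written out as a compilable sketch, the wordcount reducer quoted in this Javadoc reads as follows; the trailing output.collect(...) call and the Reducer/Output imports are assumptions about the surrounding contrib API rather than part of the quoted snippet.

import java.util.Iterator;

import org.apache.hadoop.hive.contrib.mr.GenericMR;
import org.apache.hadoop.hive.contrib.mr.Output;
import org.apache.hadoop.hive.contrib.mr.Reducer;

public class WordCountReduce {
  public static void main(String[] args) throws Exception {
    new GenericMR().reduce(System.in, System.out, new Reducer() {
      @Override
      public void reduce(String key, Iterator<String[]> records, Output output) throws Exception {
        int count = 0;
        while (records.hasNext()) {
          // column 1 of each tab-separated record holds a partial count
          count += Integer.parseInt(records.next()[1]);
        }
        output.collect(new String[] { key, String.valueOf(count) });
      }
    });
  }
}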
diff --git contrib/src/java/org/apache/hadoop/hive/contrib/serde2/RegexSerDe.java contrib/src/java/org/apache/hadoop/hive/contrib/serde2/RegexSerDe.java
index f27b0c7..546dded 100644
--- contrib/src/java/org/apache/hadoop/hive/contrib/serde2/RegexSerDe.java
+++ contrib/src/java/org/apache/hadoop/hive/contrib/serde2/RegexSerDe.java
@@ -61,8 +61,8 @@
* into a row. If the output type of the column in a query is not a string, it
* will be automatically converted to String by Hive.
*
- * For the format of the format String, please refer to {@link http
- * ://java.sun.com/j2se/1.5.0/docs/api/java/util/Formatter.html#syntax}
+ * For the format of the format String, please refer to
+ * http://java.sun.com/j2se/1.5.0/docs/api/java/util/Formatter.html#syntax
*
* NOTE: Obviously, all columns have to be strings. Users can use
* "CAST(a AS INT)" to convert columns to other types.
diff --git contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java
index 7806466..d8709be 100644
--- contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java
+++ contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java
@@ -44,7 +44,7 @@
* by a primitive.
*
* The internal state can also contains fields with types like
- * ArrayList and HashMap if needed.
+ * ArrayList<String> and HashMap<String,Double> if needed.
*/
public static class UDAFAvgState {
private long mCount;
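As a small illustration of the comment above, an aggregation state class is free to mix primitive counters with collection-typed fields; all names below are made up.

import java.util.ArrayList;
import java.util.HashMap;

public class UDAFExampleState {
  private long mCount;
  private double mSum;
  // collection-typed state, as the comment above allows
  private ArrayList<String> mSeenKeys = new ArrayList<String>();
  private HashMap<String, Double> mPartialSums = new HashMap<String, Double>();
}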
diff --git hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseCompositeKey.java hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseCompositeKey.java
index 936e22d..98195ca 100644
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseCompositeKey.java
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseCompositeKey.java
@@ -35,22 +35,21 @@
* HBaseCompositeKey extension of LazyStruct. All complex composite keys should extend this class
* and override the {@link LazyStruct#getField(int)} method where fieldID corresponds to the ID of a
* key in the composite key.
- *
+ *
* For example, for a composite key "/part1/part2/part3", part1 will have an id
* 0, part2 will have an id 1 and part3 will have an id 2. Custom
* implementations of getField(fieldID) should return the value corresponding to that fieldID. So,
- * for the above example, the value returned for getField(0) should be part1,
+ * for the above example, the value returned for getField(0) should be part1,
* getField(1) should be part2 and getField(2) should be part3.
- *
*
- *
+ *
+ *
* All custom implementation are expected to have a constructor of the form:
*
*
* MyCustomCompositeKey(LazySimpleStructObjectInspector oi, Properties tbl, Configuration conf)
*
*
- *
*
* */
public class HBaseCompositeKey extends LazyStruct {
diff --git hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java
index c2e7808..de6b3a6 100644
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java
@@ -154,7 +154,7 @@ public static ColumnMappings parseColumnsMapping(
* @param columnsMappingSpec string hbase.columns.mapping specified when creating table
* @param doColumnRegexMatching whether to do a regex matching on the columns or not
* @param hideColumnPrefix whether to hide a prefix of column mapping in key name in a map (works only if @doColumnRegexMatching is true)
- * @return List which contains the column mapping information by position
+ * @return List<ColumnMapping> which contains the column mapping information by position
* @throws org.apache.hadoop.hive.serde2.SerDeException
*/
public static ColumnMappings parseColumnsMapping(
diff --git hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableOutputFormat.java hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableOutputFormat.java
index 4b8f62c..55bb710 100644
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableOutputFormat.java
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableOutputFormat.java
@@ -46,7 +46,7 @@
/**
* HiveHBaseTableOutputFormat implements HiveOutputFormat for HBase tables.
- * We also need to implement the @deprecated org.apache.hadoop.mapred.OutFormat,?>
+ * We also need to implement the @deprecated org.apache.hadoop.mapred.OutputFormat<?,?>
* class to keep it compliant with Hive interfaces.
*/
public class HiveHBaseTableOutputFormat extends
@@ -60,15 +60,7 @@
* Update the out table, and output an empty key as the key.
*
* @param jc the job configuration file
- * @param finalOutPath the final output table name
- * @param valueClass the value class
- * @param isCompressed whether the content is compressed or not
- * @param tableProperties the table info of the corresponding table
- * @param progress progress used for status report
- * @return the RecordWriter for the output file
*/
-
-
@Override
public void checkOutputSpecs(FileSystem fs, JobConf jc) throws IOException {
diff --git hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseStructValue.java hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseStructValue.java
index 8fba79b..28627ee 100644
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseStructValue.java
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseStructValue.java
@@ -32,21 +32,20 @@
* This is an extension of LazyStruct. All value structs should extend this class and override the
* {@link LazyStruct#getField(int)} method where fieldID corresponds to the ID of a value in the
* value structure.
- *
+ *
* For example, for a value structure "/part1/part2/part3", part1 will have an id
* 0, part2 will have an id 1 and part3 will have an id 2. Custom
* implementations of getField(fieldID) should return the value corresponding to that fieldID. So,
- * for the above example, the value returned for getField(0) should be part1,
+ * for the above example, the value returned for getField(0) should be part1,
* getField(1) should be part2 and getField(2) should be part3.
- *
- *
+ *
+ *
* All implementation are expected to have a constructor of the form
*
*
* MyCustomStructObject(LazySimpleStructObjectInspector oi, Properties props, Configuration conf, ColumnMapping colMap)
*
*
- *
* */
public class HBaseStructValue extends LazyStruct {
diff --git hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java
index 3fead1e..e660a2a 100644
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java
@@ -38,7 +38,7 @@
/**
* Initialize factory with properties
*
- * @param hbaseParam the {@link HBaseParameters hbase parameters}
+ * @param hbaseParam the {@link HBaseSerDeParameters hbase parameters}
* @param conf the hadoop {@link Configuration configuration}
* @param properties the custom {@link Properties}
* @throws SerDeException if there was an issue initializing the factory
@@ -67,7 +67,7 @@ void init(HBaseSerDeParameters hbaseParam, Configuration conf, Properties proper
* @param object the object to be serialized
* @param field the {@link StructField}
* @return the serialized value
- * @throws {@link IOException} if there was an issue serializing the value
+ * @throws IOException if there was an issue serializing the value
*/
byte[] serializeValue(Object object, StructField field) throws IOException;
}
\ No newline at end of file
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
index 3998407..924fd7c 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java
@@ -186,7 +186,7 @@ private HCatConstants() { // restrict instantiation
/**
* {@value} (default: {@value #HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT}).
- * Pig < 0.10.0 does not have boolean support, and scripts written for pre-boolean Pig versions
+ * Pig < 0.10.0 does not have boolean support, and scripts written for pre-boolean Pig versions
* will not expect boolean values when upgrading Pig. For integration the option is offered to
* convert boolean fields to integers by setting this Hadoop configuration key.
*/
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
index 8b927af..4cb4d39 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java
@@ -289,11 +289,11 @@ public static HCatSchema getPartitionColumns(Table table) throws IOException {
* Test if the first FsAction is more permissive than the second. This is
* useful in cases where we want to ensure that a file owner has more
* permissions than the group they belong to, for eg. More completely(but
- * potentially more cryptically) owner-r >= group-r >= world-r : bitwise
- * and-masked with 0444 => 444 >= 440 >= 400 >= 000 owner-w >= group-w >=
- * world-w : bitwise and-masked with &0222 => 222 >= 220 >= 200 >= 000
- * owner-x >= group-x >= world-x : bitwise and-masked with &0111 => 111 >=
- * 110 >= 100 >= 000
+ * potentially more cryptically) owner-r >= group-r >= world-r : bitwise
+ * and-masked with 0444 => 444 >= 440 >= 400 >= 000 owner-w >= group-w >=
+ * world-w : bitwise and-masked with &0222 => 222 >= 220 >= 200 >= 000
+ * owner-x >= group-x >= world-x : bitwise and-masked with &0111 => 111 >=
+ * 110 >= 100 >= 000
*
* @return true if first FsAction is more permissive than the second, false
* if not.
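The mask arithmetic described in that paragraph, written as a sketch over plain POSIX mode bits; the method name and the use of int modes instead of Hadoop's FsAction are assumptions made for brevity.

class PermissionSketch {
  // e.g. 0644 vs 0444: compare the read, write and execute bit groups independently
  static boolean isMorePermissive(int firstMode, int secondMode) {
    return (firstMode & 0444) >= (secondMode & 0444)
        && (firstMode & 0222) >= (secondMode & 0222)
        && (firstMode & 0111) >= (secondMode & 0111);
  }
}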
@@ -563,9 +563,9 @@ public static IMetaStoreClient getHiveMetastoreClient(HiveConf hiveConf)
/**
* Get or create a hive client depending on whether it exits in cache or not.
- * @Deprecated : use {@link #getHiveMetastoreClient(HiveConf)} instead.
+ * @deprecated : use {@link #getHiveMetastoreClient(HiveConf)} instead.
* This was deprecated in Hive 1.2, slated for removal in two versions
- * (i.e. 1.2 & 1.3(projected) will have it, but it will be removed after that)
+ * (i.e. 1.2 & 1.3(projected) will have it, but it will be removed after that)
* @param hiveConf The hive configuration
* @return the client
* @throws MetaException When HiveMetaStoreClient couldn't be created
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/HCatWriter.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/HCatWriter.java
index da6ad5b..694771d 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/HCatWriter.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/HCatWriter.java
@@ -58,7 +58,7 @@
*
* @param recordItr
* {@link Iterator} records to be written into HCatalog.
- * @throws {@link HCatException}
+ * @throws HCatException
*/
 public abstract void write(final Iterator<HCatRecord> recordItr)
throws HCatException;
@@ -67,7 +67,7 @@ public abstract void write(final Iterator recordItr)
* This method should be called at master node. Primary purpose of this is to
* do metadata commit.
*
- * @throws {@link HCatException}
+ * @throws HCatException
*/
public abstract void commit(final WriterContext context) throws HCatException;
@@ -75,7 +75,7 @@ public abstract void write(final Iterator recordItr)
* This method should be called at master node. Primary purpose of this is to
* do cleanups in case of failures.
*
- * @throws {@link HCatException} *
+ * @throws HCatException
*/
public abstract void abort(final WriterContext context) throws HCatException;
diff --git hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
index f7c2f44..f72be28 100644
--- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
+++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
@@ -151,7 +151,7 @@ public void onAlterPartition(AlterPartitionEvent ape) throws MetaException {
* particular table by listening on a topic named "dbName.tableName" with message selector
* string {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_EVENT} =
* {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_DROP_PARTITION_EVENT}.
- *
+ *
* TODO: DataNucleus 2.0.3, currently used by the HiveMetaStore for persistence, has been
* found to throw NPE when serializing objects that contain null. For this reason we override
* some fields in the StorageDescriptor of this notification. This should be fixed after
@@ -269,7 +269,7 @@ public void onAlterTable(AlterTableEvent tableEvent) throws MetaException {
* dropped tables by listening on topic "HCAT" with message selector string
* {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_EVENT} =
* {@value org.apache.hive.hcatalog.common.HCatConstants#HCAT_DROP_TABLE_EVENT}
- *
+ *
* TODO: DataNucleus 2.0.3, currently used by the HiveMetaStore for persistence, has been
* found to throw NPE when serializing objects that contain null. For this reason we override
* some fields in the StorageDescriptor of this notification. This should be fixed after
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
index 58fba4f..096d198 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/DelimitedInputWriter.java
@@ -45,7 +45,7 @@
/**
* Streaming Writer handles delimited input (eg. CSV).
- * Delimited input is parsed & reordered to match column order in table
+ * Delimited input is parsed & reordered to match column order in table
* Uses Lazy Simple Serde to process delimited input
*/
public class DelimitedInputWriter extends AbstractRecordWriter {
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java
index 1ad0842..cb437b3 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorCoordinator.java
@@ -46,7 +46,7 @@
* previously closed. The {@link MutatorCoordinator} will seamlessly handle transitions between groups, creating and
* closing {@link Mutator Mutators} as needed to write to the appropriate partition and bucket. New partitions will be
* created in the meta store if {@link AcidTable#createPartitions()} is set.
- *
+ *
* {@link #insert(List, Object) Insert} events must be artificially assigned appropriate bucket ids in the preceding
* grouping phase so that they are grouped correctly. Note that any transaction id or row id assigned to the
* {@link RecordIdentifier RecordIdentifier} of such events will be ignored by both the coordinator and the underlying
diff --git hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
index edabbef..9f9c237 100644
--- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
+++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClient.java
@@ -186,7 +186,7 @@ public abstract void updateTableSchema(String dbName, String tableName, HCatTabl
* Serializer for HCatTable.
* @param hcatTable The HCatTable to be serialized into string form
* @return String representation of the HCatTable.
- * @throws HCatException, on failure to serialize.
+ * @throws HCatException on failure to serialize.
*/
public abstract String serializeTable(HCatTable hcatTable) throws HCatException;
@@ -202,7 +202,7 @@ public abstract void updateTableSchema(String dbName, String tableName, HCatTabl
* Serializer for HCatPartition.
* @param hcatPartition The HCatPartition instance to be serialized.
* @return String representation of the HCatPartition.
- * @throws HCatException, on failure to serialize.
+ * @throws HCatException on failure to serialize.
*/
public abstract String serializePartition(HCatPartition hcatPartition) throws HCatException;
@@ -210,7 +210,7 @@ public abstract void updateTableSchema(String dbName, String tableName, HCatTabl
* Serializer for a list of HCatPartition.
* @param hcatPartitions The HCatPartitions to be serialized.
* @return A list of Strings, each representing an HCatPartition.
- * @throws HCatException, on failure to serialize.
+ * @throws HCatException on failure to serialize.
*/
 public abstract List<String> serializePartitions(List<HCatPartition> hcatPartitions) throws HCatException;
@@ -218,7 +218,7 @@ public abstract void updateTableSchema(String dbName, String tableName, HCatTabl
* Deserializer for an HCatPartition.
* @param hcatPartitionStringRep The String representation of the HCatPartition, presumably retrieved from {@link #serializePartition(HCatPartition)}
* @return HCatPartition instance reconstructed from the string.
- * @throws HCatException, on failure to deserialze.
+ * @throws HCatException on failure to deserialize.
*/
public abstract HCatPartition deserializePartition(String hcatPartitionStringRep) throws HCatException;
@@ -226,7 +226,7 @@ public abstract void updateTableSchema(String dbName, String tableName, HCatTabl
* Deserializer for a list of HCatPartition strings.
* @param hcatPartitionStringReps The list of HCatPartition strings to be deserialized.
* @return A list of HCatPartition instances, each reconstructed from an entry in the string-list.
- * @throws HCatException, on failure to deserialize.
+ * @throws HCatException on failure to deserialize.
*/
 public abstract List<HCatPartition> deserializePartitions(List<String> hcatPartitionStringReps) throws HCatException;
@@ -387,7 +387,8 @@ public abstract int addPartitionSpec(HCatPartitionSpec partitionSpec)
* @param tableName The table name.
* @param partitionSpec The partition specification, {[col_name,value],[col_name2,value2]}.
* @param ifExists Hive returns an error if the partition specified does not exist, unless ifExists is set to true.
- * @throws HCatException,ConnectionFailureException
+ * @throws HCatException
+ * @throws ConnectionFailureException
*/
public abstract void dropPartitions(String dbName, String tableName,
       Map<String, String> partitionSpec, boolean ifExists)
@@ -406,7 +407,8 @@ public abstract void dropPartitions(String dbName, String tableName,
* @param partitionSpec The partition specification, {[col_name,value],[col_name2,value2]}.
* @param ifExists Hive returns an error if the partition specified does not exist, unless ifExists is set to true.
* @param deleteData Whether to delete the underlying data.
- * @throws HCatException,ConnectionFailureException
+ * @throws HCatException
+ * @throws ConnectionFailureException
*/
public abstract void dropPartitions(String dbName, String tableName,
       Map<String, String> partitionSpec, boolean ifExists, boolean deleteData)
@@ -417,7 +419,7 @@ public abstract void dropPartitions(String dbName, String tableName,
* @param dbName The database name.
* @param tblName The table name.
* @param filter The filter string,
- * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can
+ * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can
* be done only on string partition keys.
* @return list of partitions
* @throws HCatException
@@ -466,7 +468,8 @@ public abstract boolean isPartitionMarkedForEvent(String dbName, String tblName,
* @param owner the owner
* @param renewerKerberosPrincipalName the renewer kerberos principal name
* @return the delegation token
- * @throws HCatException,ConnectionFailureException
+ * @throws HCatException
+ * @throws ConnectionFailureException
*/
public abstract String getDelegationToken(String owner,
String renewerKerberosPrincipalName) throws HCatException;
@@ -496,7 +499,7 @@ public abstract void cancelDelegationToken(String tokenStrForm)
* @param dbName The name of the DB.
* @param tableName The name of the table.
* @return Topic-name for the message-bus on which messages will be sent for the specified table.
- * By default, this is set to .. Returns null when not set.
+ * By default, this is set to <db-name>.<table-name>. Returns null when not set.
*/
public abstract String getMessageBusTopicName(String dbName, String tableName) throws HCatException;
@@ -507,7 +510,7 @@ public abstract void cancelDelegationToken(String tokenStrForm)
* @param lastEventId : The last event id that was processed for this reader. The returned
* replication tasks will start from this point forward
* @param maxEvents : Maximum number of events to consider for generating the
- * replication tasks. If < 1, then all available events will be considered.
+ * replication tasks. If < 1, then all available events will be considered.
* @param dbName : The database name for which we're interested in the events for.
* @param tableName : The table name for which we're interested in the events for - if null,
* then this function will behave as if it were running at a db level.
@@ -523,7 +526,7 @@ public abstract void cancelDelegationToken(String tokenStrForm)
* @param lastEventId The last event id that was consumed by this reader. The returned
* notifications will start at the next eventId available this eventId that
* matches the filter.
- * @param maxEvents Maximum number of events to return. If < 1, then all available events will
+ * @param maxEvents Maximum number of events to return. If < 1, then all available events will
* be returned.
* @param filter Filter to determine if message should be accepted. If null, then all
* available events up to maxEvents will be returned.
diff --git hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java
index c5a3028..795b010 100644
--- hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java
+++ hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/repl/ReplicationTask.java
@@ -178,7 +178,7 @@ public ReplicationTask withDstStagingDirProvider(StagingDirectoryProvider dstSta
* throws an IllegalArgumentException as well, a ReplicationTask will use the same key sent in.
* That way, the default will then be that the destination db name is the same as the src db name
*
- * If you want to use a Map mapping instead of a Function,
+ * If you want to use a Map<String,String> mapping instead of a Function<String,String>,
* simply call this function as .withTableNameMapping(ReplicationUtils.mapBasedFunction(tableMap))
* @param tableNameMapping
* @return this replication task
@@ -195,7 +195,7 @@ public ReplicationTask withTableNameMapping(Function tableNameMap
* throws an IllegalArgumentException as well, a ReplicationTask will use the same key sent in.
* That way, the default will then be that the destination db name is the same as the src db name
*
- * If you want to use a Map mapping instead of a Function,
+ * If you want to use a Map<String,String> mapping instead of a Function<String,String>,
* simply call this function as .withDbNameMapping(ReplicationUtils.mapBasedFunction(dbMap))
* @param dbNameMapping
* @return this replication task
@@ -212,9 +212,9 @@ protected void verifyActionable() {
}
/**
- * Returns a Iterable to send to a hive driver on the source warehouse
+ * Returns a Iterable<Command> to send to a hive driver on the source warehouse
*
- * If you *need* a List instead, you can use guava's
+ * If you *need* a List<Command> instead, you can use guava's
* ImmutableList.copyOf(iterable) or Lists.newArrayList(iterable) to
* get the underlying list, but this defeats the purpose of making this
* interface an Iterable rather than a List, since it is very likely
@@ -224,9 +224,9 @@ protected void verifyActionable() {
 abstract public Iterable<? extends Command> getSrcWhCommands();
/**
- * Returns a Iterable to send to a hive driver on the source warehouse
+ * Returns a Iterable<Command> to send to a hive driver on the source warehouse
*
- * If you *need* a List instead, you can use guava's
+ * If you *need* a List<Command> instead, you can use guava's
* ImmutableList.copyOf(iterable) or Lists.newArrayList(iterable) to
* get the underlying list, but this defeats the purpose of making this
* interface an Iterable rather than a List, since it is very likely
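A short usage sketch of the Map-based mapping mentioned in these comments; the helper method and map contents are hypothetical, while the ReplicationUtils.mapBasedFunction(...) calls are taken from the Javadoc itself.

import java.util.Map;

import org.apache.hive.hcatalog.api.repl.ReplicationTask;
import org.apache.hive.hcatalog.api.repl.ReplicationUtils;

class ReplicationTaskMappings {
  static ReplicationTask applyMappings(ReplicationTask task,
      Map<String, String> dbMap, Map<String, String> tableMap) {
    // mapBasedFunction(...) adapts a Map lookup to the Function the builder methods expect
    return task.withDbNameMapping(ReplicationUtils.mapBasedFunction(dbMap))
        .withTableNameMapping(ReplicationUtils.mapBasedFunction(tableMap));
  }
}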
diff --git hplsql/src/main/java/org/apache/hive/hplsql/Meta.java hplsql/src/main/java/org/apache/hive/hplsql/Meta.java
index 46bd55a..0ccbb54 100644
--- hplsql/src/main/java/org/apache/hive/hplsql/Meta.java
+++ hplsql/src/main/java/org/apache/hive/hplsql/Meta.java
@@ -190,7 +190,7 @@ public String normalizeIdentifierPart(String name) {
}
/**
- * Split qualified object to 2 parts: schema.tab.col -> schema.tab|col; tab.col -> tab|col
+ * Split qualified object to 2 parts: schema.tab.col -> schema.tab|col; tab.col -> tab|col
*/
 public ArrayList<String> splitIdentifierToTwoParts(String name) {
   ArrayList<String> parts = splitIdentifier(name);
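A sketch of the split that comment describes: everything before the last '.' forms the first part and the remainder is the second ("schema.tab.col" becomes "schema.tab" and "col"). This is an illustration, not the actual splitIdentifierToTwoParts logic.

class IdentifierSplitSketch {
  static String[] splitToTwoParts(String name) {
    int idx = name.lastIndexOf('.');
    if (idx < 0) {
      return null; // not a qualified identifier
    }
    return new String[] { name.substring(0, idx), name.substring(idx + 1) };
  }
}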
diff --git hplsql/src/main/java/org/apache/hive/hplsql/Utils.java hplsql/src/main/java/org/apache/hive/hplsql/Utils.java
index d261df1..211e9b2 100644
--- hplsql/src/main/java/org/apache/hive/hplsql/Utils.java
+++ hplsql/src/main/java/org/apache/hive/hplsql/Utils.java
@@ -52,7 +52,7 @@ public static String unquoteString(String s) {
}
/**
- * Quote string and escape characters - ab'c -> 'ab''c'
+ * Quote string and escape characters - ab'c -> 'ab''c'
*/
public static String quoteString(String s) {
if(s == null) {
@@ -73,7 +73,7 @@ public static String quoteString(String s) {
}
/**
- * Merge quoted strings: 'a' 'b' -> 'ab'; 'a''b' 'c' -> 'a''bc'
+ * Merge quoted strings: 'a' 'b' -> 'ab'; 'a''b' 'c' -> 'a''bc'
*/
public static String mergeQuotedStrings(String s1, String s2) {
if(s1 == null || s2 == null) {
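The quoting rule documented above (ab'c becomes 'ab''c'), as a minimal sketch rather than the actual Utils.quoteString implementation.

class QuoteSketch {
  static String quote(String s) {
    if (s == null) {
      return null;
    }
    // double any embedded single quotes, then wrap the value in single quotes
    return "'" + s.replace("'", "''") + "'";
  }
}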
diff --git hplsql/src/main/java/org/apache/hive/hplsql/Var.java hplsql/src/main/java/org/apache/hive/hplsql/Var.java
index 480d97c..91840af 100644
--- hplsql/src/main/java/org/apache/hive/hplsql/Var.java
+++ hplsql/src/main/java/org/apache/hive/hplsql/Var.java
@@ -606,7 +606,7 @@ else if (type == Type.TIMESTAMP) {
}
/**
- * Convert value to SQL string - string literals are quoted and escaped, ab'c -> 'ab''c'
+ * Convert value to SQL string - string literals are quoted and escaped, ab'c -> 'ab''c'
*/
public String toSqlString() {
if (value == null) {
diff --git llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java
index 4a83141..9e5415f 100644
--- llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java
+++ llap-ext-client/src/java/org/apache/hadoop/hive/llap/LlapDump.java
@@ -36,7 +36,7 @@
/**
* Utility to test query and data retrieval via the LLAP input format.
- * llapdump --hiveconf hive.zookeeper.quorum=localhost --hiveconf hive.zookeeper.client.port=2181 --hiveconf hive.llap.daemon.service.hosts=@llap_MiniLlapCluster 'select * from employee where employee_id < 10'
+ * llapdump --hiveconf hive.zookeeper.quorum=localhost --hiveconf hive.zookeeper.client.port=2181 --hiveconf hive.llap.daemon.service.hosts=@llap_MiniLlapCluster 'select * from employee where employee_id < 10'
*
*/
public class LlapDump {
diff --git service/src/java/org/apache/hive/service/Service.java service/src/java/org/apache/hive/service/Service.java
index 2111837..c9b5d03 100644
--- service/src/java/org/apache/hive/service/Service.java
+++ service/src/java/org/apache/hive/service/Service.java
@@ -49,7 +49,7 @@
* The transition must be from {@link STATE#NOTINITED} to {@link STATE#INITED} unless the
* operation failed and an exception was raised.
*
- * @param config
+ * @param conf
* the configuration of the service
*/
void init(HiveConf conf);
diff --git service/src/java/org/apache/hive/service/ServiceOperations.java service/src/java/org/apache/hive/service/ServiceOperations.java
index 97858f5..17d0dc6 100644
--- service/src/java/org/apache/hive/service/ServiceOperations.java
+++ service/src/java/org/apache/hive/service/ServiceOperations.java
@@ -51,7 +51,7 @@ public static void ensureCurrentState(Service.STATE state,
/**
* Initialize a service.
- *
+ *
* The service state is checked before the operation begins.
* This process is not thread safe.
* @param service a service that must be in the state
@@ -69,7 +69,7 @@ public static void init(Service service, HiveConf configuration) {
/**
* Start a service.
- *
+ *
* The service state is checked before the operation begins.
* This process is not thread safe.
* @param service a service that must be in the state
@@ -86,7 +86,7 @@ public static void start(Service service) {
/**
* Initialize then start a service.
- *
+ *
* The service state is checked before the operation begins.
* This process is not thread safe.
* @param service a service that must be in the state
@@ -102,9 +102,9 @@ public static void deploy(Service service, HiveConf configuration) {
/**
* Stop a service.
- * Do nothing if the service is null or not
+ *
+ * Do nothing if the service is null or not
* in a state in which it can be/needs to be stopped.
- *
+ *
* The service state is checked before the operation begins.
* This process is not thread safe.
* @param service a service or null
diff --git service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java
index f11c0e4a..9f01c6d 100644
--- service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java
+++ service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java
@@ -89,7 +89,7 @@ public static String getKerberosServiceTicket(String principal, String host,
* @param clientUserName Client User name.
* @return An unsigned cookie token generated from input parameters.
* The final cookie generated is of the following format :
- * cu=&rn=&s=
+ * cu=<username>&rn=<randomNumber>&s=<cookieSignature>
*/
public static String createCookieToken(String clientUserName) {
StringBuilder sb = new StringBuilder();
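A sketch of the unsigned token format documented above; the random-number source is an assumption, and the &s=<cookieSignature> attribute is appended later when the cookie is signed.

import java.security.SecureRandom;

class CookieTokenSketch {
  private static final SecureRandom RANDOM = new SecureRandom();

  // final signed cookie: cu=<username>&rn=<randomNumber>&s=<cookieSignature>
  static String createCookieToken(String clientUserName) {
    return "cu=" + clientUserName + "&rn=" + RANDOM.nextLong();
  }
}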
diff --git service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java
index e2a6de1..8db996c 100644
--- service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java
+++ service/src/java/org/apache/hive/service/auth/PasswdAuthenticationProvider.java
@@ -26,7 +26,7 @@
* to authenticate users for their requests.
* If a user is to be granted, return nothing/throw nothing.
* When a user is to be disallowed, throw an appropriate {@link AuthenticationException}.
- *
+ *
* For an example implementation, see {@link LdapAuthenticationProviderImpl}.
*
* @param user The username received over the connection request
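A minimal provider following the contract above: return normally to accept the user, throw AuthenticationException to reject. The hard-coded credentials are obviously illustrative only.

import javax.security.sasl.AuthenticationException;

public class StaticPasswdAuthenticationProvider implements PasswdAuthenticationProvider {
  @Override
  public void Authenticate(String user, String password) throws AuthenticationException {
    if (!"hive".equals(user) || !"secret".equals(password)) {
      throw new AuthenticationException("Invalid credentials for user " + user);
    }
    // accepted: nothing is returned and nothing is thrown
  }
}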
diff --git service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java
index 0c41912..ceb2848 100644
--- service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java
+++ service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java
@@ -31,12 +31,12 @@
/**
* This class is responsible for setting the ipAddress for operations executed via HiveServer2.
- *
+ *
*
* - IP address is only set for operations that calls listeners with hookContext
* - IP address is only set if the underlying transport mechanism is socket
*
- *
+ *
*
* @see org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext
*/
diff --git service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java
index a0708c3..0802bc8 100644
--- service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java
+++ service/src/java/org/apache/hive/service/auth/ldap/CustomQueryFilterFactory.java
@@ -30,7 +30,7 @@
*
* The produced filter object filters out all users that are not found in the search result
* of the query provided in Hive configuration.
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY
+ * @see org.apache.hadoop.hive.conf.HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY
*/
public class CustomQueryFilterFactory implements FilterFactory {
diff --git service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java
index e0f4518..006720d 100644
--- service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java
+++ service/src/java/org/apache/hive/service/auth/ldap/GroupFilterFactory.java
@@ -35,7 +35,7 @@
*
* The produced filter object filters out all users that are not members of at least one of
* the groups provided in Hive configuration.
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER
*/
public final class GroupFilterFactory implements FilterFactory {
diff --git service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java
index df2ba6b..83b6661 100644
--- service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java
+++ service/src/java/org/apache/hive/service/auth/ldap/LdapUtils.java
@@ -145,10 +145,10 @@ public static boolean isDn(String name) {
* @param conf Hive configuration
* @param var variable to be read
* @return a list of DN patterns
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_BASEDN
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GUIDKEY
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_BASEDN
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GUIDKEY
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
*/
 public static List<String> parseDnPatterns(HiveConf conf, HiveConf.ConfVars var) {
String patternsString = conf.getVar(var);
@@ -183,8 +183,8 @@ private static String patternToBaseDn(String pattern) {
* Converts a collection of Distinguished Name patterns to a collection of base DNs.
* @param patterns Distinguished Name patterns
* @return a list of base DNs
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN
*/
 public static List<String> patternsToBaseDns(Collection<String> patterns) {
   List<String> result = new ArrayList<>();
diff --git service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java
index 1b391f8..b8c3385 100644
--- service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java
+++ service/src/java/org/apache/hive/service/auth/ldap/SearchResultHandler.java
@@ -147,7 +147,7 @@ public void handle(RecordProcessor processor) throws NamingException {
* Implementations of this interface perform the actual work of processing each record,
* but don't need to worry about exception handling, closing underlying data structures,
* and combining results from several search requests.
- * {@see SearchResultHandler}
+ * @see SearchResultHandler
*/
public interface RecordProcessor {
diff --git service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java
index c8a6c88..89bf444 100644
--- service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java
+++ service/src/java/org/apache/hive/service/auth/ldap/UserFilterFactory.java
@@ -30,7 +30,7 @@
*
* The produced filter object filters out all users that are not on the provided in
* Hive configuration list.
- * @see HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERFILTER
+ * @see HiveConf.ConfVars#HIVE_SERVER2_PLAIN_LDAP_USERFILTER
*/
public final class UserFilterFactory implements FilterFactory {
diff --git service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
index 876ade8..ceb68f5 100644
--- service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
+++ service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
@@ -31,7 +31,7 @@
* Convert a SQL search pattern into an equivalent Java Regex.
*
* @param pattern input which may contain '%' or '_' wildcard characters, or
- * these characters escaped using {@link #getSearchStringEscape()}.
+ * these characters escaped using getSearchStringEscape().
* @return replace %/_ with regex search characters, also handle escaped
* characters.
*/
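A rough sketch of the conversion described above; it ignores the escape-character handling the real method performs and simply maps '%' to '.*' and '_' to '.'.

import java.util.regex.Pattern;

class LikePatternSketch {
  static String likeToRegex(String pattern) {
    StringBuilder regex = new StringBuilder();
    for (char c : pattern.toCharArray()) {
      if (c == '%') {
        regex.append(".*");     // SQL multi-character wildcard
      } else if (c == '_') {
        regex.append('.');      // SQL single-character wildcard
      } else {
        regex.append(Pattern.quote(String.valueOf(c))); // everything else is a literal
      }
    }
    return regex.toString();
  }
}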
diff --git service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
index 86417a4..093d513 100644
--- service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
+++ service/src/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
@@ -36,9 +36,9 @@
/**
* ClassicTableTypeMapping.
* Classic table type mapping :
- * Managed Table ==> Table
- * External Table ==> Table
- * Virtual View ==> View
+ * Managed Table ==> Table
+ * External Table ==> Table
+ * Virtual View ==> View
*/
public class ClassicTableTypeMapping implements TableTypeMapping {
diff --git service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java
index 8f531f7..80ef86b 100644
--- service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java
+++ service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java
@@ -35,7 +35,7 @@
/**
* Map hive's table type name to client's table type
- * @param clientTypeName
+ * @param hiveTypeName
* @return
*/
public String mapToClientType (String hiveTypeName);
diff --git service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java
index fb8141a..209b2fe 100644
--- service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java
+++ service/src/java/org/apache/hive/service/server/ThreadFactoryWithGarbageCleanup.java
@@ -30,12 +30,12 @@
* in custom cleanup code to be called before this thread is GC-ed.
* Currently cleans up the following:
* 1. ThreadLocal RawStore object:
- * In case of an embedded metastore, HiveServer2 threads (foreground & background)
+ * In case of an embedded metastore, HiveServer2 threads (foreground & background)
* end up caching a ThreadLocal RawStore object. The ThreadLocal RawStore object has
- * an instance of PersistenceManagerFactory & PersistenceManager.
+ * an instance of PersistenceManagerFactory & PersistenceManager.
* The PersistenceManagerFactory keeps a cache of PersistenceManager objects,
* which are only removed when PersistenceManager#close method is called.
- * HiveServer2 uses ExecutorService for managing thread pools for foreground & background threads.
+ * HiveServer2 uses ExecutorService for managing thread pools for foreground & background threads.
* ExecutorService unfortunately does not provide any hooks to be called,
* when a thread from the pool is terminated.
* As a solution, we're using this ThreadFactory to keep a cache of RawStore objects per thread.
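A sketch of the general idea described in this comment (per-thread resources released when a pool thread finishes); the real class caches RawStore objects and hooks cleanup to thread garbage collection instead, so treat the names and the run()/finally mechanism here as assumptions.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadFactory;

class CleanupThreadFactory implements ThreadFactory {
  // worker code would register its per-thread resource here, keyed by thread id
  private final Map<Long, AutoCloseable> perThreadResource = new ConcurrentHashMap<>();

  @Override
  public Thread newThread(Runnable r) {
    return new Thread(r) {
      @Override
      public void run() {
        try {
          super.run();
        } finally {
          AutoCloseable resource = perThreadResource.remove(getId());
          if (resource != null) {
            try {
              resource.close();
            } catch (Exception ignored) {
              // best-effort cleanup
            }
          }
        }
      }
    };
  }
}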
diff --git testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
index 140c198..27f6493 100644
--- testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
+++ testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
@@ -34,7 +34,7 @@
/**
* Creates 2 tables to query from
*
- * @param num
+ * @param con
*/
public static void createTables(Connection con) throws SQLException {
Statement stmt = con.createStatement();