diff --git beeline/src/java/org/apache/hive/beeline/BeeLineSignalHandler.java beeline/src/java/org/apache/hive/beeline/BeeLineSignalHandler.java
index 378f1b0..d4f3f10 100644
--- beeline/src/java/org/apache/hive/beeline/BeeLineSignalHandler.java
+++ beeline/src/java/org/apache/hive/beeline/BeeLineSignalHandler.java
@@ -26,10 +26,6 @@
import java.sql.Statement;
-/**
- * BeeLineSignalHandler.
- *
- */
public interface BeeLineSignalHandler {
- public void setStatement(Statement stmt);
+ void setStatement(Statement stmt);
}
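Reviewer note: most of this patch removes modifiers that the Java Language Specification already implies. Every member of an interface is implicitly public, interface methods are implicitly abstract, and interface fields are implicitly public static final, so declarations before and after this change compile to identical class files. A minimal sketch with hypothetical interfaces (not taken from this patch):

```java
// Hypothetical example: these two interfaces are exactly equivalent,
// because interface members receive these modifiers implicitly.
interface Verbose {
  public static final String PREFIX = "> ";    // 'public static final' is redundant
  public abstract void print(String message);  // 'public abstract' is redundant
}

interface Concise {
  String PREFIX = "> ";        // implicitly public static final
  void print(String message);  // implicitly public abstract
}
```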
diff --git beeline/src/java/org/apache/hive/beeline/CommandHandler.java beeline/src/java/org/apache/hive/beeline/CommandHandler.java
index 0441bce..179d9ec 100644
--- beeline/src/java/org/apache/hive/beeline/CommandHandler.java
+++ beeline/src/java/org/apache/hive/beeline/CommandHandler.java
@@ -35,19 +35,19 @@
/**
* @return the name of the command
*/
- public String getName();
+ String getName();
/**
* @return all the possible names of this command.
*/
- public String[] getNames();
+ String[] getNames();
/**
* @return the short help description for this command.
*/
- public String getHelpText();
+ String getHelpText();
/**
@@ -58,7 +58,7 @@
* the command line to check.
-   * @return the command string that matches, or null if it no match
+   * @return the command string that matches, or null if there is no match
*/
- public String matches(String line);
+ String matches(String line);
/**
@@ -67,11 +67,11 @@
* @param line
* the full command line to execute.
*/
- public boolean execute(String line);
+ boolean execute(String line);
/**
* Returns the completors that can handle parameters.
*/
- public Completor[] getParameterCompletors();
-}
\ No newline at end of file
+ Completor[] getParameterCompletors();
+}
diff --git beeline/src/java/org/apache/hive/beeline/Commands.java beeline/src/java/org/apache/hive/beeline/Commands.java
index a92d69f..b8635a0 100644
--- beeline/src/java/org/apache/hive/beeline/Commands.java
+++ beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -817,7 +817,6 @@ public boolean quit(String line) {
public boolean closeall(String line) {
if (close(null)) {
while (close(null)) {
- ;
}
return true;
}
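Reviewer note: the `;` deleted above was an empty statement inside an already-empty loop body. The loop itself is intentional: each close(null) call closes one open connection, so looping until it returns false drains them all. A small sketch of the same idiom, with hypothetical names:

```java
// Hypothetical sketch of a drain loop: the side effect lives in the loop
// condition, so the body is deliberately empty and needs no ';' statement.
class ConnectionDrain {
  private int openConnections = 3;

  boolean closeOne() {
    if (openConnections == 0) {
      return false;     // nothing left to close
    }
    openConnections--;  // pretend one connection was closed
    return true;
  }

  void closeAll() {
    while (closeOne()) {
      // intentionally empty: closeOne() does all the work
    }
  }
}
```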
diff --git beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
index a21fa65..35e0d2d 100644
--- beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
+++ beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
@@ -28,58 +28,58 @@
public interface NestedScriptParser {
- public enum CommandType {
+ enum CommandType {
PARTIAL_STATEMENT,
TERMINATED_STATEMENT,
COMMENT
}
- static final String DEFAUTL_DELIMITER = ";";
+ String DEFAUTL_DELIMITER = ";";
/***
-   * Find the type of given command
+   * Find the type of the given command
* @param dbCommand
* @return
*/
- public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException;
+ boolean isPartialCommand(String dbCommand) throws IllegalArgumentException;
/** Parse the DB specific nesting format and extract the inner script name if any
* @param dbCommand command from parent script
* @return
* @throws IllegalFormatException
*/
- public String getScriptName(String dbCommand) throws IllegalArgumentException;
+ String getScriptName(String dbCommand) throws IllegalArgumentException;
/***
* Find if the given command is a nested script execution
* @param dbCommand
* @return
*/
- public boolean isNestedScript(String dbCommand);
+ boolean isNestedScript(String dbCommand);
/***
-   * Find if the given command is should be passed to DB
+   * Find if the given command should be passed to DB
* @param dbCommand
* @return
*/
- public boolean isNonExecCommand(String dbCommand);
+ boolean isNonExecCommand(String dbCommand);
/***
* Get the SQL statement delimiter
* @return
*/
- public String getDelimiter();
+ String getDelimiter();
/***
* Clear any client specific tags
* @return
*/
- public String cleanseCommand(String dbCommand);
+ String cleanseCommand(String dbCommand);
/***
-   * Does the DB required table/column names quoted
+   * Does the DB require table/column names to be quoted
* @return
*/
- public boolean needsQuotedIdentifier();
+ boolean needsQuotedIdentifier();
}
@@ -88,7 +88,7 @@
* abstractCommandParser.
*
*/
- private static abstract class AbstractCommandParser implements NestedScriptParser {
+ private abstract static class AbstractCommandParser implements NestedScriptParser {
@Override
public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException{
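Reviewer note: two related cleanups meet in this file. Types nested inside an interface, such as the CommandType enum here (and the nested Validator classes later in this patch), are implicitly public and static, so those keywords are dropped; nested enums are implicitly static wherever they appear. The AbstractCommandParser change only reorders modifiers into the customary JLS order that checkstyle's ModifierOrder rule checks (access modifier, then abstract, then static, then final). An illustrative sketch:

```java
// Hypothetical sketch: types nested in an interface are implicitly
// public static, so spelling the modifiers out adds no information.
interface Parser {
  enum CommandKind { PARTIAL, TERMINATED, COMMENT }  // implicitly public static
  String DELIMITER = ";";                            // implicitly public static final
}

class Parsers {
  // Customary modifier order puts 'abstract' before 'static'.
  private abstract static class BaseParser {
    abstract boolean isPartial(String command);
  }
}
```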
diff --git beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index de3ad4e..a2c3c1e 100644
--- beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -281,7 +281,7 @@ public void doInit(String toVersion) throws HiveMetaException {
}
} catch (IOException e) {
throw new HiveMetaException("Schema initialization FAILED!" +
- " Metastore state would be inconsistent !!", e);
+ " Metastore state would be inconsistent !!", e);
}
}
diff --git beeline/src/java/org/apache/hive/beeline/SQLCompletor.java beeline/src/java/org/apache/hive/beeline/SQLCompletor.java
index 844b9ae..3772d8c 100644
--- beeline/src/java/org/apache/hive/beeline/SQLCompletor.java
+++ beeline/src/java/org/apache/hive/beeline/SQLCompletor.java
@@ -74,7 +74,6 @@ public SQLCompletor(BeeLine beeLine, boolean skipmeta)
for (StringTokenizer tok = new StringTokenizer(keywords, ", "); tok.hasMoreTokens(); completions
.add(tok.nextToken())) {
- ;
}
// now add the tables and columns from the current connection
diff --git beeline/src/java/org/apache/hive/beeline/util/QFileClient.java beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
index b62a883..1df0c05 100644
--- beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
+++ beeline/src/java/org/apache/hive/beeline/util/QFileClient.java
@@ -135,7 +135,7 @@ void initFilterSet() {
.addFilter(userName, "!!{user.name}!!")
.addFilter(operatorPattern, "\"$1_!!ELIDED!!\"")
;
- };
+ }
public QFileClient setUsername(String username) {
this.username = username;
@@ -241,7 +241,7 @@ private void runQFileTest() throws Exception {
if (1 != beeLine.runCommands(new String[] { "!run " + qFileDirectory + "/" + qFileName })) {
hasErrors = true;
}
-
+
beeLine.runCommands(new String[] { "!record" });
}
diff --git cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java
index 63668bc..b64212d 100644
--- cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java
+++ cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java
@@ -499,7 +499,7 @@ public int getStatus() {
public enum ClientResult {
RETURN_OK, RETURN_SERVER_EXCEPTION, RETURN_T_EXCEPTION
- };
+ }
private final ClientResult result;
@@ -533,7 +533,6 @@ public HiveClient getClient() {
when(result.fetchN(anyInt())).thenThrow(exception);
} catch (TException e) {
- ;
}
return result;
} else if (ClientResult.RETURN_T_EXCEPTION.equals(this.result)) {
diff --git cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java
index 63b9371..6039e4e 100644
--- cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java
+++ cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java
@@ -115,7 +115,6 @@ public void run() {
output.write(buffer, 0, read);
}
} catch (IOException e) {
- ;
}
}
diff --git common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index e41f87c..265ed85 100644
--- common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@ -116,19 +116,18 @@ public String getAggregator(Configuration conf) {
/**
* @return List of all supported statistics
*/
- public static final String[] supportedStats = new String[]
- {NUM_FILES,ROW_COUNT,TOTAL_SIZE,RAW_DATA_SIZE};
+ public static final String[] supportedStats = {NUM_FILES,ROW_COUNT,TOTAL_SIZE,RAW_DATA_SIZE};
/**
* @return List of all statistics that need to be collected during query execution. These are
* statistics that inherently require a scan of the data.
*/
- public static final String[] statsRequireCompute = new String[] {ROW_COUNT,RAW_DATA_SIZE};
+ public static final String[] statsRequireCompute = {ROW_COUNT,RAW_DATA_SIZE};
/**
* @return List of statistics that can be collected quickly without requiring a scan of the data.
*/
- public static final String[] fastStats = new String[] {NUM_FILES,TOTAL_SIZE};
+ public static final String[] fastStats = {NUM_FILES,TOTAL_SIZE};
// This string constant is used by stats task to indicate to AlterHandler that
// alterPartition/alterTable is happening via statsTask.
@@ -143,7 +142,7 @@ public String getAggregator(Configuration conf) {
public static final String FALSE = "false";
public static boolean areStatsUptoDate(Map<String, String> params) {
- String statsAcc = params.get(COLUMN_STATS_ACCURATE);
- return statsAcc == null ? false : statsAcc.equals(TRUE);
+ String statsAcc = params.get(COLUMN_STATS_ACCURATE);
+ return TRUE.equals(statsAcc);
}
}
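Reviewer note: the areStatsUptoDate change is more than re-indentation. The old `statsAcc == null ? false : statsAcc.equals(TRUE)` and the new `TRUE.equals(statsAcc)` return the same value for every input, but putting the non-null constant on the left makes the explicit null check unnecessary. The same file also drops the redundant `new String[]` from array field initializers, where the declaration already fixes the type. A minimal sketch of the constant-first idiom, assuming TRUE = "true" as in this class:

```java
// Constant-first equals: null-safe without an explicit null check.
public class NullSafeEquals {
  private static final String TRUE = "true";

  static boolean isTrue(String value) {
    // TRUE is never null, so this cannot throw NullPointerException;
    // a null 'value' simply compares unequal and yields false.
    return TRUE.equals(value);
  }

  public static void main(String[] args) {
    System.out.println(isTrue("true"));  // true
    System.out.println(isTrue(null));    // false
  }
}
```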
diff --git common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java
index bcb3276..e4018e4 100644
--- common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java
+++ common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java
@@ -29,13 +29,13 @@
* Key used to store valid txn list in a
* {@link org.apache.hadoop.conf.Configuration} object.
*/
- public static final String VALID_TXNS_KEY = "hive.txn.valid.txns";
+ String VALID_TXNS_KEY = "hive.txn.valid.txns";
/**
* The response to a range query. NONE means no values in this range match,
-   * SOME mean that some do, and ALL means that every value does.
+   * SOME means that some do, and ALL means that every value does.
*/
- public enum RangeResponse {NONE, SOME, ALL};
+ enum RangeResponse {NONE, SOME, ALL}
/**
* Indicates whether a given transaction has been committed and should be
@@ -43,7 +43,7 @@
* @param txnid id for the transaction
* @return true if committed, false otherwise
*/
- public boolean isTxnCommitted(long txnid);
+ boolean isTxnCommitted(long txnid);
/**
* Find out if a range of transaction ids have been committed.
@@ -52,31 +52,31 @@
* @return Indicate whether none, some, or all of these transactions have been
* committed.
*/
- public RangeResponse isTxnRangeCommitted(long minTxnId, long maxTxnId);
+ RangeResponse isTxnRangeCommitted(long minTxnId, long maxTxnId);
/**
* Write this validTxnList into a string. This should produce a string that
* can be used by {@link #readFromString(String)} to populate a validTxnsList.
*/
- public String writeToString();
+ String writeToString();
/**
* Populate this validTxnList from the string. It is assumed that the string
* was created via {@link #writeToString()}.
* @param src source string.
*/
- public void readFromString(String src);
+ void readFromString(String src);
/**
* Get the largest committed transaction id.
* @return largest committed transaction id
*/
- public long getHighWatermark();
+ long getHighWatermark();
/**
* Get the list of transactions under the high water mark that are still
* open.
* @return a list of open transaction ids
*/
- public long[] getOpenTransactions();
+ long[] getOpenTransactions();
}
diff --git common/src/java/org/apache/hadoop/hive/common/classification/InterfaceAudience.java common/src/java/org/apache/hadoop/hive/common/classification/InterfaceAudience.java
index 1334ed6..bcefca2 100644
--- common/src/java/org/apache/hadoop/hive/common/classification/InterfaceAudience.java
+++ common/src/java/org/apache/hadoop/hive/common/classification/InterfaceAudience.java
@@ -26,19 +26,19 @@
/**
* Intended for use by any project or application.
*/
- @Documented public @interface Public {};
+ @Documented public @interface Public {}
/**
* Intended only for the project(s) specified in the annotation
*/
@Documented public @interface LimitedPrivate {
String[] value();
- };
+ }
/**
* Intended for use only within Hive itself.
*/
- @Documented public @interface Private {};
+ @Documented public @interface Private {}
private InterfaceAudience() {} // Audience can't exist on its own
}
diff --git common/src/java/org/apache/hadoop/hive/common/classification/InterfaceStability.java common/src/java/org/apache/hadoop/hive/common/classification/InterfaceStability.java
index 2a12806..c72a916 100644
--- common/src/java/org/apache/hadoop/hive/common/classification/InterfaceStability.java
+++ common/src/java/org/apache/hadoop/hive/common/classification/InterfaceStability.java
@@ -25,22 +25,22 @@
*/
public class InterfaceStability {
/**
-   * Can evolve while retaining compatibility for minor release boundaries.;
-   * can break compatibility only at major release (ie. at m.0).
+   * Can evolve while retaining compatibility for minor release boundaries.
+   * can break compatibility only at major release (i.e. at m.0).
*/
@Documented
- public @interface Stable {};
-
+ public @interface Stable {}
+
/**
* Evolving, but can break compatibility at minor release (i.e. m.x)
*/
@Documented
- public @interface Evolving {};
-
+ public @interface Evolving {}
+
/**
* No guarantee is provided as to reliability or stability across any
* level of release granularity.
*/
@Documented
- public @interface Unstable {};
+ public @interface Unstable {}
}
diff --git common/src/java/org/apache/hadoop/hive/common/cli/IHiveFileProcessor.java common/src/java/org/apache/hadoop/hive/common/cli/IHiveFileProcessor.java
index 9ca8e6d..dbedf2e 100644
--- common/src/java/org/apache/hadoop/hive/common/cli/IHiveFileProcessor.java
+++ common/src/java/org/apache/hadoop/hive/common/cli/IHiveFileProcessor.java
@@ -29,5 +29,5 @@
* @param fileName the name of the file
* @exception IOException if an I/O error occurs.
*/
- public int processFile(String fileName) throws IOException;
+ int processFile(String fileName) throws IOException;
}
diff --git common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java
index 19946d9..7c6d11b 100644
--- common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java
+++ common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java
@@ -30,7 +30,7 @@
/**
* Check if we're tracking a certain named key/metric
*/
- public abstract boolean hasKey(String name);
+ boolean hasKey(String name);
/**
* Add a key/metric and its value to track
@@ -38,7 +38,7 @@
* @param value value associated with the key
* @throws Exception
*/
- public abstract void put(String name, Object value) throws IOException;
+ void put(String name, Object value) throws IOException;
/**
*
@@ -46,11 +46,11 @@
* @return value associated with the key
* @throws Exception
*/
- public abstract Object get(String name) throws IOException;
-
+ Object get(String name) throws IOException;
+
/**
- * Removes all the keys and values from this MetricsMBean.
+ * Removes all the keys and values from this MetricsMBean.
*/
void clear();
}
diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index ed22dbd..75c404c 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -181,7 +181,7 @@
* with non-null values to this list as they will override any values defined
* in the underlying Hadoop configuration.
*/
- public static enum ConfVars {
+ public enum ConfVars {
// QL execution stuff
SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""),
PLAN("hive.exec.plan", "", ""),
@@ -282,9 +282,9 @@
LOCALMODEAUTO("hive.exec.mode.local.auto", false,
"Let Hive determine whether to run in local mode automatically"),
- LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L,
+ LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L,
"When hive.exec.mode.local.auto is true, input bytes should less than this for local mode."),
- LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4,
+ LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4,
"When hive.exec.mode.local.auto is true, the number of tasks should less than this for local mode."),
DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true,
@@ -355,7 +355,7 @@
"The number of times to retry a HMSHandler call if there were a connection error"),
HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", 1000,
"The number of milliseconds between HMSHandler retry attempts"),
- HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false,
+ HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false,
"Whether to force reloading of the HMSHandler configuration (including\n" +
"the connection URL, before the next metastore query that accesses the\n" +
"datastore. Once reloaded, this value is reset to false. Used for\n" +
@@ -368,7 +368,7 @@
"Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
- "_INTERMEDIATE_ORIGINAL",
+ "_INTERMEDIATE_ORIGINAL",
"Intermediate dir suffixes used for archiving. Not important what they\n" +
"are, as long as collisions are avoided"),
METASTORE_INT_ARCHIVED("hive.metastore.archive.intermediate.archived",
@@ -544,7 +544,7 @@
HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false,
"Whether to log Hive query, query plan, runtime statistics etc."),
- HIVEQUERYSTRING("hive.query.string", "",
+ HIVEQUERYSTRING("hive.query.string", "",
"Query being executed (might be multiple per a session)"),
HIVEQUERYID("hive.query.id", "",
@@ -783,8 +783,8 @@
" for small ORC files. Note that enabling this config will not honor padding tolerance\n" +
" config (hive.exec.orc.block.padding.tolerance)."),
HIVEMERGEINPUTFORMATSTRIPELEVEL("hive.merge.input.format.stripe.level",
- "org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat",
- "Input file format to use for ORC stripe level merging (for internal use only)"),
+ "org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat",
+ "Input file format to use for ORC stripe level merging (for internal use only)"),
HIVEMERGECURRENTJOBHASDYNAMICPARTITIONS(
"hive.merge.current.job.has.dynamic.partitions", false, ""),
@@ -799,7 +799,7 @@
HIVE_RCFILE_TOLERATE_CORRUPTIONS("hive.io.rcfile.tolerate.corruptions", false, ""),
HIVE_RCFILE_RECORD_BUFFER_SIZE("hive.io.rcfile.record.buffer.size", 4194304, ""), // 4M
- HIVE_ORC_FILE_MEMORY_POOL("hive.exec.orc.memory.pool", 0.5f,
+ HIVE_ORC_FILE_MEMORY_POOL("hive.exec.orc.memory.pool", 0.5f,
"Maximum fraction of heap that can be used by ORC file writers"),
HIVE_ORC_WRITE_FORMAT("hive.exec.orc.write.format", null,
"Define the version of the file to write"),
@@ -1080,8 +1080,8 @@
"The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."),
HIVE_STATS_JDBC_TIMEOUT("hive.stats.jdbc.timeout", 30,
"Timeout value (number of seconds) used by JDBC connection and statements."),
- HIVE_STATS_ATOMIC("hive.stats.atomic", false,
- "whether to update metastore stats only if all stats are available"),
+ HIVE_STATS_ATOMIC("hive.stats.atomic", false,
+ "whether to update metastore stats only if all stats are available"),
HIVE_STATS_RETRIES_MAX("hive.stats.retries.max", 0,
"Maximum number of retries when stats publisher/aggregator got an exception updating intermediate database. \n" +
"Default is no tries on failures."),
@@ -1638,7 +1638,7 @@
"Exceeding this will trigger a flush irrelevant of memory pressure condition."),
HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT("hive.vectorized.groupby.flush.percent", (float) 0.1,
"Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded."),
-
+
HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true, ""),
diff --git common/src/java/org/apache/hadoop/hive/conf/Validator.java common/src/java/org/apache/hadoop/hive/conf/Validator.java
index cea9c41..c5faabc 100644
--- common/src/java/org/apache/hadoop/hive/conf/Validator.java
+++ common/src/java/org/apache/hadoop/hive/conf/Validator.java
@@ -31,7 +31,7 @@
String validate(String value);
- static class StringSet implements Validator {
+ class StringSet implements Validator {
private final Set<String> expected = new LinkedHashSet<String>();
@@ -50,7 +50,7 @@ public String validate(String value) {
}
}
- static enum RANGE_TYPE {
+ enum RANGE_TYPE {
INT {
@Override
protected boolean inRange(String value, Object lower, Object upper) {
@@ -90,7 +90,7 @@ public static RANGE_TYPE valueOf(Object lower, Object upper) {
protected abstract boolean inRange(String value, Object lower, Object upper);
}
- static class RangeValidator implements Validator {
+ class RangeValidator implements Validator {
private final RANGE_TYPE type;
private final Object lower, upper;
@@ -117,7 +117,7 @@ public String validate(String value) {
}
}
- static class PatternSet implements Validator {
+ class PatternSet implements Validator {
private final List<Pattern> expected = new ArrayList<Pattern>();
@@ -141,7 +141,7 @@ public String validate(String value) {
}
}
- static class RatioValidator implements Validator {
+ class RatioValidator implements Validator {
@Override
public String validate(String value) {
diff --git common/src/java/org/apache/hive/common/util/HiveStringUtils.java common/src/java/org/apache/hive/common/util/HiveStringUtils.java
index c21c937..6aeebd4 100644
--- common/src/java/org/apache/hive/common/util/HiveStringUtils.java
+++ common/src/java/org/apache/hive/common/util/HiveStringUtils.java
@@ -51,6 +51,7 @@
@InterfaceStability.Unstable
public class HiveStringUtils {
+
/**
* Priority of the StringUtils shutdown hook.
*/
@@ -636,13 +637,13 @@ public static String getHostname() {
catch(UnknownHostException uhe) {return "" + uhe;}
}
-
+
/**
* The traditional binary prefixes, kilo, mega, ..., exa,
* which can be represented by a 64-bit integer.
* TraditionalBinaryPrefix symbol are case insensitive.
*/
- public static enum TraditionalBinaryPrefix {
+ public enum TraditionalBinaryPrefix {
KILO(1024),
MEGA(KILO.value << 10),
GIGA(MEGA.value << 10),
diff --git contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java
index fa4074f..2e06cb3 100644
--- contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java
+++ contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java
@@ -112,7 +112,6 @@ public boolean next(LongWritable key, BytesWritable value) throws IOException {
int i;
for (i = 0; i < binaryData.length && i < signature.length
&& binaryData[i] == signature[i]; ++i) {
- ;
}
// return the row only if it's not corrupted
diff --git contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java
index 0c13f5e..6ef2e77 100644
--- contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java
+++ contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java
@@ -31,21 +31,21 @@
/**
* This class attempts to provide a simple framework for writing Hive map/reduce
* tasks in java.
- *
+ *
* The main benefit is that it deals with grouping the keys together for reduce
* tasks.
- *
+ *
* Additionally, it deals with all system io... and provides something closer to
* the hadoop m/r.
- *
+ *
* As an example, here's the wordcount reduce:
- *
+ *
* new GenericMR().reduce(System.in, System.out, new Reducer() { public void
* reduce(String key, Iterator<String[]> records, Output output) throws
* Exception { int count = 0;
- *
+ *
* while (records.hasNext()) { count += Integer.parseInt(records.next()[1]); }
- *
+ *
* output.collect(new String[] { key, String.valueOf(count) }); }});
*/
public final class GenericMR {
@@ -96,7 +96,7 @@ private void handle(final Reader in, final Writer out,
}
}
- private static interface RecordProcessor {
+ private interface RecordProcessor {
void processNext(final RecordReader reader, final Output output) throws Exception;
}
@@ -169,7 +169,7 @@ private boolean hasNext() {
private void close() throws Exception {
reader.close();
-
+
}
}
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/ErrorType.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/ErrorType.java
index 63a4d62..84f0060 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/ErrorType.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/ErrorType.java
@@ -72,7 +72,7 @@
* @param errorCode the error code
* @param errorMessage the error message
*/
- private ErrorType(int errorCode, String errorMessage) {
+ ErrorType(int errorCode, String errorMessage) {
this.errorCode = errorCode;
this.errorMessage = errorMessage;
}
@@ -83,7 +83,7 @@ private ErrorType(int errorCode, String errorMessage) {
* @param errorMessage the error message
* @param appendCauseMessage should causal exception message be appended to error message
*/
- private ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage) {
+ ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage) {
this.errorCode = errorCode;
this.errorMessage = errorMessage;
this.appendCauseMessage = appendCauseMessage;
@@ -96,7 +96,7 @@ private ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage
* @param appendCauseMessage should causal exception message be appended to error message
* @param isRetriable is this a retriable error
*/
- private ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage, boolean isRetriable) {
+ ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage, boolean isRetriable) {
this.errorCode = errorCode;
this.errorMessage = errorMessage;
this.appendCauseMessage = appendCauseMessage;
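Reviewer note: dropping `private` from these enum constructors changes nothing at all. An enum constructor is implicitly private, and declaring it public or protected is a compile-time error (JLS 8.9.2), so the keyword was pure noise. A short sketch with a hypothetical enum:

```java
// Hypothetical sketch: an enum constructor is implicitly private.
public enum Status {
  OK(200), NOT_FOUND(404);

  private final int code;

  Status(int code) {  // identical in effect to 'private Status(int code)'
    this.code = code;
  }

  public int getCode() {
    return code;
  }
}
```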
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java
index cd0f177..e7b7202 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java
@@ -32,11 +32,11 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HCatFieldSchema implements Serializable {
-/*the implementation of HCatFieldSchema is a bit messy since with the addition of parametrized
-types (e.g. char(7)) we need to represent something richer than an enum but for backwards
+/*the implementation of HCatFieldSchema is a bit messy since with the addition of parametrized
+types (e.g. char(7)) we need to represent something richer than an enum but for backwards
compatibility (and effort required to do full refactoring) this class has both 'type' and 'typeInfo';
similarly for mapKeyType/mapKeyTypeInfo */
-
+
public enum Type {
/*this captures mapping of Hive type names to HCat type names; in the long run
* we should just use Hive types directly but that is a larger refactoring effort
@@ -54,23 +54,23 @@
CHAR(PrimitiveObjectInspector.PrimitiveCategory.CHAR),
VARCHAR(PrimitiveObjectInspector.PrimitiveCategory.VARCHAR),
BINARY(PrimitiveObjectInspector.PrimitiveCategory.BINARY),
- DATE(PrimitiveObjectInspector.PrimitiveCategory.DATE),
- TIMESTAMP(PrimitiveObjectInspector.PrimitiveCategory.TIMESTAMP),
+ DATE(PrimitiveObjectInspector.PrimitiveCategory.DATE),
+ TIMESTAMP(PrimitiveObjectInspector.PrimitiveCategory.TIMESTAMP),
ARRAY(ObjectInspector.Category.LIST),
MAP(ObjectInspector.Category.MAP),
STRUCT(ObjectInspector.Category.STRUCT);
-
+
private final ObjectInspector.Category category;
private final PrimitiveObjectInspector.PrimitiveCategory primitiveCategory;
- private Type(ObjectInspector.Category cat) {
+ Type(ObjectInspector.Category cat) {
category = cat;
primitiveCategory = null;
- assert category != ObjectInspector.Category.PRIMITIVE :
+ assert category != ObjectInspector.Category.PRIMITIVE :
"This c'tor should be used for complex category types";
}
- private Type(PrimitiveObjectInspector.PrimitiveCategory primCat) {
+ Type(PrimitiveObjectInspector.PrimitiveCategory primCat) {
category = ObjectInspector.Category.PRIMITIVE;
primitiveCategory = primCat;
}
@@ -218,7 +218,7 @@ public HCatFieldSchema(String fieldName, Type type, String comment) throws HCatE
public HCatFieldSchema(String fieldName, PrimitiveTypeInfo typeInfo, String comment)
throws HCatException {
this.fieldName = fieldName;
- //HCatUtil.assertNotNull(fieldName, "fieldName cannot be null");//seems sometimes it can be
+ //HCatUtil.assertNotNull(fieldName, "fieldName cannot be null");//seems sometimes it can be
// null, for ARRAY types in particular (which may be a complex type)
this.category = Category.PRIMITIVE;
this.typeInfo = typeInfo;
@@ -263,7 +263,7 @@ private void setName(String name) {
*/
public HCatFieldSchema(String fieldName, Type type, Type mapKeyType, HCatSchema mapValueSchema, String comment) throws HCatException {
assertTypeInCategory(type, Category.MAP, fieldName);
- //Hive only supports primitive map keys:
+ //Hive only supports primitive map keys:
//https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types#LanguageManualTypes-ComplexTypes
assertTypeInCategory(mapKeyType, Category.PRIMITIVE, fieldName);
this.fieldName = fieldName;
@@ -274,16 +274,16 @@ public HCatFieldSchema(String fieldName, Type type, Type mapKeyType, HCatSchema
this.subSchema.get(0).setName(null);
this.comment = comment;
}
- public static HCatFieldSchema createMapTypeFieldSchema(String fieldName, PrimitiveTypeInfo mapKeyType,
- HCatSchema mapValueSchema,
+ public static HCatFieldSchema createMapTypeFieldSchema(String fieldName, PrimitiveTypeInfo mapKeyType,
+ HCatSchema mapValueSchema,
String comment) throws HCatException {
- HCatFieldSchema mapSchema = new HCatFieldSchema(fieldName, Type.MAP,
- Type.getPrimitiveHType(mapKeyType),
+ HCatFieldSchema mapSchema = new HCatFieldSchema(fieldName, Type.MAP,
+ Type.getPrimitiveHType(mapKeyType),
mapValueSchema, comment);
mapSchema.mapKeyTypeInfo = mapKeyType;
return mapSchema;
}
-
+
public HCatSchema getStructSubSchema() throws HCatException {
assertTypeInCategory(this.type, Category.STRUCT, this.fieldName);
@@ -395,11 +395,11 @@ public boolean equals(Object obj) {
@Override
public int hashCode() {
- //result could be cached if this object were to be made immutable...
+ //result could be cached if this object were to be made immutable...
int result = 17;
result = 31 * result + (category == null ? 0 : category.hashCode());
result = 31 * result + (fieldName == null ? 0 : fieldName.hashCode());
- result = 31 * result + (getTypeString() == null ? 0 :
+ result = 31 * result + (getTypeString() == null ? 0 :
getTypeString().hashCode());
return result;
}
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/ReaderContext.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/ReaderContext.java
index edf3654..246cb0f 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/ReaderContext.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/ReaderContext.java
@@ -36,6 +36,6 @@
* as one slave can be used to read multiple splits.
* @return number of splits
*/
- public int numSplits();
+ int numSplits();
}
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/state/StateProvider.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/state/StateProvider.java
index 06bba68..1545a0a 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/state/StateProvider.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/state/StateProvider.java
@@ -31,5 +31,5 @@
*
* @return id
*/
- public int getId();
+ int getId();
}
diff --git hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java
index ae60030..b8b76a5 100644
--- hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java
+++ hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java
@@ -100,7 +100,7 @@
/**
* valid values for ON_OOR_VALUE_OPT
*/
- public static enum OOR_VALUE_OPT_VALUES {Null, Throw}
+ public enum OOR_VALUE_OPT_VALUES {Null, Throw}
protected String sign;
//it's key that this is a per HCatStorer instance object
private final DataLossLogger dataLossLogger = new DataLossLogger();
@@ -171,7 +171,7 @@ protected HCatSchema convertPigSchemaToHCatSchema(Schema pigSchema, HCatSchema t
throw new FrontendException(he.getMessage(), PigHCatUtil.PIG_EXCEPTION_CODE, he);
}
}
-
+
HCatSchema s = new HCatSchema(fieldSchemas);
LOG.debug("convertPigSchemaToHCatSchema(computed)=(" + s + ")");
return s;
@@ -189,7 +189,7 @@ public static boolean removeTupleFromBag(HCatFieldSchema hcatFieldSchema, FieldS
return false;
}
/**
- * Here we are processing HCat table schema as derived from metastore,
+ * Here we are processing HCat table schema as derived from metastore,
* thus it should have information about all fields/sub-fields, but not for partition columns
*/
private HCatFieldSchema getHCatFSFromPigFS(FieldSchema fSchema, HCatFieldSchema hcatFieldSchema,
@@ -273,7 +273,7 @@ private HCatFieldSchema getHCatFSFromPigFS(FieldSchema fSchema, HCatFieldSchema
List<HCatFieldSchema> valFSList = new ArrayList<HCatFieldSchema>(1);
if (hcatFieldSchema != null) {
- return HCatFieldSchema.createMapTypeFieldSchema(fSchema.alias, hcatFieldSchema.getMapKeyTypeInfo(),
+ return HCatFieldSchema.createMapTypeFieldSchema(fSchema.alias, hcatFieldSchema.getMapKeyTypeInfo(),
hcatFieldSchema.getMapValueSchema(), "");
}
@@ -402,7 +402,7 @@ private Object getJavaObj(Object pigObj, HCatFieldSchema hcatFS) throws HCatExce
return HiveDecimal.create(bd);
case CHAR:
String charVal = (String)pigObj;
- CharTypeInfo cti = (CharTypeInfo)hcatFS.getTypeInfo();
+ CharTypeInfo cti = (CharTypeInfo)hcatFS.getTypeInfo();
if(charVal.length() > cti.getLength()) {
handleOutOfRangeValue(pigObj, hcatFS);
return null;
@@ -454,7 +454,7 @@ private void handleOutOfRangeValue(Object pigObj, HCatFieldSchema hcatFS) throws
/**
* depending on user config, throws an exception or logs a msg if the incoming Pig value is
* out-of-range for target type.
- * @param additionalMsg may be {@code null}
+ * @param additionalMsg may be {@code null}
*/
private void handleOutOfRangeValue(Object pigObj, HCatFieldSchema hcatFS, String additionalMsg) throws BackendException {
String msg = "Pig value '" + pigObj + "' is outside the bounds of column " + hcatFS.getName() +
@@ -507,8 +507,8 @@ protected void doSchemaValidations(Schema pigSchema, HCatSchema tblSchema) throw
* @throws HCatException
* @throws FrontendException
*/
- private void validateSchema(FieldSchema pigField, HCatFieldSchema hcatField,
- Schema topLevelPigSchema, HCatSchema topLevelHCatSchema,
+ private void validateSchema(FieldSchema pigField, HCatFieldSchema hcatField,
+ Schema topLevelPigSchema, HCatSchema topLevelHCatSchema,
int columnPos)
throws HCatException, FrontendException {
validateAlias(pigField.alias);
@@ -528,7 +528,7 @@ private void validateSchema(FieldSchema pigField, HCatFieldSchema hcatField,
case DataType.BAG:
HCatSchema arrayElementSchema = hcatField == null ? null : hcatField.getArrayElementSchema();
for (FieldSchema innerField : pigField.schema.getField(0).schema.getFields()) {
- validateSchema(innerField, getColFromSchema(pigField.alias, arrayElementSchema),
+ validateSchema(innerField, getColFromSchema(pigField.alias, arrayElementSchema),
topLevelPigSchema, topLevelHCatSchema, columnPos);
}
break;
@@ -564,7 +564,7 @@ else if(hcatField != null) {
throwTypeMismatchException(type, Lists.newArrayList(Type.BOOLEAN), hcatField, columnPos);
break;
case DataType.CHARARRAY:
- throwTypeMismatchException(type, Lists.newArrayList(Type.STRING, Type.CHAR, Type.VARCHAR),
+ throwTypeMismatchException(type, Lists.newArrayList(Type.STRING, Type.CHAR, Type.VARCHAR),
hcatField, columnPos);
break;
case DataType.DOUBLE:
@@ -574,15 +574,15 @@ else if(hcatField != null) {
throwTypeMismatchException(type, Lists.newArrayList(Type.FLOAT), hcatField, columnPos);
break;
case DataType.INTEGER:
- throwTypeMismatchException(type, Lists.newArrayList(Type.INT, Type.BIGINT,
+ throwTypeMismatchException(type, Lists.newArrayList(Type.INT, Type.BIGINT,
Type.TINYINT, Type.SMALLINT), hcatField, columnPos);
break;
case DataType.LONG:
throwTypeMismatchException(type, Lists.newArrayList(Type.BIGINT), hcatField, columnPos);
break;
default:
- throw new FrontendException("'" + type +
- "' Pig datatype in column " + columnPos + "(0-based) is not supported by HCat",
+ throw new FrontendException("'" + type +
+ "' Pig datatype in column " + columnPos + "(0-based) is not supported by HCat",
PigHCatUtil.PIG_EXCEPTION_CODE);
}
}
@@ -596,12 +596,12 @@ else if(hcatField != null) {
}
}
private static void throwTypeMismatchException(byte pigDataType,
-      List<Type> hcatRequiredType, HCatFieldSchema hcatActualField,
+      List<Type> hcatRequiredType, HCatFieldSchema hcatActualField,
int columnPos) throws FrontendException {
if(!hcatRequiredType.contains(hcatActualField.getType())) {
- throw new FrontendException(
- "Pig '" + DataType.findTypeName(pigDataType) + "' type in column " +
- columnPos + "(0-based) cannot map to HCat '" +
+ throw new FrontendException(
+ "Pig '" + DataType.findTypeName(pigDataType) + "' type in column " +
+ columnPos + "(0-based) cannot map to HCat '" +
hcatActualField.getType() + "'type. Target filed must be of HCat type {" +
StringUtils.join(hcatRequiredType, " or ") + "}");
}
@@ -639,7 +639,7 @@ public void storeStatistics(ResourceStatistics stats, String arg1, Job job) thro
}
/**
- * todo: when job is complete, should print the msgCount table to log
+ * todo: when job is complete, should print the msgCount table to log
*/
private static final class DataLossLogger {
private static final Map<String, Integer> msgCount = new HashMap<String, Integer>();
diff --git hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java
index 4d77057..67a7110 100644
--- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java
+++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java
@@ -30,7 +30,7 @@
/**
* Enumeration of all supported types of Metastore operations.
*/
- public static enum EventType {
+ public enum EventType {
CREATE_DATABASE(HCatConstants.HCAT_CREATE_DATABASE_EVENT),
DROP_DATABASE(HCatConstants.HCAT_DROP_DATABASE_EVENT),
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java
index 24277c4..1fd84a8 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java
@@ -26,18 +26,18 @@
* @param transactionId the ID of the Txn in which the write occurs
* @param record the record to be written
*/
- public void write(long transactionId, byte[] record) throws StreamingException;
+ void write(long transactionId, byte[] record) throws StreamingException;
/** Flush records from buffer. Invoked by TransactionBatch.commit() */
- public void flush() throws StreamingException;
+ void flush() throws StreamingException;
-  /** Clear bufferred writes. Invoked by TransactionBatch.abort() */
+  /** Clear buffered writes. Invoked by TransactionBatch.abort() */
- public void clear() throws StreamingException;
+ void clear() throws StreamingException;
/** Acquire a new RecordUpdater. Invoked when
* StreamingConnection.fetchTransactionBatch() is called */
- public void newBatch(Long minTxnId, Long maxTxnID) throws StreamingException;
+ void newBatch(Long minTxnId, Long maxTxnID) throws StreamingException;
/** Close the RecordUpdater. Invoked by TransactionBatch.close() */
- public void closeBatch() throws StreamingException;
+ void closeBatch() throws StreamingException;
}
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java
index 25acff0..8d640d6 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java
@@ -37,13 +37,12 @@
* @throws StreamingException
* @return a batch of transactions
*/
- public TransactionBatch fetchTransactionBatch(int numTransactionsHint,
- RecordWriter writer)
+ TransactionBatch fetchTransactionBatch(int numTransactionsHint, RecordWriter writer)
throws ConnectionError, StreamingException, InterruptedException;
/**
* Close connection
*/
- public void close();
+ void close();
}
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
index d9a083d..b4603b6 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java
@@ -29,46 +29,46 @@
*
*/
public interface TransactionBatch {
- public enum TxnState {INACTIVE, OPEN, COMMITTED, ABORTED }
+ enum TxnState {INACTIVE, OPEN, COMMITTED, ABORTED}
/**
* Activate the next available transaction in the current transaction batch
* @throws StreamingException if not able to switch to next Txn
-   * @throws InterruptedException if call in interrupted
+   * @throws InterruptedException if the call is interrupted
*/
- public void beginNextTransaction() throws StreamingException, InterruptedException;
+ void beginNextTransaction() throws StreamingException, InterruptedException;
/**
* Get Id of currently open transaction
* @return transaction id
*/
- public Long getCurrentTxnId();
+ Long getCurrentTxnId();
/**
* get state of current transaction
*/
- public TxnState getCurrentTransactionState();
+ TxnState getCurrentTransactionState();
/**
* Commit the currently open transaction
* @throws StreamingException if there are errors committing
-   * @throws InterruptedException if call in interrupted
+   * @throws InterruptedException if the call is interrupted
*/
- public void commit() throws StreamingException, InterruptedException;
+ void commit() throws StreamingException, InterruptedException;
/**
* Abort the currently open transaction
* @throws StreamingException if there are errors
-   * @throws InterruptedException if call in interrupted
+   * @throws InterruptedException if the call is interrupted
*/
- public void abort() throws StreamingException, InterruptedException;
+ void abort() throws StreamingException, InterruptedException;
/**
* Remaining transactions are the ones that are not committed or aborted or open.
* Current open transaction is not considered part of remaining txns.
* @return number of transactions remaining this batch.
*/
- public int remainingTransactions();
+ int remainingTransactions();
/**
@@ -77,14 +77,14 @@
* @throws StreamingException if there are errors when writing
-   * @throws InterruptedException if call in interrupted
+   * @throws InterruptedException if the call is interrupted
*/
- public void write(byte[] record) throws StreamingException, InterruptedException;
+ void write(byte[] record) throws StreamingException, InterruptedException;
/**
* Write records using RecordWriter
* @throws StreamingException if there are errors when writing
-   * @throws InterruptedException if call in interrupted
+   * @throws InterruptedException if the call is interrupted
*/
- public void write(Collection<byte[]> records) throws StreamingException, InterruptedException;
+ void write(Collection<byte[]> records) throws StreamingException, InterruptedException;
/**
@@ -92,12 +92,12 @@
* to keep them from expiring
* @throws StreamingException if there are errors
*/
- public void heartbeat() throws StreamingException;
+ void heartbeat() throws StreamingException;
/**
* Close the TransactionBatch
* @throws StreamingException if there are errors closing batch
-   * @throws InterruptedException if call in interrupted
+   * @throws InterruptedException if the call is interrupted
*/
- public void close() throws StreamingException, InterruptedException;
+ void close() throws StreamingException, InterruptedException;
}
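Reviewer note: the javadoc above describes a lifecycle rather than a single call, so a usage sketch may help. The following is not part of this patch; it assumes the HiveEndPoint and DelimitedInputWriter classes from the same streaming package, uses an illustrative metastore URI and table, and omits the error handling and security setup a real client needs:

```java
import org.apache.hive.hcatalog.streaming.DelimitedInputWriter;
import org.apache.hive.hcatalog.streaming.HiveEndPoint;
import org.apache.hive.hcatalog.streaming.StreamingConnection;
import org.apache.hive.hcatalog.streaming.TransactionBatch;

// Hedged sketch of the TransactionBatch lifecycle shown above.
public class StreamingSketch {
  public static void main(String[] args) throws Exception {
    HiveEndPoint endPoint =
        new HiveEndPoint("thrift://localhost:9083", "default", "alerts", null);
    StreamingConnection conn = endPoint.newConnection(true);
    DelimitedInputWriter writer =
        new DelimitedInputWriter(new String[]{"id", "msg"}, ",", endPoint);

    TransactionBatch batch = conn.fetchTransactionBatch(10, writer);
    try {
      while (batch.remainingTransactions() > 0) {
        batch.beginNextTransaction();       // activate the next txn in the batch
        batch.write("1,hello".getBytes());  // buffered through the RecordWriter
        batch.commit();                     // flush and commit the current txn
      }
    } finally {
      batch.close();
      conn.close();
    }
  }
}
```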
diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecService.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecService.java
index 6f42b7b..6fb98af 100644
--- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecService.java
+++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecService.java
@@ -25,11 +25,9 @@
import org.apache.commons.exec.ExecuteException;
public interface ExecService {
- public ExecBean run(String program, List<String> args,
- Map<String, String> env)
+ ExecBean run(String program, List<String> args, Map<String, String> env)
throws NotAuthorizedException, BusyException, ExecuteException, IOException;
- public ExecBean runUnlimited(String program, List<String> args,
- Map<String, String> env)
+ ExecBean runUnlimited(String program, List<String> args, Map<String, String> env)
throws NotAuthorizedException, ExecuteException, IOException;
}
diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java
index c7e92cf..d9a5c10 100644
--- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java
+++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java
@@ -47,7 +47,7 @@
public class LauncherDelegator extends TempletonDelegator {
private static final Log LOG = LogFactory.getLog(LauncherDelegator.class);
protected String runAs = null;
- static public enum JobType {JAR, STREAMING, PIG, HIVE, SQOOP}
+ public enum JobType {JAR, STREAMING, PIG, HIVE, SQOOP}
private boolean secureMeatastoreAccess = false;
private final String HIVE_SHIMS_FILENAME_PATTERN = ".*hive-shims.*";
@@ -90,7 +90,7 @@ public EnqueueBean enqueueController(String user, Map<String, Object> userArgs,
if (id == null) {
throw new QueueException("Unable to get job id");
}
-
+
registerJob(id, user, callback, userArgs);
return new EnqueueBean(id);
@@ -246,7 +246,7 @@ public static void addDef(List<String> args, String name, String val) {
}
/**
-  * This is called by subclasses when they determined that the sumbmitted job requires
-  * metastore access (e.g. Pig job that uses HCatalog). This then determines if
+  * This is called by subclasses when they determine that the submitted job requires
+  * metastore access (e.g. Pig job that uses HCatalog). This then determines if
* secure access is required and causes TempletonControllerJob to set up a delegation token.
* @see TempletonControllerJob
*/
diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java
index d2127e1..34c47f8 100644
--- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java
+++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java
@@ -653,7 +653,7 @@ public EnqueueBean mapReduceStreaming(@FormParam("input") List<String> inputs,
verifyParam(inputs, "input");
verifyParam(mapper, "mapper");
verifyParam(reducer, "reducer");
-
+
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
userArgs.put("input", inputs);
@@ -680,8 +680,8 @@ public EnqueueBean mapReduceStreaming(@FormParam("input") List<String> inputs,
/**
* Run a MapReduce Jar job.
* Params correspond to the REST api params
- * @param usesHcatalog if {@code true}, means the Jar uses HCat and thus needs to access
- * metastore, which requires additional steps for WebHCat to perform in a secure cluster.
+ * @param usesHcatalog if {@code true}, means the Jar uses HCat and thus needs to access
+ * metastore, which requires additional steps for WebHCat to perform in a secure cluster.
* @param callback URL which WebHCat will call when the hive job finishes
* @see org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob
*/
@@ -703,7 +703,7 @@ public EnqueueBean mapReduceJar(@FormParam("jar") String jar,
verifyUser();
verifyParam(jar, "jar");
verifyParam(mainClass, "class");
-
+
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
userArgs.put("jar", jar);
@@ -729,7 +729,7 @@ public EnqueueBean mapReduceJar(@FormParam("jar") String jar,
* Run a Pig job.
* Params correspond to the REST api params. If '-useHCatalog' is in the {@code pigArgs}, usesHcatalog
* is interpreted as true.
- * @param usesHcatalog if {@code true}, means the Pig script uses HCat and thus needs to access
+ * @param usesHcatalog if {@code true}, means the Pig script uses HCat and thus needs to access
* metastore, which requires additional steps for WebHCat to perform in a secure cluster.
* This does nothing to ensure that Pig is installed on target node in the cluster.
* @param callback URL which WebHCat will call when the hive job finishes
@@ -752,7 +752,7 @@ public EnqueueBean pig(@FormParam("execute") String execute,
if (execute == null && srcFile == null) {
throw new BadParam("Either execute or file parameter required");
}
-
+
//add all function arguments to a map
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
@@ -819,7 +819,7 @@ public EnqueueBean sqoop(@FormParam("command") String command,
* @param execute SQL statement to run, equivalent to "-e" from hive command line
* @param srcFile name of hive script file to run, equivalent to "-f" from hive
* command line
- * @param hiveArgs additional command line argument passed to the hive command line.
+   * @param hiveArgs additional command line arguments passed to the hive command line.
* Please check https://cwiki.apache.org/Hive/languagemanual-cli.html
* for detailed explanation of command line arguments
* @param otherFiles additional files to be shipped to the launcher, such as the jars
@@ -846,7 +846,7 @@ public EnqueueBean hive(@FormParam("execute") String execute,
if (execute == null && srcFile == null) {
throw new BadParam("Either execute or file parameter required");
}
-
+
//add all function arguments to a map
Map<String, Object> userArgs = new HashMap<String, Object>();
userArgs.put("user.name", getDoAsUser());
@@ -903,42 +903,42 @@ public QueueStatusBean deleteJobId(@PathParam("jobid") String jobid)
* Example usages:
* 1. curl -s 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan'
* Return all the Job IDs submitted by hsubramaniyan
- * 2. curl -s
+ * 2. curl -s
* 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&showall=true'
* Return all the Job IDs that are visible to hsubramaniyan
* 3. curl -s
* 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&jobid=job_201312091733_0003'
* Return all the Job IDs for hsubramaniyan after job_201312091733_0003.
- * 4. curl -s 'http://localhost:50111/templeton/v1/jobs?
+ * 4. curl -s 'http://localhost:50111/templeton/v1/jobs?
* user.name=hsubramaniyan&jobid=job_201312091733_0003&numrecords=5'
- * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after job_201312091733_0003.
- * 5. curl -s
+   * Return the first 5 (at most) Job IDs submitted by hsubramaniyan after job_201312091733_0003.
+ * 5. curl -s
* 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&numrecords=5'
- * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after sorting the Job ID list
+   * Return the first 5 (at most) Job IDs submitted by hsubramaniyan after sorting the Job ID list
* lexicographically.
*
*
* Supporting pagination using "jobid" and "numrecords" parameters:
* Step 1: Get the start "jobid" = job_xxx_000, "numrecords" = n
- * Step 2: Issue a curl command by specifying the user-defined "numrecords" and "jobid"
- * Step 3: If list obtained from Step 2 has size equal to "numrecords", retrieve the list's
+ * Step 2: Issue a curl command by specifying the user-defined "numrecords" and "jobid"
+ * Step 3: If list obtained from Step 2 has size equal to "numrecords", retrieve the list's
* last record and get the Job Id of the last record as job_yyy_k, else quit.
* Step 4: set "jobid"=job_yyy_k and go to step 2.
- *
+ *
* @param fields If "fields" set to "*", the request will return full details of the job.
* If "fields" is missing, will only return the job ID. Currently the value can only
* be "*", other values are not allowed and will throw exception.
* @param showall If "showall" is set to "true", the request will return all jobs the user
* has permission to view, not only the jobs belonging to the user.
- * @param jobid If "jobid" is present, the records whose Job Id is lexicographically greater
- * than "jobid" are only returned. For example, if "jobid" = "job_201312091733_0001",
- * the jobs whose Job ID is greater than "job_201312091733_0001" are returned. The number of
+ * @param jobid If "jobid" is present, the records whose Job Id is lexicographically greater
+ * than "jobid" are only returned. For example, if "jobid" = "job_201312091733_0001",
+ * the jobs whose Job ID is greater than "job_201312091733_0001" are returned. The number of
* records returned depends on the value of "numrecords".
- * @param numrecords If the "jobid" and "numrecords" parameters are present, the top #numrecords
- * records appearing after "jobid" will be returned after sorting the Job Id list
- * lexicographically.
- * If "jobid" parameter is missing and "numrecords" is present, the top #numrecords will
- * be returned after lexicographically sorting the Job Id list. If "jobid" parameter is present
+ * @param numrecords If the "jobid" and "numrecords" parameters are present, the top #numrecords
+ * records appearing after "jobid" will be returned after sorting the Job Id list
+ * lexicographically.
+ * If "jobid" parameter is missing and "numrecords" is present, the top #numrecords will
+ * be returned after lexicographically sorting the Job Id list. If "jobid" parameter is present
* and "numrecords" is missing, all the records whose Job Id is greater than "jobid" are returned.
* @return list of job items based on the filter conditions specified by the user.
*/
@@ -950,7 +950,7 @@ public QueueStatusBean deleteJobId(@PathParam("jobid") String jobid)
@QueryParam("jobid") String jobid,
@QueryParam("numrecords") String numrecords)
throws NotAuthorizedException, BadParam, IOException, InterruptedException {
-
+
verifyUser();
boolean showDetails = false;
@@ -971,9 +971,9 @@ public QueueStatusBean deleteJobId(@PathParam("jobid") String jobid)
try {
if (numrecords != null) {
numRecords = Integer.parseInt(numrecords);
- if (numRecords <= 0) {
- throw new BadParam("numrecords should be an integer > 0");
- }
+ if (numRecords <= 0) {
+ throw new BadParam("numrecords should be an integer > 0");
+ }
}
else {
numRecords = -1;
@@ -983,18 +983,18 @@ public QueueStatusBean deleteJobId(@PathParam("jobid") String jobid)
throw new BadParam("Invalid numrecords format: numrecords should be an integer > 0");
}
- // Sort the list lexicographically
+ // Sort the list lexicographically
Collections.sort(list);
for (String job : list) {
// If numRecords = -1, fetch all records.
// Hence skip all the below checks when numRecords = -1.
if (numRecords != -1) {
- // If currRecord >= numRecords, we have already fetched the top #numRecords
+ // If currRecord >= numRecords, we have already fetched the top #numRecords
if (currRecord >= numRecords) {
break;
- }
- // If the current record needs to be returned based on the
+ }
+ // If the current record needs to be returned based on the
// filter conditions specified by the user, increment the counter
else if ((jobid != null && job.compareTo(jobid) > 0) || jobid == null) {
currRecord++;
@@ -1101,7 +1101,7 @@ public void verifyDdlParam(String param, String name)
* value of user.name query param, in kerberos mode it's the kinit'ed user.
*/
private String getRequestingUser() {
- if (theSecurityContext == null) {
+ if (theSecurityContext == null) {
return null;
}
String userName = null;
@@ -1114,7 +1114,7 @@ private String getRequestingUser() {
if(userName == null) {
return null;
}
- //map hue/foo.bar@something.com->hue since user group checks
+ //map hue/foo.bar@something.com->hue since user group checks
// and config files are in terms of short name
return UserGroupInformation.createRemoteUser(userName).getShortUserName();
}
@@ -1161,7 +1161,7 @@ private static String getRequestingHost(String requestingUser, HttpServletReques
return unkHost;
}
}
-
+
private void checkEnableLogPrerequisite(boolean enablelog, String statusdir) throws BadParam {
if (enablelog && !TempletonUtils.isset(statusdir))
throw new BadParam("enablelog is only applicable when statusdir is set");
diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TableDesc.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TableDesc.java
index f7cc3e9..3ddb8f5 100644
--- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TableDesc.java
+++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TableDesc.java
@@ -137,7 +137,7 @@ public boolean equals(Object o) {
* Ther ASC or DESC sort order.
*/
@XmlRootElement
- public static enum SortDirectionDesc {
+ public enum SortDirectionDesc {
ASC, DESC
}
diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java
index addd0c2..36b64da 100644
--- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java
+++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java
@@ -169,9 +169,9 @@ public String getParent() throws IOException {
String childJobIDs = getField("children");
if (childJobIDs != null) {
for (String jobid : childJobIDs.split(",")) {
- children.add(new JobState(jobid, config));
+ children.add(new JobState(jobid, config));
}
- }
+ }
return children;
}
diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java
index ccf6107..f537e63 100644
--- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java
+++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java
@@ -19,43 +19,42 @@
package org.apache.hive.hcatalog.templeton.tool;
public interface JobSubmissionConstants {
- public static final String COPY_NAME = "templeton.copy";
- public static final String STATUSDIR_NAME = "templeton.statusdir";
- public static final String ENABLE_LOG = "templeton.enablelog";
- public static final String JOB_TYPE = "templeton.jobtype";
- public static final String JAR_ARGS_NAME = "templeton.args";
- public static final String TEMPLETON_JOB_LAUNCH_TIME_NAME = "templeton.job.launch.time";
- public static final String OVERRIDE_CLASSPATH = "templeton.override-classpath";
- public static final String STDOUT_FNAME = "stdout";
- public static final String STDERR_FNAME = "stderr";
- public static final String EXIT_FNAME = "exit";
- public static final int WATCHER_TIMEOUT_SECS = 10;
- public static final int KEEP_ALIVE_MSEC = 60 * 1000;
+ String COPY_NAME = "templeton.copy";
+ String STATUSDIR_NAME = "templeton.statusdir";
+ String ENABLE_LOG = "templeton.enablelog";
+ String JOB_TYPE = "templeton.jobtype";
+ String JAR_ARGS_NAME = "templeton.args";
+ String TEMPLETON_JOB_LAUNCH_TIME_NAME = "templeton.job.launch.time";
+ String OVERRIDE_CLASSPATH = "templeton.override-classpath";
+ String STDOUT_FNAME = "stdout";
+ String STDERR_FNAME = "stderr";
+ String EXIT_FNAME = "exit";
+ int WATCHER_TIMEOUT_SECS = 10;
+ int KEEP_ALIVE_MSEC = 60 * 1000;
/*
* The = sign in the string for TOKEN_FILE_ARG_PLACEHOLDER is required because
* org.apache.hadoop.util.GenericOptionsParser.preProcessForWindows() prepares
* arguments expecting an = sign. It will fail to prepare the arguments correctly
* without the = sign present.
 */
- public static final String TOKEN_FILE_ARG_PLACEHOLDER =
+ String TOKEN_FILE_ARG_PLACEHOLDER =
"__MR_JOB_CREDENTIALS_OPTION=WEBHCAT_TOKEN_FILE_LOCATION__";
- public static final String TOKEN_FILE_ARG_PLACEHOLDER_TEZ =
+ String TOKEN_FILE_ARG_PLACEHOLDER_TEZ =
"__TEZ_CREDENTIALS_OPTION=WEBHCAT_TOKEN_FILE_LOCATION_TEZ__";
// MRv2 job tag used to identify Templeton launcher child jobs. Each child job
// will be tagged with the parent jobid so that on launcher task restart, all
// previously running child jobs can be killed before the child job is launched
// again.
- public static final String MAPREDUCE_JOB_TAGS = "mapreduce.job.tags";
- public static final String MAPREDUCE_JOB_TAGS_ARG_PLACEHOLDER =
+ String MAPREDUCE_JOB_TAGS = "mapreduce.job.tags";
+ String MAPREDUCE_JOB_TAGS_ARG_PLACEHOLDER =
"__MR_JOB_TAGS_OPTION=MR_JOB_TAGS_JOBID__";
/**
* constants needed for Pig job submission
* The string values here are what Pig expects to see in its environment
*/
- public static interface PigConstants {
- public static final String HIVE_HOME = "HIVE_HOME";
- public static final String HCAT_HOME = "HCAT_HOME";
- public static final String PIG_OPTS = "PIG_OPTS";
+ interface PigConstants {
+ String HIVE_HOME = "HIVE_HOME";
+ String HCAT_HOME = "HCAT_HOME";
+ String PIG_OPTS = "PIG_OPTS";
}
}
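The modifier removals in this file, like the interface hunks elsewhere in this patch, are purely cosmetic. A hypothetical illustration (not taken from the patch): the two interfaces below are identical to the compiler, because interface fields are implicitly public, static, and final, and interface methods implicitly public and abstract (JLS 9.3 and 9.4).

    interface Before {
      public static final String COPY_NAME = "templeton.copy";
      public abstract String copyName();
    }

    interface After {
      String COPY_NAME = "templeton.copy";  // still a compile-time constant
      String copyName();                    // still public and abstract
    }
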
diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LogRetriever.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LogRetriever.java
index 6dc27f4..0362c49 100644
--- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LogRetriever.java
+++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LogRetriever.java
@@ -75,7 +75,8 @@
static class AttemptInfo {
public String id;
public URL baseUrl;
- public enum AttemptStatus {COMPLETED, FAILED};
+ public enum AttemptStatus {COMPLETED, FAILED}
+
AttemptStatus status;
public String startTime;
public String endTime;
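The two enum hunks above follow the same rule, sketched here with a hypothetical class: a nested enum is implicitly static, so the static keyword adds nothing, and the semicolon after an enum body is an empty declaration the compiler silently discards.

    class Outer {
      // Identical in meaning to: public static enum AttemptStatus {COMPLETED, FAILED};
      enum AttemptStatus { COMPLETED, FAILED }
    }
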
diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonStorage.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonStorage.java
index 97d572c..8938182 100644
--- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonStorage.java
+++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonStorage.java
@@ -44,18 +44,18 @@
*/
public interface TempletonStorage {
// These are the possible types referenced by 'type' below.
- public enum Type {
+ enum Type {
UNKNOWN, JOB, JOBTRACKING, TEMPLETONOVERHEAD
}
- public static final String STORAGE_CLASS = "templeton.storage.class";
- public static final String STORAGE_ROOT = "templeton.storage.root";
+ String STORAGE_CLASS = "templeton.storage.class";
+ String STORAGE_ROOT = "templeton.storage.root";
/**
* Start the cleanup process for this storage type.
* @param config
*/
- public void startCleanup(Configuration config);
+ void startCleanup(Configuration config);
/**
* Save a single key/value pair for a specific job id.
@@ -64,7 +64,7 @@
* @param key The name of the field to save
* @param val The value of the field to save
*/
- public void saveField(Type type, String id, String key, String val)
+ void saveField(Type type, String id, String key, String val)
throws NotFoundException;
/**
@@ -76,7 +76,7 @@ public void saveField(Type type, String id, String key, String val)
* @return The value of the field requested, or null if not
* found.
*/
- public String getField(Type type, String id, String key);
+ String getField(Type type, String id, String key);
/**
* Get all the name/value pairs stored for this id.
@@ -90,7 +90,7 @@ public void saveField(Type type, String id, String key, String val)
* @param id The String id of this data grouping (jobid, etc.)
* @return A Map of key/value pairs found for this type/id.
*/
- public Map getFields(Type type, String id);
+ Map getFields(Type type, String id);
/**
* Delete a data grouping (all data for a jobid, all tracking data
@@ -102,14 +102,14 @@ public void saveField(Type type, String id, String key, String val)
* @return True if successful, false if not, throws NotFoundException
* if the id wasn't found.
*/
- public boolean delete(Type type, String id) throws NotFoundException;
+ boolean delete(Type type, String id) throws NotFoundException;
/**
* Get the id of each data grouping in the storage system.
*
* @return An ArrayList of ids.
*/
- public List getAll();
+ List getAll();
/**
* Get the id of each data grouping of a given type in the storage
@@ -117,7 +117,7 @@ public void saveField(Type type, String id, String key, String val)
* @param type The data type (as listed above)
* @return An ArrayList of ids.
*/
- public List getAllForType(Type type);
+ List getAllForType(Type type);
/**
* Get the id of each data grouping that has the specific key/value
@@ -126,7 +126,7 @@ public void saveField(Type type, String id, String key, String val)
* @param value The value of the field to search for
* @return An ArrayList of ids.
*/
- public List getAllForKey(String key, String value);
+ List getAllForKey(String key, String value);
/**
* Get the id of each data grouping of a given type that has the
@@ -136,18 +136,17 @@ public void saveField(Type type, String id, String key, String val)
* @param value The value of the field to search for
* @return An ArrayList of ids.
*/
- public List getAllForTypeAndKey(Type type, String key,
- String value);
+ List getAllForTypeAndKey(Type type, String key, String value);
/**
* For storage methods that require a connection, this is a hint
* that it's time to open a connection.
*/
- public void openStorage(Configuration config) throws IOException;
+ void openStorage(Configuration config) throws IOException;
/**
* For storage methods that require a connection, this is a hint
* that it's time to close the connection.
*/
- public void closeStorage() throws IOException;
+ void closeStorage() throws IOException;
}
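A hedged usage sketch of the key/value contract documented above; the wrapper class, job id, and field names are invented for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hive.hcatalog.templeton.tool.TempletonStorage;

    class StorageSketch {
      void trackJob(TempletonStorage storage, Configuration conf) throws Exception {
        storage.openStorage(conf);             // hint that a connection may be needed
        storage.saveField(TempletonStorage.Type.JOB, "job_42", "user", "hive");
        String user = storage.getField(TempletonStorage.Type.JOB, "job_42", "user");
        storage.closeStorage();                // hint that the connection can be released
      }
    }
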
diff --git hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestWebHCatE2e.java hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestWebHCatE2e.java
index bf05ba9..0030388 100644
--- hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestWebHCatE2e.java
+++ hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestWebHCatE2e.java
@@ -263,7 +263,7 @@ String getAssertMsg() {
return methodName + " " + submittedURL + " " + responseBody;
}
}
- private static enum HTTP_METHOD_TYPE {GET, POST, DELETE, PUT}
+ private enum HTTP_METHOD_TYPE {GET, POST, DELETE, PUT}
private static MethodCallRetVal doHttpCall(String uri, HTTP_METHOD_TYPE type) throws IOException {
return doHttpCall(uri, type, null, null);
}
diff --git hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java
index 0ad8f89..d748401 100644
--- hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java
+++ hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java
@@ -53,7 +53,7 @@
/** Represents the state a session item can be in. */
public enum WebSessionItemStatus {
NEW, READY, QUERY_SET, QUERY_RUNNING, DESTROY, KILL_QUERY
- };
+ }
/** The Web Interface sessionName this is used to identify the session. */
private final String sessionName;
diff --git hwi/web/session_manage.jsp hwi/web/session_manage.jsp
index ef65289..60160dd 100644
--- hwi/web/session_manage.jsp
+++ hwi/web/session_manage.jsp
@@ -19,7 +19,7 @@
--%>
<%@page import="org.apache.hadoop.hive.hwi.*" %>
<%@page errorPage="error_page.jsp" %>
-<% HWISessionManager hs = (HWISessionManager) application.getAttribute("hs");; %>
+<% HWISessionManager hs = (HWISessionManager) application.getAttribute("hs"); %>
<% HWIAuth auth = (HWIAuth) session.getAttribute("auth"); %>
<% if (auth==null) { %>
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAuthorizationApiAuthorizer.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAuthorizationApiAuthorizer.java
index e65bf4d..6757c22 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAuthorizationApiAuthorizer.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAuthorizationApiAuthorizer.java
@@ -71,7 +71,7 @@ protected static void setup() throws Exception {
}
interface FunctionInvoker {
- public void invoke() throws Exception;
+ void invoke() throws Exception;
}
/**
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 130fd67..622a288 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -349,7 +349,7 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf
// Test append_partition_by_name
client.appendPartition(dbName, tblName, partName);
Partition part5 = client.getPartition(dbName, tblName, part.getValues());
- assertTrue("Append partition by name failed", part5.getValues().equals(vals));;
+ assertTrue("Append partition by name failed", part5.getValues().equals(vals));
Path part5Path = new Path(part5.getSd().getLocation());
assertTrue(fs.exists(part5Path));
diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/security/DummyHiveMetastoreAuthorizationProvider.java itests/util/src/main/java/org/apache/hadoop/hive/ql/security/DummyHiveMetastoreAuthorizationProvider.java
index ed4b441..92d455b 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/ql/security/DummyHiveMetastoreAuthorizationProvider.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/ql/security/DummyHiveMetastoreAuthorizationProvider.java
@@ -49,7 +49,7 @@
PARTITION,
TABLE_AND_PARTITION,
AUTHORIZATION
- };
+ }
class AuthCallContext {
@@ -81,7 +81,7 @@
private Configuration conf;
public static final Log LOG = LogFactory.getLog(
- DummyHiveMetastoreAuthorizationProvider.class);;
+ DummyHiveMetastoreAuthorizationProvider.class);
@Override
public Configuration getConf() {
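Three hunks above (the JSP page, TestHiveMetaStore, and the Log declaration here) drop a doubled semicolon. A minimal standalone demonstration of why the extra token compiles yet is pure noise:

    class EmptyStatementDemo {
      public static void main(String[] args) {
        int count = 0;;                       // the second ';' is an empty statement
        if (args.length == 0) { count++; };   // the trailing ';' after a block is another one
        System.out.println(count);            // prints 1 when run with no arguments
      }
    }
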
diff --git jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
index 5898a6b..ec5e555 100644
--- jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
+++ jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
@@ -160,7 +160,7 @@ public InputStream getBinaryStream(int columnIndex) throws SQLException {
}
public InputStream getBinaryStream(String columnName) throws SQLException {
- return getBinaryStream(findColumn(columnName));
+ return getBinaryStream(findColumn(columnName));
}
public Blob getBlob(int i) throws SQLException {
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index d872be5..581b837 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -51,9 +51,8 @@
* @throws MetaException
* thrown if there is any other error
*/
- public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname,
- String name, Table newTable) throws InvalidOperationException,
- MetaException;
+ void alterTable(RawStore msdb, Warehouse wh, String dbname, String name, Table newTable)
+ throws InvalidOperationException, MetaException;
/**
* handles alter partition
@@ -75,8 +74,8 @@ public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname,
* @throws AlreadyExistsException
* @throws MetaException
*/
- public abstract Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
- final String name, final List part_vals, final Partition new_part)
+ Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+ final String name, final List part_vals, final Partition new_part)
throws InvalidOperationException, InvalidObjectException, AlreadyExistsException,
MetaException;
@@ -98,8 +97,8 @@ public abstract Partition alterPartition(final RawStore msdb, Warehouse wh, fina
* @throws AlreadyExistsException
* @throws MetaException
*/
- public abstract List alterPartitions(final RawStore msdb, Warehouse wh,
- final String dbname, final String name, final List new_part)
+ List alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
+ final String name, final List new_part)
throws InvalidOperationException, InvalidObjectException, AlreadyExistsException,
MetaException;
}
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
index 570b358..2d76279 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java
@@ -39,7 +39,7 @@
*
* @param table new table definition
*/
- public void preCreateTable(Table table)
+ void preCreateTable(Table table)
throws MetaException;
/**
@@ -48,7 +48,7 @@ public void preCreateTable(Table table)
*
* @param table new table definition
*/
- public void rollbackCreateTable(Table table)
+ void rollbackCreateTable(Table table)
throws MetaException;
/**
@@ -57,7 +57,7 @@ public void rollbackCreateTable(Table table)
*
* @param table new table definition
*/
- public void commitCreateTable(Table table)
+ void commitCreateTable(Table table)
throws MetaException;
/**
@@ -66,7 +66,7 @@ public void commitCreateTable(Table table)
*
* @param table table definition
*/
- public void preDropTable(Table table)
+ void preDropTable(Table table)
throws MetaException;
/**
@@ -75,7 +75,7 @@ public void preDropTable(Table table)
*
* @param table table definition
*/
- public void rollbackDropTable(Table table)
+ void rollbackDropTable(Table table)
throws MetaException;
/**
@@ -87,6 +87,6 @@ public void rollbackDropTable(Table table)
* @param deleteData whether to delete data as well; this should typically
* be ignored in the case of an external table
*/
- public void commitDropTable(Table table, boolean deleteData)
+ void commitDropTable(Table table, boolean deleteData)
throws MetaException;
}
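The Javadoc above describes a three-phase protocol per DDL operation: the client calls pre* before the metastore operation, then commit* on success or rollback* on failure. A minimal no-op implementation of the slimmed interface, offered only as a sketch:

    import org.apache.hadoop.hive.metastore.HiveMetaHook;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class NoOpMetaHook implements HiveMetaHook {
      @Override public void preCreateTable(Table table) throws MetaException { }
      @Override public void rollbackCreateTable(Table table) throws MetaException { }
      @Override public void commitCreateTable(Table table) throws MetaException { }
      @Override public void preDropTable(Table table) throws MetaException { }
      @Override public void rollbackDropTable(Table table) throws MetaException { }
      @Override public void commitDropTable(Table table, boolean deleteData) throws MetaException { }
    }
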
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java
index 1cdae9b..4b32b2a 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java
@@ -33,7 +33,5 @@
*
* @return hook, or null if none registered
*/
- public HiveMetaHook getHook(Table tbl) throws MetaException;
+ HiveMetaHook getHook(Table tbl) throws MetaException;
}
-
-// End HiveMetaHookLoader.java
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index a5ca093..d3fcb38 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -286,7 +286,7 @@ protected Formatter initialValue() {
}
};
- private final void logAuditEvent(String cmd) {
+ private void logAuditEvent(String cmd) {
if (cmd == null) {
return;
}
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IHMSHandler.java metastore/src/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 1675751..400a50f 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -23,5 +23,5 @@
public interface IHMSHandler extends ThriftHiveMetastore.Iface {
- public abstract void setConf(Configuration conf);
+ void setConf(Configuration conf);
}
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index d6e849f..e7e26dc 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -79,14 +79,14 @@
* Returns whether current client is compatible with conf or not
* @return
*/
- public boolean isCompatibleWith(HiveConf conf);
+ boolean isCompatibleWith(HiveConf conf);
/**
* Tries to reconnect this MetaStoreClient to the MetaStore.
*/
- public void reconnect() throws MetaException;
+ void reconnect() throws MetaException;
- public void close();
+ void close();
/**
* Get the names of all databases in the MetaStore that match the given pattern.
@@ -95,7 +95,7 @@
* @throws MetaException
* @throws TException
*/
- public List getDatabases(String databasePattern)
+ List getDatabases(String databasePattern)
throws MetaException, TException;
/**
@@ -104,7 +104,7 @@
* @throws MetaException
* @throws TException
*/
- public List getAllDatabases()
+ List getAllDatabases()
throws MetaException, TException;
/**
@@ -117,7 +117,7 @@
* @throws TException
* @throws UnknownDBException
*/
- public List getTables(String dbName, String tablePattern)
+ List getTables(String dbName, String tablePattern)
throws MetaException, TException, UnknownDBException;
/**
@@ -128,7 +128,7 @@
* @throws TException
* @throws UnknownDBException
*/
- public List getAllTables(String dbName)
+ List getAllTables(String dbName)
throws MetaException, TException, UnknownDBException;
/**
@@ -167,7 +167,7 @@
* The maximum number of tables returned
* @return A list of table names that match the desired filter
*/
- public List listTableNamesByFilter(String dbName, String filter, short maxTables)
+ List listTableNamesByFilter(String dbName, String filter, short maxTables)
throws MetaException, TException, InvalidOperationException, UnknownDBException;
@@ -185,9 +185,8 @@
* @throws TException
* A thrift communication error occurred
*/
- public void dropTable(String dbname, String tableName, boolean deleteData,
- boolean ignoreUknownTab) throws MetaException, TException,
- NoSuchObjectException;
+ void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUknownTab)
+ throws MetaException, TException, NoSuchObjectException;
/**
* Drop the table in the DEFAULT database.
@@ -209,14 +208,14 @@ public void dropTable(String dbname, String tableName, boolean deleteData,
* This method will be removed in release 0.7.0.
*/
@Deprecated
- public void dropTable(String tableName, boolean deleteData)
+ void dropTable(String tableName, boolean deleteData)
throws MetaException, UnknownTableException, TException,
NoSuchObjectException;
- public void dropTable(String dbname, String tableName)
+ void dropTable(String dbname, String tableName)
throws MetaException, TException, NoSuchObjectException;
- public boolean tableExists(String databaseName, String tableName) throws MetaException,
+ boolean tableExists(String databaseName, String tableName) throws MetaException,
TException, UnknownDBException;
/**
@@ -230,7 +229,7 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc
* This method will be removed in release 0.7.0.
*/
@Deprecated
- public boolean tableExists(String tableName) throws MetaException,
+ boolean tableExists(String tableName) throws MetaException,
TException, UnknownDBException;
/**
@@ -249,7 +248,7 @@ public boolean tableExists(String tableName) throws MetaException,
* This method will be removed in release 0.7.0.
*/
@Deprecated
- public Table getTable(String tableName) throws MetaException, TException,
+ Table getTable(String tableName) throws MetaException, TException,
NoSuchObjectException;
/**
@@ -260,7 +259,7 @@ public Table getTable(String tableName) throws MetaException, TException,
* @throws MetaException Could not fetch the database
* @throws TException A thrift communication error occurred
*/
- public Database getDatabase(String databaseName)
+ Database getDatabase(String databaseName)
throws NoSuchObjectException, MetaException, TException;
@@ -279,7 +278,7 @@ public Database getDatabase(String databaseName)
* @throws NoSuchObjectException
* In case the table wasn't found.
*/
- public Table getTable(String dbName, String tableName) throws MetaException,
+ Table getTable(String dbName, String tableName) throws MetaException,
TException, NoSuchObjectException;
/**
@@ -301,7 +300,7 @@ public Table getTable(String dbName, String tableName) throws MetaException,
* @throws MetaException
* Any other errors
*/
- public List getTableObjectsByName(String dbName, List tableNames)
+ List getTableObjectsByName(String dbName, List tableNames)
throws MetaException, InvalidOperationException, UnknownDBException, TException;
/**
@@ -316,11 +315,10 @@ public Table getTable(String dbName, String tableName) throws MetaException,
* @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String,
* java.lang.String, java.util.List)
*/
- public Partition appendPartition(String tableName, String dbName,
- List partVals) throws InvalidObjectException,
- AlreadyExistsException, MetaException, TException;
+ Partition appendPartition(String tableName, String dbName, List partVals)
+ throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
- public Partition appendPartition(String tableName, String dbName, String name)
+ Partition appendPartition(String tableName, String dbName, String name)
throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
/**
@@ -338,7 +336,7 @@ public Partition appendPartition(String tableName, String dbName, String name)
* @throws TException
* Thrift exception
*/
- public Partition add_partition(Partition partition)
+ Partition add_partition(Partition partition)
throws InvalidObjectException, AlreadyExistsException, MetaException,
TException;
@@ -356,7 +354,7 @@ public Partition add_partition(Partition partition)
* @throws TException
* Thrift exception
*/
- public int add_partitions(List partitions)
+ int add_partitions(List partitions)
throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
/**
@@ -367,8 +365,8 @@ public int add_partitions(List partitions)
* @param needResults Whether the results are needed
* @return the partitions that were added, or null if !needResults
*/
- public List add_partitions(
- List partitions, boolean ifNotExists, boolean needResults)
+ List add_partitions(List partitions, boolean ifNotExists,
+ boolean needResults)
throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
/**
@@ -381,8 +379,7 @@ public int add_partitions(List partitions)
* @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
* java.lang.String, java.util.List)
*/
- public Partition getPartition(String tblName, String dbName,
- List partVals) throws NoSuchObjectException, MetaException, TException;
+ Partition getPartition(String tblName, String dbName, List partVals)
+ throws NoSuchObjectException, MetaException, TException;
/**
* @param partitionSpecs
@@ -392,10 +389,9 @@ public Partition getPartition(String tblName, String dbName,
* @param destTableName
* @return partition object
*/
- public Partition exchange_partition(Map partitionSpecs,
- String sourceDb, String sourceTable, String destdb,
- String destTableName) throws MetaException, NoSuchObjectException,
- InvalidObjectException, TException;
+ Partition exchange_partition(Map partitionSpecs, String sourceDb,
+ String sourceTable, String destdb, String destTableName)
+ throws MetaException, NoSuchObjectException, InvalidObjectException, TException;
/**
* @param dbName
@@ -407,8 +403,8 @@ public Partition exchange_partition(Map partitionSpecs,
* @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
* java.lang.String, java.util.List)
*/
- public Partition getPartition(String dbName, String tblName,
- String name) throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ Partition getPartition(String dbName, String tblName, String name)
+ throws MetaException, UnknownTableException, NoSuchObjectException, TException;
/**
@@ -423,8 +419,8 @@ public Partition getPartition(String dbName, String tblName,
* @throws NoSuchObjectException
* @throws TException
*/
- public Partition getPartitionWithAuthInfo(String dbName, String tableName,
- List pvals, String userName, List groupNames)
+ Partition getPartitionWithAuthInfo(String dbName, String tableName, List pvals,
+ String userName, List groupNames)
throws MetaException, UnknownTableException, NoSuchObjectException, TException;
/**
@@ -436,17 +432,17 @@ public Partition getPartitionWithAuthInfo(String dbName, String tableName,
* @throws MetaException
* @throws TException
*/
- public List listPartitions(String db_name, String tbl_name,
- short max_parts) throws NoSuchObjectException, MetaException, TException;
+ List listPartitions(String db_name, String tbl_name, short max_parts)
+ throws NoSuchObjectException, MetaException, TException;
- public List listPartitions(String db_name, String tbl_name,
- List part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException;
+ List listPartitions(String db_name, String tbl_name, List part_vals,
+ short max_parts) throws NoSuchObjectException, MetaException, TException;
- public List listPartitionNames(String db_name, String tbl_name,
- short max_parts) throws MetaException, TException;
+ List listPartitionNames(String db_name, String tbl_name, short max_parts)
+ throws MetaException, TException;
- public List listPartitionNames(String db_name, String tbl_name,
- List part_vals, short max_parts)
+ List listPartitionNames(String db_name, String tbl_name, List part_vals,
+ short max_parts)
throws MetaException, TException, NoSuchObjectException;
/**
@@ -463,8 +459,8 @@ public Partition getPartitionWithAuthInfo(String dbName, String tableName,
* @throws NoSuchObjectException
* @throws TException
*/
- public List listPartitionsByFilter(String db_name, String tbl_name,
- String filter, short max_parts) throws MetaException,
+ List listPartitionsByFilter(String db_name, String tbl_name, String filter,
+ short max_parts) throws MetaException,
NoSuchObjectException, TException;
@@ -480,8 +476,8 @@ public Partition getPartitionWithAuthInfo(String dbName, String tableName,
* @param result the resulting list of partitions
* @return whether the resulting list contains partitions which may or may not match the expr
*/
- public boolean listPartitionsByExpr(String db_name, String tbl_name,
- byte[] expr, String default_partition_name, short max_parts, List result)
+ boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr,
+ String default_partition_name, short max_parts, List result)
throws TException;
/**
@@ -493,8 +489,8 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name,
* @return the list of partitions
* @throws NoSuchObjectException
*/
- public List listPartitionsWithAuthInfo(String dbName,
- String tableName, short s, String userName, List groupNames)
+ List listPartitionsWithAuthInfo(String dbName, String tableName, short s,
+ String userName, List groupNames)
throws MetaException, TException, NoSuchObjectException;
/**
@@ -507,8 +503,8 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name,
* @throws MetaException
* @throws TException
*/
- public List getPartitionsByNames(String db_name, String tbl_name,
- List part_names) throws NoSuchObjectException, MetaException, TException;
+ List getPartitionsByNames(String db_name, String tbl_name, List part_names)
+ throws NoSuchObjectException, MetaException, TException;
/**
* @param dbName
@@ -520,9 +516,9 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name,
* @return the list of partitions
* @throws NoSuchObjectException
*/
- public List listPartitionsWithAuthInfo(String dbName,
- String tableName, List partialPvals, short s, String userName,
- List groupNames) throws MetaException, TException, NoSuchObjectException;
+ List listPartitionsWithAuthInfo(String dbName, String tableName,
+ List partialPvals, short s, String userName, List groupNames)
+ throws MetaException, TException, NoSuchObjectException;
/**
* @param db_name
@@ -537,9 +533,10 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name,
* @throws UnknownPartitionException
* @throws InvalidPartitionException
*/
- public void markPartitionForEvent(String db_name, String tbl_name, Map partKVs,
- PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
- UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ void markPartitionForEvent(String db_name, String tbl_name, Map partKVs,
+ PartitionEventType eventType)
+ throws MetaException, NoSuchObjectException, TException, UnknownTableException,
+ UnknownDBException, UnknownPartitionException, InvalidPartitionException;
/**
* @param db_name
@@ -554,16 +551,17 @@ public void markPartitionForEvent(String db_name, String tbl_name, Map partKVs,
- public boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs,
- PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
- UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs,
+ PartitionEventType eventType)
+ throws MetaException, NoSuchObjectException, TException, UnknownTableException,
+ UnknownDBException, UnknownPartitionException, InvalidPartitionException;
/**
* @param partVals
* @throws TException
* @throws MetaException
*/
- public void validatePartitionNameCharacters(List partVals)
+ void validatePartitionNameCharacters(List partVals)
throws TException, MetaException;
@@ -577,25 +575,25 @@ public void validatePartitionNameCharacters(List partVals)
* @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
*/
- public void createTable(Table tbl) throws AlreadyExistsException,
+ void createTable(Table tbl) throws AlreadyExistsException,
InvalidObjectException, MetaException, NoSuchObjectException, TException;
- public void alter_table(String defaultDatabaseName, String tblName,
- Table table) throws InvalidOperationException, MetaException, TException;
+ void alter_table(String defaultDatabaseName, String tblName, Table table)
+ throws InvalidOperationException, MetaException, TException;
- public void createDatabase(Database db)
+ void createDatabase(Database db)
throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
- public void dropDatabase(String name)
+ void dropDatabase(String name)
throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
- public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+ void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
- public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+ void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
- public void alterDatabase(String name, Database db)
+ void alterDatabase(String name, Database db)
throws NoSuchObjectException, MetaException, TException;
/**
@@ -611,17 +609,15 @@ public void alterDatabase(String name, Database db)
* @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
* java.lang.String, java.util.List, boolean)
*/
- public boolean dropPartition(String db_name, String tbl_name,
- List part_vals, boolean deleteData) throws NoSuchObjectException,
- MetaException, TException;
+ boolean dropPartition(String db_name, String tbl_name, List part_vals, boolean deleteData)
+ throws NoSuchObjectException, MetaException, TException;
List dropPartitions(String dbName, String tblName,
List partExprs, boolean deleteData, boolean ignoreProtection,
boolean ifExists) throws NoSuchObjectException, MetaException, TException;
- public boolean dropPartition(String db_name, String tbl_name,
- String name, boolean deleteData) throws NoSuchObjectException,
- MetaException, TException;
+ boolean dropPartition(String db_name, String tbl_name, String name, boolean deleteData)
+ throws NoSuchObjectException, MetaException, TException;
/**
* updates a partition to new partition
*
@@ -638,7 +634,7 @@ public boolean dropPartition(String db_name, String tbl_name,
* @throws TException
* if error in communicating with metastore server
*/
- public void alter_partition(String dbName, String tblName, Partition newPart)
+ void alter_partition(String dbName, String tblName, Partition newPart)
throws InvalidOperationException, MetaException, TException;
/**
@@ -657,7 +653,7 @@ public void alter_partition(String dbName, String tblName, Partition newPart)
* @throws TException
* if error in communicating with metastore server
*/
- public void alter_partitions(String dbName, String tblName, List newParts)
+ void alter_partitions(String dbName, String tblName, List newParts)
throws InvalidOperationException, MetaException, TException;
/**
@@ -678,7 +674,8 @@ public void alter_partitions(String dbName, String tblName, List newP
* @throws TException
* if error in communicating with metastore server
*/
- public void renamePartition(final String dbname, final String name, final List part_vals, final Partition newPart)
+ void renamePartition(final String dbname, final String name, final List part_vals,
+ final Partition newPart)
throws InvalidOperationException, MetaException, TException;
/**
@@ -691,7 +688,7 @@ public void renamePartition(final String dbname, final String name, final List
- public List getFields(String db, String tableName)
+ List getFields(String db, String tableName)
throws MetaException, TException, UnknownTableException,
UnknownDBException;
@@ -705,7 +702,7 @@ public void renamePartition(final String dbname, final String name, final List
- public List getSchema(String db, String tableName)
+ List getSchema(String db, String tableName)
throws MetaException, TException, UnknownTableException,
UnknownDBException;
@@ -718,7 +715,7 @@ public void renamePartition(final String dbname, final String name, final List
- public List partitionNameToVals(String name)
+ List partitionNameToVals(String name)
throws MetaException, TException;
/**
*
@@ -739,7 +736,7 @@ public String getConfigValue(String name, String defaultValue)
* @throws MetaException
* @throws TException
*/
- public Map partitionNameToSpec(String name)
+ Map partitionNameToSpec(String name)
throws MetaException, TException;
/**
@@ -751,11 +748,11 @@ public String getConfigValue(String name, String defaultValue)
* @throws TException
* @throws AlreadyExistsException
*/
- public void createIndex(Index index, Table indexTable) throws InvalidObjectException,
+ void createIndex(Index index, Table indexTable) throws InvalidObjectException,
MetaException, NoSuchObjectException, TException, AlreadyExistsException;
- public void alter_index(String dbName, String tblName, String indexName,
- Index index) throws InvalidOperationException, MetaException, TException;
+ void alter_index(String dbName, String tblName, String indexName, Index index)
+ throws InvalidOperationException, MetaException, TException;
/**
*
@@ -768,7 +765,7 @@ public void alter_index(String dbName, String tblName, String indexName,
* @throws NoSuchObjectException
* @throws TException
*/
- public Index getIndex(String dbName, String tblName, String indexName)
+ Index getIndex(String dbName, String tblName, String indexName)
throws MetaException, UnknownTableException, NoSuchObjectException,
TException;
@@ -783,8 +780,8 @@ public Index getIndex(String dbName, String tblName, String indexName)
* @throws MetaException
* @throws TException
*/
- public List listIndexes(String db_name, String tbl_name,
- short max) throws NoSuchObjectException, MetaException, TException;
+ List listIndexes(String db_name, String tbl_name, short max)
+ throws NoSuchObjectException, MetaException, TException;
/**
* list all the index names of the given base table.
@@ -796,8 +793,8 @@ public Index getIndex(String dbName, String tblName, String indexName)
* @throws MetaException
* @throws TException
*/
- public List listIndexNames(String db_name, String tbl_name,
- short max) throws MetaException, TException;
+ List listIndexNames(String db_name, String tbl_name, short max)
+ throws MetaException, TException;
/**
* @param db_name
@@ -809,9 +806,8 @@ public Index getIndex(String dbName, String tblName, String indexName)
* @throws MetaException
* @throws TException
*/
- public boolean dropIndex(String db_name, String tbl_name,
- String name, boolean deleteData) throws NoSuchObjectException,
- MetaException, TException;
+ boolean dropIndex(String db_name, String tbl_name, String name, boolean deleteData)
+ throws NoSuchObjectException, MetaException, TException;
/**
* Write table level column statistics to persistent store
@@ -824,7 +820,7 @@ public boolean dropIndex(String db_name, String tbl_name,
* @throws InvalidInputException
*/
- public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+ boolean updateTableColumnStatistics(ColumnStatistics statsObj)
throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
InvalidInputException;
@@ -839,7 +835,7 @@ public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
* @throws InvalidInputException
*/
- public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
+ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
InvalidInputException;
@@ -847,15 +843,15 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
* Get table column statistics given dbName, tableName and multiple colName-s
* @return ColumnStatistics struct for a given db, table and columns
*/
- public List getTableColumnStatistics(String dbName, String tableName,
- List colNames) throws NoSuchObjectException, MetaException, TException;
+ List getTableColumnStatistics(String dbName, String tableName,
+ List colNames) throws NoSuchObjectException, MetaException, TException;
/**
* Get partitions column statistics given dbName, tableName, multiple partitions and colName-s
* @return ColumnStatistics struct for a given db, table and columns
*/
- public Map> getPartitionColumnStatistics(String dbName,
- String tableName, List partNames, List colNames)
+ Map> getPartitionColumnStatistics(String dbName,
+ String tableName, List partNames, List colNames)
throws NoSuchObjectException, MetaException, TException;
/**
@@ -872,8 +868,8 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
* @throws InvalidInputException
*/
- public boolean deletePartitionColumnStatistics(String dbName, String tableName,
- String partName, String colName) throws NoSuchObjectException, MetaException,
+ boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
+ String colName) throws NoSuchObjectException, MetaException,
InvalidObjectException, TException, InvalidInputException;
/**
@@ -889,7 +885,7 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName,
* @throws InvalidInputException
*/
- public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws
+ boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws
NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException;
/**
@@ -899,7 +895,7 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri
* @throws MetaException
* @throws TException
*/
- public boolean create_role(Role role)
+ boolean create_role(Role role)
throws MetaException, TException;
/**
@@ -910,7 +906,7 @@ public boolean create_role(Role role)
* @throws MetaException
* @throws TException
*/
- public boolean drop_role(String role_name) throws MetaException, TException;
+ boolean drop_role(String role_name) throws MetaException, TException;
/**
* list all role names
@@ -918,7 +914,7 @@ public boolean create_role(Role role)
* @throws TException
* @throws MetaException
*/
- public List listRoleNames() throws MetaException, TException;
+ List listRoleNames() throws MetaException, TException;
/**
*
@@ -932,9 +928,9 @@ public boolean create_role(Role role)
* @throws MetaException
* @throws TException
*/
- public boolean grant_role(String role_name, String user_name,
- PrincipalType principalType, String grantor, PrincipalType grantorType,
- boolean grantOption) throws MetaException, TException;
+ boolean grant_role(String role_name, String user_name, PrincipalType principalType,
+ String grantor, PrincipalType grantorType, boolean grantOption)
+ throws MetaException, TException;
/**
* @param role_name
@@ -947,8 +943,8 @@ public boolean grant_role(String role_name, String user_name,
* @throws MetaException
* @throws TException
*/
- public boolean revoke_role(String role_name, String user_name,
- PrincipalType principalType, boolean grantOption) throws MetaException, TException;
+ boolean revoke_role(String role_name, String user_name, PrincipalType principalType,
+ boolean grantOption) throws MetaException, TException;
/**
*
@@ -958,7 +954,7 @@ public boolean revoke_role(String role_name, String user_name,
* @throws MetaException
* @throws TException
*/
- public List list_roles(String principalName, PrincipalType principalType)
+ List list_roles(String principalName, PrincipalType principalType)
throws MetaException, TException;
/**
@@ -971,8 +967,8 @@ public boolean revoke_role(String role_name, String user_name,
* @throws MetaException
* @throws TException
*/
- public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
- String user_name, List group_names) throws MetaException,
+ PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, String user_name,
+ List group_names) throws MetaException,
TException;
/**
@@ -984,8 +980,8 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
* @throws MetaException
* @throws TException
*/
- public List list_privileges(String principal_name,
- PrincipalType principal_type, HiveObjectRef hiveObject)
+ List list_privileges(String principal_name, PrincipalType principal_type,
+ HiveObjectRef hiveObject)
throws MetaException, TException;
/**
@@ -994,7 +990,7 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
* @throws MetaException
* @throws TException
*/
- public boolean grant_privileges(PrivilegeBag privileges)
+ boolean grant_privileges(PrivilegeBag privileges)
throws MetaException, TException;
/**
@@ -1003,7 +999,7 @@ public boolean grant_privileges(PrivilegeBag privileges)
* @throws MetaException
* @throws TException
*/
- public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption)
+ boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption)
throws MetaException, TException;
/**
@@ -1013,7 +1009,7 @@ public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption)
* @throws MetaException
* @throws TException
*/
- public String getDelegationToken(String owner, String renewerKerberosPrincipalName)
+ String getDelegationToken(String owner, String renewerKerberosPrincipalName)
throws MetaException, TException;
/**
@@ -1022,28 +1018,28 @@ public String getDelegationToken(String owner, String renewerKerberosPrincipalNa
* @throws MetaException
* @throws TException
*/
- public long renewDelegationToken(String tokenStrForm) throws MetaException, TException;
+ long renewDelegationToken(String tokenStrForm) throws MetaException, TException;
/**
* @param tokenStrForm
* @throws MetaException
* @throws TException
*/
- public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException;
+ void cancelDelegationToken(String tokenStrForm) throws MetaException, TException;
- public void createFunction(Function func)
+ void createFunction(Function func)
throws InvalidObjectException, MetaException, TException;
- public void alterFunction(String dbName, String funcName, Function newFunction)
+ void alterFunction(String dbName, String funcName, Function newFunction)
throws InvalidObjectException, MetaException, TException;
- public void dropFunction(String dbName, String funcName) throws MetaException,
+ void dropFunction(String dbName, String funcName) throws MetaException,
NoSuchObjectException, InvalidObjectException, InvalidInputException, TException;
- public Function getFunction(String dbName, String funcName)
+ Function getFunction(String dbName, String funcName)
throws MetaException, TException;
- public List getFunctions(String dbName, String pattern)
+ List getFunctions(String dbName, String pattern)
throws MetaException, TException;
/**
@@ -1051,7 +1047,7 @@ public Function getFunction(String dbName, String funcName)
* @return list of valid transactions
* @throws TException
*/
- public ValidTxnList getValidTxns() throws TException;
+ ValidTxnList getValidTxns() throws TException;
/**
* Initiate a transaction.
@@ -1061,7 +1057,7 @@ public Function getFunction(String dbName, String funcName)
* @return transaction identifier
* @throws TException
*/
- public long openTxn(String user) throws TException;
+ long openTxn(String user) throws TException;
/**
* Initiate a batch of transactions. It is not guaranteed that the
@@ -1088,7 +1084,7 @@ public Function getFunction(String dbName, String funcName)
* optimistically assuming that the result matches the request.
* @throws TException
*/
- public OpenTxnsResponse openTxns(String user, int numTxns) throws TException;
+ OpenTxnsResponse openTxns(String user, int numTxns) throws TException;
/**
* Rollback a transaction. This will also unlock any locks associated with
@@ -1099,7 +1095,7 @@ public Function getFunction(String dbName, String funcName)
* deleted.
* @throws TException
*/
- public void rollbackTxn(long txnid) throws NoSuchTxnException, TException;
+ void rollbackTxn(long txnid) throws NoSuchTxnException, TException;
/**
* Commit a transaction. This will also unlock any locks associated with
@@ -1112,7 +1108,7 @@ public Function getFunction(String dbName, String funcName)
* aborted. This can result from the transaction timing out.
* @throws TException
*/
- public void commitTxn(long txnid)
+ void commitTxn(long txnid)
throws NoSuchTxnException, TxnAbortedException, TException;
/**
@@ -1122,7 +1118,7 @@ public void commitTxn(long txnid)
* @return List of currently opened transactions, included aborted ones.
* @throws TException
*/
- public GetOpenTxnsInfoResponse showTxns() throws TException;
+ GetOpenTxnsInfoResponse showTxns() throws TException;
/**
* Request a set of locks. All locks needed for a particular query, DML,
@@ -1152,7 +1148,7 @@ public void commitTxn(long txnid)
* aborted. This can result from the transaction timing out.
* @throws TException
*/
- public LockResponse lock(LockRequest request)
+ LockResponse lock(LockRequest request)
throws NoSuchTxnException, TxnAbortedException, TException;
/**
@@ -1176,7 +1172,7 @@ public LockResponse lock(LockRequest request)
* This can result from the lock timing out and being unlocked by the system.
* @throws TException
*/
- public LockResponse checkLock(long lockid)
+ LockResponse checkLock(long lockid)
throws NoSuchTxnException, TxnAbortedException, NoSuchLockException,
TException;
@@ -1191,7 +1187,7 @@ public LockResponse checkLock(long lockid)
* transaction.
* @throws TException
*/
- public void unlock(long lockid)
+ void unlock(long lockid)
throws NoSuchLockException, TxnOpenException, TException;
/**
@@ -1199,7 +1195,7 @@ public void unlock(long lockid)
* @return List of currently held and waiting locks.
* @throws TException
*/
- public ShowLocksResponse showLocks() throws TException;
+ ShowLocksResponse showLocks() throws TException;
/**
* Send a heartbeat to indicate that the client holding these locks (if
@@ -1221,7 +1217,7 @@ public void unlock(long lockid)
* This can result from the lock timing out and being unlocked by the system.
* @throws TException
*/
- public void heartbeat(long txnid, long lockid)
+ void heartbeat(long txnid, long lockid)
throws NoSuchLockException, NoSuchTxnException, TxnAbortedException,
TException;
@@ -1235,7 +1231,7 @@ public void heartbeat(long txnid, long lockid)
* have already been closed) and which were aborted.
* @throws TException
*/
- public HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) throws TException;
+ HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) throws TException;
/**
* Send a request to compact a table or partition. This will not block until the compaction is
@@ -1250,7 +1246,7 @@ public void heartbeat(long txnid, long lockid)
* @param type Whether this is a major or minor compaction.
* @throws TException
*/
- public void compact(String dbname, String tableName, String partitionName, CompactionType type)
+ void compact(String dbname, String tableName, String partitionName, CompactionType type)
throws TException;
/**
@@ -1259,9 +1255,9 @@ public void compact(String dbname, String tableName, String partitionName, Comp
* in progress, and finished but waiting to clean the existing files.
* @throws TException
*/
- public ShowCompactResponse showCompactions() throws TException;
+ ShowCompactResponse showCompactions() throws TException;
- public class IncompatibleMetastoreException extends MetaException {
+ class IncompatibleMetastoreException extends MetaException {
public IncompatibleMetastoreException(String message) {
super(message);
}
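The transaction and lock Javadoc above implies a calling pattern worth spelling out. A hedged sketch with error handling omitted; the client and the LockRequest are assumed to be built elsewhere:

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.LockRequest;
    import org.apache.hadoop.hive.metastore.api.LockResponse;
    import org.apache.thrift.TException;

    class TxnSketch {
      void runInTxn(IMetaStoreClient client, LockRequest request) throws TException {
        long txnid = client.openTxn("hive");        // initiate a transaction
        LockResponse resp = client.lock(request);   // request all needed locks at once
        client.heartbeat(txnid, resp.getLockid());  // keep the txn and its locks alive
        client.commitTxn(txnid);                    // commit; associated locks are released
      }
    }
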
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index a23d122..8ce292d 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -739,7 +739,7 @@ protected boolean shouldStop() {
return filterBuffer.hasError();
}
- private static enum FilterType {
+ private enum FilterType {
Integral,
String,
Date,
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
index a141793..b284089 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
@@ -36,7 +36,7 @@
* @return true on success
* @throws MetaException
*/
- public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
- Configuration conf) throws MetaException;
+ boolean deleteDir(FileSystem fs, Path f, boolean recursive, Configuration conf)
+ throws MetaException;
}
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 4f186f4..5c7b663 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -150,7 +150,7 @@
private static Lock pmfPropLock = new ReentrantLock();
private static final Log LOG = LogFactory.getLog(ObjectStore.class.getName());
- private static enum TXN_STATUS {
+ private enum TXN_STATUS {
NO_STATE, OPEN, COMMITED, ROLLBACK
}
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java
index 0787775..12d4ce8 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java
@@ -33,7 +33,7 @@
* @param expr Serialized expression.
* @return The filter string.
*/
- public String convertExprToFilter(byte[] expr) throws MetaException;
+ String convertExprToFilter(byte[] expr) throws MetaException;
/**
* Filters the partition names via serialized Hive expression.
@@ -43,6 +43,6 @@
* @param partitionNames Partition names; the list is modified in place.
* @return Whether there were any unknown partitions preserved in the name list.
*/
- public boolean filterPartitionsByExpr(List columnNames, byte[] expr,
- String defaultPartitionName, List partitionNames) throws MetaException;
+ boolean filterPartitionsByExpr(List columnNames, byte[] expr, String defaultPartitionName,
+ List partitionNames) throws MetaException;
}
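A hedged sketch of the in-place filtering contract described above; the wrapper class, expression bytes, and partition names are illustrative, and "__HIVE_DEFAULT_PARTITION__" is Hive's usual default partition name.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    class ExprFilterSketch {
      static boolean prune(PartitionExpressionProxy proxy, byte[] exprBytes) throws MetaException {
        List<String> names = new ArrayList<String>(
            Arrays.asList("ds=2014-01-01", "ds=2014-01-02"));
        // The name list is modified in place; the return value reports whether any
        // names the expression could not evaluate were preserved in the list.
        return proxy.filterPartitionsByExpr(
            Arrays.asList("ds"), exprBytes, "__HIVE_DEFAULT_PARTITION__", names);
      }
    }
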
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index 2379ce7..5a98d41 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -63,10 +63,10 @@
*/
@Target(value = ElementType.METHOD)
@Retention(value = RetentionPolicy.RUNTIME)
- public @interface CanNotRetry {
+ @interface CanNotRetry {
}
- public abstract void shutdown();
+ void shutdown();
/**
* Opens a new one or the one already created Every call of this function must
* have corresponding commit or rollback function call
@@ -75,7 +75,7 @@
* @return an active transaction
*/
- public abstract boolean openTransaction();
+ boolean openTransaction();
/**
* if this is the commit of the first open call then an actual commit is
* called.
@@ -84,66 +84,65 @@
* @return true or false
*/
@CanNotRetry
- public abstract boolean commitTransaction();
+ boolean commitTransaction();
/**
* Rolls back the current transaction if it is active
*/
@CanNotRetry
- public abstract void rollbackTransaction();
+ void rollbackTransaction();
- public abstract void createDatabase(Database db)
+ void createDatabase(Database db)
throws InvalidObjectException, MetaException;
- public abstract Database getDatabase(String name)
+ Database getDatabase(String name)
throws NoSuchObjectException;
- public abstract boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException;
+ boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException;
- public abstract boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, MetaException;
+ boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, MetaException;
- public abstract List<String> getDatabases(String pattern) throws MetaException;
+ List<String> getDatabases(String pattern) throws MetaException;
- public abstract List<String> getAllDatabases() throws MetaException;
+ List<String> getAllDatabases() throws MetaException;
- public abstract boolean createType(Type type);
+ boolean createType(Type type);
- public abstract Type getType(String typeName);
+ Type getType(String typeName);
- public abstract boolean dropType(String typeName);
+ boolean dropType(String typeName);
- public abstract void createTable(Table tbl) throws InvalidObjectException,
+ void createTable(Table tbl) throws InvalidObjectException,
MetaException;
- public abstract boolean dropTable(String dbName, String tableName)
+ boolean dropTable(String dbName, String tableName)
throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException;
- public abstract Table getTable(String dbName, String tableName)
+ Table getTable(String dbName, String tableName)
throws MetaException;
- public abstract boolean addPartition(Partition part)
+ boolean addPartition(Partition part)
throws InvalidObjectException, MetaException;
- public abstract boolean addPartitions(String dbName, String tblName, List<Partition> parts)
+ boolean addPartitions(String dbName, String tblName, List<Partition> parts)
throws InvalidObjectException, MetaException;
- public abstract Partition getPartition(String dbName, String tableName,
- List<String> part_vals) throws MetaException, NoSuchObjectException;
+ Partition getPartition(String dbName, String tableName, List<String> part_vals)
+ throws MetaException, NoSuchObjectException;
- public abstract boolean doesPartitionExist(String dbName, String tableName,
- List<String> part_vals) throws MetaException, NoSuchObjectException;
+ boolean doesPartitionExist(String dbName, String tableName, List<String> part_vals)
+ throws MetaException, NoSuchObjectException;
- public abstract boolean dropPartition(String dbName, String tableName,
- List<String> part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException,
- InvalidInputException;
+ boolean dropPartition(String dbName, String tableName, List<String> part_vals)
+ throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException;
- public abstract List<Partition> getPartitions(String dbName,
- String tableName, int max) throws MetaException, NoSuchObjectException;
+ List<Partition> getPartitions(String dbName, String tableName, int max)
+ throws MetaException, NoSuchObjectException;
- public abstract void alterTable(String dbname, String name, Table newTable)
+ void alterTable(String dbname, String name, Table newTable)
throws InvalidObjectException, MetaException;
- public List<String> getTables(String dbName, String pattern)
+ List<String> getTables(String dbName, String pattern)
throws MetaException;
/**
@@ -156,10 +155,10 @@ public abstract void alterTable(String dbname, String name, Table newTable)
* If there are duplicate names, only one instance of the table will be returned
* @throws MetaException
*/
- public List<Table> getTableObjectsByName(String dbname, List<String> tableNames)
+ List<Table> getTableObjectsByName(String dbname, List<String> tableNames)
throws MetaException, UnknownDBException;
- public List<String> getAllTables(String dbName) throws MetaException;
+ List<String> getAllTables(String dbName) throws MetaException;
/**
* Gets a list of tables based on a filter string and filter type.
@@ -173,116 +172,115 @@ public abstract void alterTable(String dbname, String name, Table newTable)
* @throws MetaException
* @throws UnknownDBException
*/
- public abstract List<String> listTableNamesByFilter(String dbName,
- String filter, short max_tables) throws MetaException, UnknownDBException;
+ List<String> listTableNamesByFilter(String dbName, String filter, short max_tables)
+ throws MetaException, UnknownDBException;
- public abstract List<String> listPartitionNames(String db_name,
- String tbl_name, short max_parts) throws MetaException;
+ List<String> listPartitionNames(String db_name, String tbl_name, short max_parts)
+ throws MetaException;
- public abstract List<String> listPartitionNamesByFilter(String db_name,
- String tbl_name, String filter, short max_parts) throws MetaException;
+ List<String> listPartitionNamesByFilter(String db_name, String tbl_name, String filter,
+ short max_parts) throws MetaException;
- public abstract void alterPartition(String db_name, String tbl_name, List<String> part_vals,
- Partition new_part) throws InvalidObjectException, MetaException;
+ void alterPartition(String db_name, String tbl_name, List<String> part_vals, Partition new_part)
+ throws InvalidObjectException, MetaException;
- public abstract void alterPartitions(String db_name, String tbl_name,
- List<List<String>> part_vals_list, List<Partition> new_parts)
+ void alterPartitions(String db_name, String tbl_name, List<List<String>> part_vals_list,
+ List<Partition> new_parts)
throws InvalidObjectException, MetaException;
- public abstract boolean addIndex(Index index)
+ boolean addIndex(Index index)
throws InvalidObjectException, MetaException;
- public abstract Index getIndex(String dbName, String origTableName, String indexName) throws MetaException;
+ Index getIndex(String dbName, String origTableName, String indexName) throws MetaException;
- public abstract boolean dropIndex(String dbName, String origTableName, String indexName) throws MetaException;
+ boolean dropIndex(String dbName, String origTableName, String indexName) throws MetaException;
- public abstract List<Index> getIndexes(String dbName,
- String origTableName, int max) throws MetaException;
+ List<Index> getIndexes(String dbName, String origTableName, int max) throws MetaException;
- public abstract List<String> listIndexNames(String dbName,
- String origTableName, short max) throws MetaException;
+ List<String> listIndexNames(String dbName, String origTableName, short max) throws MetaException;
- public abstract void alterIndex(String dbname, String baseTblName, String name, Index newIndex)
+ void alterIndex(String dbname, String baseTblName, String name, Index newIndex)
throws InvalidObjectException, MetaException;
- public abstract List<Partition> getPartitionsByFilter(
- String dbName, String tblName, String filter, short maxParts)
+ List<Partition> getPartitionsByFilter(String dbName, String tblName, String filter,
+ short maxParts)
throws MetaException, NoSuchObjectException;
- public abstract boolean getPartitionsByExpr(String dbName, String tblName,
- byte[] expr, String defaultPartitionName, short maxParts, List<Partition> result)
+ boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
+ String defaultPartitionName, short maxParts, List<Partition> result)
throws TException;
- public abstract List<Partition> getPartitionsByNames(
- String dbName, String tblName, List<String> partNames)
+ List<Partition> getPartitionsByNames(String dbName, String tblName, List<String> partNames)
throws MetaException, NoSuchObjectException;
- public abstract Table markPartitionForEvent(String dbName, String tblName, Map<String, String> partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException;
+ Table markPartitionForEvent(String dbName, String tblName, Map<String, String> partVals,
+ PartitionEventType evtType)
+ throws MetaException, UnknownTableException, InvalidPartitionException,
+ UnknownPartitionException;
- public abstract boolean isPartitionMarkedForEvent(String dbName, String tblName, Map<String, String> partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException;
+ boolean isPartitionMarkedForEvent(String dbName, String tblName, Map<String, String> partName,
+ PartitionEventType evtType)
+ throws MetaException, UnknownTableException, InvalidPartitionException,
+ UnknownPartitionException;
- public abstract boolean addRole(String rowName, String ownerName)
+ boolean addRole(String roleName, String ownerName)
throws InvalidObjectException, MetaException, NoSuchObjectException;
- public abstract boolean removeRole(String roleName) throws MetaException, NoSuchObjectException;
+ boolean removeRole(String roleName) throws MetaException, NoSuchObjectException;
- public abstract boolean grantRole(Role role, String userName, PrincipalType principalType,
- String grantor, PrincipalType grantorType, boolean grantOption)
+ boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor,
+ PrincipalType grantorType, boolean grantOption)
throws MetaException, NoSuchObjectException, InvalidObjectException;
- public abstract boolean revokeRole(Role role, String userName, PrincipalType principalType,
- boolean grantOption) throws MetaException, NoSuchObjectException;
+ boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
+ throws MetaException, NoSuchObjectException;
- public abstract PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
- List<String> groupNames) throws InvalidObjectException, MetaException;
+ PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List<String> groupNames)
+ throws InvalidObjectException, MetaException;
- public abstract PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName,
- List<String> groupNames) throws InvalidObjectException, MetaException;
+ PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, List<String> groupNames)
+ throws InvalidObjectException, MetaException;
- public abstract PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName,
- String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName, String userName,
+ List<String> groupNames) throws InvalidObjectException, MetaException;
- public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName,
- String partition, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName, String partition,
+ String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
- public abstract PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName,
- String columnName, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName, String partitionName,
+ String columnName, String userName, List<String> groupNames)
+ throws InvalidObjectException, MetaException;
- public abstract List<MGlobalPrivilege> listPrincipalGlobalGrants(String principalName,
- PrincipalType principalType);
+ List<MGlobalPrivilege> listPrincipalGlobalGrants(String principalName,
+ PrincipalType principalType);
- public abstract List<MDBPrivilege> listPrincipalDBGrants(String principalName,
- PrincipalType principalType, String dbName);
+ List<MDBPrivilege> listPrincipalDBGrants(String principalName, PrincipalType principalType,
+ String dbName);
- public abstract List<MTablePrivilege> listAllTableGrants(
- String principalName, PrincipalType principalType, String dbName,
- String tableName);
+ List<MTablePrivilege> listAllTableGrants(String principalName, PrincipalType principalType,
+ String dbName, String tableName);
- public abstract List<MPartitionPrivilege> listPrincipalPartitionGrants(
- String principalName, PrincipalType principalType, String dbName,
- String tableName, String partName);
+ List<MPartitionPrivilege> listPrincipalPartitionGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName, String partName);
- public abstract List<MTableColumnPrivilege> listPrincipalTableColumnGrants(
- String principalName, PrincipalType principalType, String dbName,
- String tableName, String columnName);
+ List<MTableColumnPrivilege> listPrincipalTableColumnGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName, String columnName);
- public abstract List<MPartitionColumnPrivilege> listPrincipalPartitionColumnGrants(
- String principalName, PrincipalType principalType, String dbName,
- String tableName, String partName, String columnName);
+ List<MPartitionColumnPrivilege> listPrincipalPartitionColumnGrants(String principalName,
+ PrincipalType principalType, String dbName, String tableName, String partName,
+ String columnName);
- public abstract boolean grantPrivileges (PrivilegeBag privileges)
+ boolean grantPrivileges(PrivilegeBag privileges)
throws InvalidObjectException, MetaException, NoSuchObjectException;
- public abstract boolean revokePrivileges (PrivilegeBag privileges, boolean grantOption)
+ boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
throws InvalidObjectException, MetaException, NoSuchObjectException;
- public abstract org.apache.hadoop.hive.metastore.api.Role getRole(
- String roleName) throws NoSuchObjectException;
+ org.apache.hadoop.hive.metastore.api.Role getRole(String roleName) throws NoSuchObjectException;
- public List<String> listRoleNames();
+ List<String> listRoleNames();
- public List<MRoleMap> listRoles(String principalName,
- PrincipalType principalType);
+ List<MRoleMap> listRoles(String principalName, PrincipalType principalType);
/**
@@ -290,15 +288,15 @@ public abstract boolean revokePrivileges (PrivilegeBag privileges, boolean gran
* @param roleName
* @return
*/
- public List<MRoleMap> listRoleMembers(String roleName);
+ List<MRoleMap> listRoleMembers(String roleName);
- public abstract Partition getPartitionWithAuth(String dbName, String tblName,
- List<String> partVals, String user_name, List<String> group_names)
+ Partition getPartitionWithAuth(String dbName, String tblName, List<String> partVals,
+ String user_name, List<String> group_names)
throws MetaException, NoSuchObjectException, InvalidObjectException;
- public abstract List<Partition> getPartitionsWithAuth(String dbName,
- String tblName, short maxParts, String userName, List<String> groupNames)
+ List<Partition> getPartitionsWithAuth(String dbName, String tblName, short maxParts,
+ String userName, List<String> groupNames)
throws MetaException, NoSuchObjectException, InvalidObjectException;
/**
@@ -316,8 +314,8 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName,
* @throws MetaException
* @throws NoSuchObjectException
*/
- public abstract List<String> listPartitionNamesPs(String db_name, String tbl_name,
- List<String> part_vals, short max_parts)
+ List<String> listPartitionNamesPs(String db_name, String tbl_name, List<String> part_vals,
+ short max_parts)
throws MetaException, NoSuchObjectException;
/**
@@ -341,8 +339,8 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName,
* @throws NoSuchObjectException
* @throws InvalidObjectException
*/
- public abstract List<Partition> listPartitionsPsWithAuth(String db_name, String tbl_name,
- List<String> part_vals, short max_parts, String userName, List<String> groupNames)
+ List<Partition> listPartitionsPsWithAuth(String db_name, String tbl_name, List<String> part_vals,
+ short max_parts, String userName, List<String> groupNames)
throws MetaException, InvalidObjectException, NoSuchObjectException;
/** Persists the given column statistics object to the metastore
@@ -356,7 +354,7 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName,
* @throws InvalidObjectException
* @throws InvalidInputException
*/
- public abstract boolean updateTableColumnStatistics(ColumnStatistics colStats)
+ boolean updateTableColumnStatistics(ColumnStatistics colStats)
throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
/** Persists the given column statistics object to the metastore
@@ -370,8 +368,7 @@ public abstract boolean updateTableColumnStatistics(ColumnStatistics colStats)
* @throws InvalidObjectException
* @throws InvalidInputException
*/
- public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
- List<String> partVals)
+ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, List<String> partVals)
throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
/**
@@ -387,15 +384,15 @@ public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsOb
* @throws InvalidInputException
*
*/
- public abstract ColumnStatistics getTableColumnStatistics(String dbName, String tableName,
- List<String> colName) throws MetaException, NoSuchObjectException;
+ ColumnStatistics getTableColumnStatistics(String dbName, String tableName, List<String> colName)
+ throws MetaException, NoSuchObjectException;
/**
* Returns the relevant column statistics for given columns in given partitions in a given
* table in a given database if such statistics exist.
*/
- public abstract List<ColumnStatistics> getPartitionColumnStatistics(
- String dbName, String tblName, List<String> partNames, List<String> colNames)
+ List<ColumnStatistics> getPartitionColumnStatistics(String dbName, String tblName,
+ List<String> partNames, List<String> colNames)
throws MetaException, NoSuchObjectException;
/**
@@ -415,8 +412,8 @@ public abstract ColumnStatistics getTableColumnStatistics(String dbName, String
* @throws InvalidInputException
*/
- public abstract boolean deletePartitionColumnStatistics(String dbName, String tableName,
- String partName, List<String> partVals, String colName)
+ boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
+ List<String> partVals, String colName)
throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
/**
@@ -434,34 +431,33 @@ public abstract boolean deletePartitionColumnStatistics(String dbName, String ta
* @throws InvalidInputException
*/
- public abstract boolean deleteTableColumnStatistics(String dbName, String tableName,
- String colName)
+ boolean deleteTableColumnStatistics(String dbName, String tableName, String colName)
throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
- public abstract long cleanupEvents();
+ long cleanupEvents();
- public abstract boolean addToken(String tokenIdentifier, String delegationToken);
+ boolean addToken(String tokenIdentifier, String delegationToken);
- public abstract boolean removeToken(String tokenIdentifier);
+ boolean removeToken(String tokenIdentifier);
- public abstract String getToken(String tokenIdentifier);
+ String getToken(String tokenIdentifier);
- public abstract List<String> getAllTokenIdentifiers();
+ List<String> getAllTokenIdentifiers();
- public abstract int addMasterKey(String key) throws MetaException;
+ int addMasterKey(String key) throws MetaException;
- public abstract void updateMasterKey(Integer seqNo, String key)
+ void updateMasterKey(Integer seqNo, String key)
throws NoSuchObjectException, MetaException;
- public abstract boolean removeMasterKey(Integer keySeq);
+ boolean removeMasterKey(Integer keySeq);
- public abstract String[] getMasterKeys();
+ String[] getMasterKeys();
- public abstract void verifySchema() throws MetaException;
+ void verifySchema() throws MetaException;
- public abstract String getMetaStoreSchemaVersion() throws MetaException;
+ String getMetaStoreSchemaVersion() throws MetaException;
- public abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException;
+ void setMetaStoreSchemaVersion(String version, String comment) throws MetaException;
void dropPartitions(String dbName, String tblName, List<String> partNames)
throws MetaException, NoSuchObjectException;
@@ -502,7 +498,7 @@ void dropPartitions(String dbName, String tblName, List<String> partNames)
* @throws InvalidObjectException
* @throws MetaException
*/
- public void createFunction(Function func)
+ void createFunction(Function func)
throws InvalidObjectException, MetaException;
/**
@@ -513,7 +509,7 @@ public void createFunction(Function func)
* @throws InvalidObjectException
* @throws MetaException
*/
- public void alterFunction(String dbName, String funcName, Function newFunction)
+ void alterFunction(String dbName, String funcName, Function newFunction)
throws InvalidObjectException, MetaException;
/**
@@ -526,7 +522,7 @@ public void alterFunction(String dbName, String funcName, Function newFunction)
* @throws InvalidObjectException
* @throws InvalidInputException
*/
- public void dropFunction(String dbName, String funcName)
+ void dropFunction(String dbName, String funcName)
throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException;
/**
@@ -536,7 +532,7 @@ public void dropFunction(String dbName, String funcName)
* @return
* @throws MetaException
*/
- public Function getFunction(String dbName, String funcName) throws MetaException;
+ Function getFunction(String dbName, String funcName) throws MetaException;
/**
* Retrieve list of function names based on name pattern.
@@ -545,6 +541,6 @@ public void dropFunction(String dbName, String funcName)
* @return
* @throws MetaException
*/
- public List<String> getFunctions(String dbName, String pattern) throws MetaException;
+ List<String> getFunctions(String dbName, String pattern) throws MetaException;
}
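One more implicit-modifier rule is at work in RawStore: a nested annotation type such as CanNotRetry is an interface member, so it is implicitly public and static, exactly like the methods around it (JLS 9.5). A declaration-only sketch; the Store name is invented:

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    interface Store {
      // Implicitly public and static as an interface member.
      @Target(ElementType.METHOD)
      @Retention(RetentionPolicy.RUNTIME)
      @interface CanNotRetry { }

      @CanNotRetry
      boolean commitTransaction();  // implicitly public abstract
    }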
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java
index 4499485..956d96a 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java
@@ -27,7 +27,7 @@
public abstract class PreEventContext {
- public static enum PreEventType {
+ public enum PreEventType {
CREATE_TABLE,
DROP_TABLE,
ALTER_TABLE,
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
index 4fa841b..4b1afc4 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
@@ -36,14 +36,11 @@
* @return the connection URL
* @throws Exception
*/
- public String getJdoConnectionUrl(Configuration conf)
- throws Exception;
+ String getJdoConnectionUrl(Configuration conf) throws Exception;
/**
* Alerts this that the connection URL was bad. Can be used to collect stats,
* etc.
- *
- * @param url
*/
- public void notifyBadConnectionUrl(String url);
+ void notifyBadConnectionUrl(String url);
}
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
index b8d1afc..306a12d 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java
@@ -68,13 +68,13 @@
private final String sqlOp;
// private constructor
- private Operator(String op){
+ Operator(String op) {
this.op = op;
this.jdoOp = op;
this.sqlOp = op;
}
- private Operator(String op, String jdoOp, String sqlOp){
+ Operator(String op, String jdoOp, String sqlOp) {
this.op = op;
this.jdoOp = jdoOp;
this.sqlOp = sqlOp;
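Enum constructors get the same treatment in ExpressionTree: an enum constructor is always private (JLS 8.9.2), and public or protected would be compile errors, so the "private" keyword only restates the default. A self-contained sketch with invented names:

    public enum Op {
      EQUALS("=");  // the constant invocation reaches the constructor below

      private final String symbol;

      // No modifier needed: enum constructors are implicitly private.
      Op(String symbol) {
        this.symbol = symbol;
      }

      public String symbol() {
        return symbol;
      }
    }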
diff --git ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 4246d68..dac63d7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -461,7 +461,6 @@
STATS_SKIPPING_BY_ERROR(30017, "Skipping stats aggregation by error {0}", true),
ORC_CORRUPTED_READ(30018, "Corruption in ORC data encountered. To skip reading corrupted "
+ "data, set " + HiveConf.ConfVars.HIVE_ORC_SKIP_CORRUPT_DATA + " to true");
- ;
private int errorCode;
private String mesg;
@@ -573,20 +572,20 @@ public static String findSQLState(String mesg) {
return error.getSQLState();
}
- private ErrorMsg(int errorCode, String mesg) {
+ ErrorMsg(int errorCode, String mesg) {
this(errorCode, mesg, "42000", false);
}
- private ErrorMsg(int errorCode, String mesg, boolean format) {
+ ErrorMsg(int errorCode, String mesg, boolean format) {
// 42000 is the generic SQLState for syntax error.
this(errorCode, mesg, "42000", format);
}
- private ErrorMsg(int errorCode, String mesg, String sqlState) {
+ ErrorMsg(int errorCode, String mesg, String sqlState) {
this(errorCode, mesg, sqlState, false);
}
- private ErrorMsg(int errorCode, String mesg, String sqlState, boolean format) {
+ ErrorMsg(int errorCode, String mesg, String sqlState, boolean format) {
this.errorCode = errorCode;
this.mesg = mesg;
this.sqlState = sqlState;
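Besides the constructor modifiers, the first ErrorMsg hunk above also deletes a stray ';' after the last enum constant. The semicolon that ends the constant list is required once members follow; a second one is an empty declaration, legal but dead. A sketch with invented names:

    public enum Code {
      FIRST(1),
      LAST(2);  // this ';' terminates the constant list
      ;         // an extra ';' like this compiles, but declares nothing

      private final int value;

      Code(int value) {
        this.value = value;
      }
    }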
diff --git ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHook.java ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHook.java
index 4dafeac..72d6d38 100644
--- ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHook.java
+++ ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHook.java
@@ -34,13 +34,11 @@
* Invoked before Hive begins any processing of a command in the Driver,
* notably before compilation and any customizable performance logging.
*/
- public void preDriverRun(
- HiveDriverRunHookContext hookContext) throws Exception;
+ void preDriverRun(HiveDriverRunHookContext hookContext) throws Exception;
/**
* Invoked after Hive performs any processing of a command, just before a
* response is returned to the entity calling the Driver.
*/
- public void postDriverRun(
- HiveDriverRunHookContext hookContext) throws Exception;
+ void postDriverRun(HiveDriverRunHookContext hookContext) throws Exception;
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHookContext.java ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHookContext.java
index 777730b..7fa86ef 100644
--- ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHookContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHookContext.java
@@ -25,6 +25,6 @@
* HiveDriverRunHook.
*/
public interface HiveDriverRunHookContext extends Configurable{
- public String getCommand();
- public void setCommand(String command);
+ String getCommand();
+ void setCommand(String command);
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/BucketMatcher.java ql/src/java/org/apache/hadoop/hive/ql/exec/BucketMatcher.java
index 5d78ce0..de7301b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/BucketMatcher.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/BucketMatcher.java
@@ -25,12 +25,12 @@
public interface BucketMatcher {
- public List<Path> getAliasBucketFiles(String currentInputFile, String refTableAlias, String alias);
+ List<Path> getAliasBucketFiles(String currentInputFile, String refTableAlias, String alias);
- public void setAliasBucketFileNameMapping(
- Map<String, Map<String, List<String>>> aliasBucketFileNameMapping);
+ void setAliasBucketFileNameMapping(
+ Map<String, Map<String, List<String>>> aliasBucketFileNameMapping);
- public Map