diff --git beeline/src/java/org/apache/hive/beeline/BeeLineSignalHandler.java beeline/src/java/org/apache/hive/beeline/BeeLineSignalHandler.java index 378f1b0..d4f3f10 100644 --- beeline/src/java/org/apache/hive/beeline/BeeLineSignalHandler.java +++ beeline/src/java/org/apache/hive/beeline/BeeLineSignalHandler.java @@ -26,10 +26,6 @@ import java.sql.Statement; -/** - * BeeLineSignalHandler. - * - */ public interface BeeLineSignalHandler { - public void setStatement(Statement stmt); + void setStatement(Statement stmt); } diff --git beeline/src/java/org/apache/hive/beeline/CommandHandler.java beeline/src/java/org/apache/hive/beeline/CommandHandler.java index 0441bce..179d9ec 100644 --- beeline/src/java/org/apache/hive/beeline/CommandHandler.java +++ beeline/src/java/org/apache/hive/beeline/CommandHandler.java @@ -35,19 +35,19 @@ /** * @return the name of the command */ - public String getName(); + String getName(); /** * @return all the possible names of this command. */ - public String[] getNames(); + String[] getNames(); /** * @return the short help description for this command. */ - public String getHelpText(); + String getHelpText(); /** @@ -58,7 +58,7 @@ * the command line to check. * @return the command string that matches, or null if it no match */ - public String matches(String line); + String matches(String line); /** @@ -67,11 +67,11 @@ * @param line * the full command line to execute. */ - public boolean execute(String line); + boolean execute(String line); /** * Returns the completors that can handle parameters. */ - public Completor[] getParameterCompletors(); -} \ No newline at end of file + Completor[] getParameterCompletors(); +} diff --git beeline/src/java/org/apache/hive/beeline/Commands.java beeline/src/java/org/apache/hive/beeline/Commands.java index a92d69f..b8635a0 100644 --- beeline/src/java/org/apache/hive/beeline/Commands.java +++ beeline/src/java/org/apache/hive/beeline/Commands.java @@ -817,7 +817,6 @@ public boolean quit(String line) { public boolean closeall(String line) { if (close(null)) { while (close(null)) { - ; } return true; } diff --git beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java index a21fa65..35e0d2d 100644 --- beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java +++ beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java @@ -28,58 +28,58 @@ public interface NestedScriptParser { - public enum CommandType { + enum CommandType { PARTIAL_STATEMENT, TERMINATED_STATEMENT, COMMENT } - static final String DEFAUTL_DELIMITER = ";"; + String DEFAUTL_DELIMITER = ";"; /*** * Find the type of given command * @param dbCommand * @return */ - public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException; + boolean isPartialCommand(String dbCommand) throws IllegalArgumentException; /** Parse the DB specific nesting format and extract the inner script name if any * @param dbCommand command from parent script * @return * @throws IllegalFormatException */ - public String getScriptName(String dbCommand) throws IllegalArgumentException; + String getScriptName(String dbCommand) throws IllegalArgumentException; /*** * Find if the given command is a nested script execution * @param dbCommand * @return */ - public boolean isNestedScript(String dbCommand); + boolean isNestedScript(String dbCommand); /*** * Find if the given command is should be passed to DB * @param dbCommand * @return */ - public boolean isNonExecCommand(String dbCommand); + boolean 
isNonExecCommand(String dbCommand); /*** * Get the SQL statement delimiter * @return */ - public String getDelimiter(); + String getDelimiter(); /*** * Clear any client specific tags * @return */ - public String cleanseCommand(String dbCommand); + String cleanseCommand(String dbCommand); /*** * Does the DB required table/column names quoted * @return */ - public boolean needsQuotedIdentifier(); + boolean needsQuotedIdentifier(); } @@ -88,7 +88,7 @@ * abstractCommandParser. * */ - private static abstract class AbstractCommandParser implements NestedScriptParser { + private abstract static class AbstractCommandParser implements NestedScriptParser { @Override public boolean isPartialCommand(String dbCommand) throws IllegalArgumentException{ diff --git beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java index de3ad4e..a2c3c1e 100644 --- beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java +++ beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java @@ -281,7 +281,7 @@ public void doInit(String toVersion) throws HiveMetaException { } } catch (IOException e) { throw new HiveMetaException("Schema initialization FAILED!" + - " Metastore state would be inconsistent !!", e); + " Metastore state would be inconsistent !!", e); } } diff --git beeline/src/java/org/apache/hive/beeline/SQLCompletor.java beeline/src/java/org/apache/hive/beeline/SQLCompletor.java index 844b9ae..3772d8c 100644 --- beeline/src/java/org/apache/hive/beeline/SQLCompletor.java +++ beeline/src/java/org/apache/hive/beeline/SQLCompletor.java @@ -74,7 +74,6 @@ public SQLCompletor(BeeLine beeLine, boolean skipmeta) for (StringTokenizer tok = new StringTokenizer(keywords, ", "); tok.hasMoreTokens(); completions .add(tok.nextToken())) { - ; } // now add the tables and columns from the current connection diff --git beeline/src/java/org/apache/hive/beeline/util/QFileClient.java beeline/src/java/org/apache/hive/beeline/util/QFileClient.java index b62a883..1df0c05 100644 --- beeline/src/java/org/apache/hive/beeline/util/QFileClient.java +++ beeline/src/java/org/apache/hive/beeline/util/QFileClient.java @@ -135,7 +135,7 @@ void initFilterSet() { .addFilter(userName, "!!{user.name}!!") .addFilter(operatorPattern, "\"$1_!!ELIDED!!\"") ; - }; + } public QFileClient setUsername(String username) { this.username = username; @@ -241,7 +241,7 @@ private void runQFileTest() throws Exception { if (1 != beeLine.runCommands(new String[] { "!run " + qFileDirectory + "/" + qFileName })) { hasErrors = true; } - + beeLine.runCommands(new String[] { "!record" }); } diff --git cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java index 63668bc..b64212d 100644 --- cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java +++ cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java @@ -499,7 +499,7 @@ public int getStatus() { public enum ClientResult { RETURN_OK, RETURN_SERVER_EXCEPTION, RETURN_T_EXCEPTION - }; + } private final ClientResult result; @@ -533,7 +533,6 @@ public HiveClient getClient() { when(result.fetchN(anyInt())).thenThrow(exception); } catch (TException e) { - ; } return result; } else if (ClientResult.RETURN_T_EXCEPTION.equals(this.result)) { diff --git cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java index 63b9371..6039e4e 100644 --- 
cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java +++ cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java @@ -115,7 +115,6 @@ public void run() { output.write(buffer, 0, read); } } catch (IOException e) { - ; } } diff --git common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java index e41f87c..265ed85 100644 --- common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java +++ common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java @@ -116,19 +116,18 @@ public String getAggregator(Configuration conf) { /** * @return List of all supported statistics */ - public static final String[] supportedStats = new String[] - {NUM_FILES,ROW_COUNT,TOTAL_SIZE,RAW_DATA_SIZE}; + public static final String[] supportedStats = {NUM_FILES,ROW_COUNT,TOTAL_SIZE,RAW_DATA_SIZE}; /** * @return List of all statistics that need to be collected during query execution. These are * statistics that inherently require a scan of the data. */ - public static final String[] statsRequireCompute = new String[] {ROW_COUNT,RAW_DATA_SIZE}; + public static final String[] statsRequireCompute = {ROW_COUNT,RAW_DATA_SIZE}; /** * @return List of statistics that can be collected quickly without requiring a scan of the data. */ - public static final String[] fastStats = new String[] {NUM_FILES,TOTAL_SIZE}; + public static final String[] fastStats = {NUM_FILES,TOTAL_SIZE}; // This string constant is used by stats task to indicate to AlterHandler that // alterPartition/alterTable is happening via statsTask. @@ -143,7 +142,7 @@ public String getAggregator(Configuration conf) { public static final String FALSE = "false"; public static boolean areStatsUptoDate(Map params) { - String statsAcc = params.get(COLUMN_STATS_ACCURATE); - return statsAcc == null ? false : statsAcc.equals(TRUE); + String statsAcc = params.get(COLUMN_STATS_ACCURATE); + return TRUE.equals(statsAcc); } } diff --git common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java index bcb3276..e4018e4 100644 --- common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java +++ common/src/java/org/apache/hadoop/hive/common/ValidTxnList.java @@ -29,13 +29,13 @@ * Key used to store valid txn list in a * {@link org.apache.hadoop.conf.Configuration} object. */ - public static final String VALID_TXNS_KEY = "hive.txn.valid.txns"; + String VALID_TXNS_KEY = "hive.txn.valid.txns"; /** * The response to a range query. NONE means no values in this range match, * SOME mean that some do, and ALL means that every value does. */ - public enum RangeResponse {NONE, SOME, ALL}; + enum RangeResponse {NONE, SOME, ALL} /** * Indicates whether a given transaction has been committed and should be @@ -43,7 +43,7 @@ * @param txnid id for the transaction * @return true if committed, false otherwise */ - public boolean isTxnCommitted(long txnid); + boolean isTxnCommitted(long txnid); /** * Find out if a range of transaction ids have been committed. @@ -52,31 +52,31 @@ * @return Indicate whether none, some, or all of these transactions have been * committed. */ - public RangeResponse isTxnRangeCommitted(long minTxnId, long maxTxnId); + RangeResponse isTxnRangeCommitted(long minTxnId, long maxTxnId); /** * Write this validTxnList into a string. This should produce a string that * can be used by {@link #readFromString(String)} to populate a validTxnsList. 
*/ - public String writeToString(); + String writeToString(); /** * Populate this validTxnList from the string. It is assumed that the string * was created via {@link #writeToString()}. * @param src source string. */ - public void readFromString(String src); + void readFromString(String src); /** * Get the largest committed transaction id. * @return largest committed transaction id */ - public long getHighWatermark(); + long getHighWatermark(); /** * Get the list of transactions under the high water mark that are still * open. * @return a list of open transaction ids */ - public long[] getOpenTransactions(); + long[] getOpenTransactions(); } diff --git common/src/java/org/apache/hadoop/hive/common/classification/InterfaceAudience.java common/src/java/org/apache/hadoop/hive/common/classification/InterfaceAudience.java index 1334ed6..bcefca2 100644 --- common/src/java/org/apache/hadoop/hive/common/classification/InterfaceAudience.java +++ common/src/java/org/apache/hadoop/hive/common/classification/InterfaceAudience.java @@ -26,19 +26,19 @@ /** * Intended for use by any project or application. */ - @Documented public @interface Public {}; + @Documented public @interface Public {} /** * Intended only for the project(s) specified in the annotation */ @Documented public @interface LimitedPrivate { String[] value(); - }; + } /** * Intended for use only within Hive itself. */ - @Documented public @interface Private {}; + @Documented public @interface Private {} private InterfaceAudience() {} // Audience can't exist on its own } diff --git common/src/java/org/apache/hadoop/hive/common/classification/InterfaceStability.java common/src/java/org/apache/hadoop/hive/common/classification/InterfaceStability.java index 2a12806..c72a916 100644 --- common/src/java/org/apache/hadoop/hive/common/classification/InterfaceStability.java +++ common/src/java/org/apache/hadoop/hive/common/classification/InterfaceStability.java @@ -25,22 +25,22 @@ */ public class InterfaceStability { /** - * Can evolve while retaining compatibility for minor release boundaries.; + * Can evolve while retaining compatibility for minor release boundaries. * can break compatibility only at major release (ie. at m.0). */ @Documented - public @interface Stable {}; - + public @interface Stable {} + /** * Evolving, but can break compatibility at minor release (i.e. m.x) */ @Documented - public @interface Evolving {}; - + public @interface Evolving {} + /** * No guarantee is provided as to reliability or stability across any * level of release granularity. */ @Documented - public @interface Unstable {}; + public @interface Unstable {} } diff --git common/src/java/org/apache/hadoop/hive/common/cli/IHiveFileProcessor.java common/src/java/org/apache/hadoop/hive/common/cli/IHiveFileProcessor.java index 9ca8e6d..dbedf2e 100644 --- common/src/java/org/apache/hadoop/hive/common/cli/IHiveFileProcessor.java +++ common/src/java/org/apache/hadoop/hive/common/cli/IHiveFileProcessor.java @@ -29,5 +29,5 @@ * @param fileName the name of the file * @exception IOException if an I/O error occurs. 
*/ - public int processFile(String fileName) throws IOException; + int processFile(String fileName) throws IOException; } diff --git common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java index 19946d9..7c6d11b 100644 --- common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java +++ common/src/java/org/apache/hadoop/hive/common/metrics/MetricsMBean.java @@ -30,7 +30,7 @@ /** * Check if we're tracking a certain named key/metric */ - public abstract boolean hasKey(String name); + boolean hasKey(String name); /** * Add a key/metric and its value to track @@ -38,7 +38,7 @@ * @param value value associated with the key * @throws Exception */ - public abstract void put(String name, Object value) throws IOException; + void put(String name, Object value) throws IOException; /** * @@ -46,11 +46,11 @@ * @return value associated with the key * @throws Exception */ - public abstract Object get(String name) throws IOException; - + Object get(String name) throws IOException; + /** - * Removes all the keys and values from this MetricsMBean. + * Removes all the keys and values from this MetricsMBean. */ void clear(); } diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index ed22dbd..75c404c 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -181,7 +181,7 @@ * with non-null values to this list as they will override any values defined * in the underlying Hadoop configuration. */ - public static enum ConfVars { + public enum ConfVars { // QL execution stuff SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""), PLAN("hive.exec.plan", "", ""), @@ -282,9 +282,9 @@ LOCALMODEAUTO("hive.exec.mode.local.auto", false, "Let Hive determine whether to run in local mode automatically"), - LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L, + LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L, "When hive.exec.mode.local.auto is true, input bytes should less than this for local mode."), - LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4, + LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4, "When hive.exec.mode.local.auto is true, the number of tasks should less than this for local mode."), DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true, @@ -355,7 +355,7 @@ "The number of times to retry a HMSHandler call if there were a connection error"), HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", 1000, "The number of milliseconds between HMSHandler retry attempts"), - HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false, + HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false, "Whether to force reloading of the HMSHandler configuration (including\n" + "the connection URL, before the next metastore query that accesses the\n" + "datastore. Once reloaded, this value is reset to false. Used for\n" + @@ -368,7 +368,7 @@ "Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."), METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original", - "_INTERMEDIATE_ORIGINAL", + "_INTERMEDIATE_ORIGINAL", "Intermediate dir suffixes used for archiving. 
Not important what they\n" + "are, as long as collisions are avoided"), METASTORE_INT_ARCHIVED("hive.metastore.archive.intermediate.archived", @@ -544,7 +544,7 @@ HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false, "Whether to log Hive query, query plan, runtime statistics etc."), - HIVEQUERYSTRING("hive.query.string", "", + HIVEQUERYSTRING("hive.query.string", "", "Query being executed (might be multiple per a session)"), HIVEQUERYID("hive.query.id", "", @@ -783,8 +783,8 @@ " for small ORC files. Note that enabling this config will not honor padding tolerance\n" + " config (hive.exec.orc.block.padding.tolerance)."), HIVEMERGEINPUTFORMATSTRIPELEVEL("hive.merge.input.format.stripe.level", - "org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat", - "Input file format to use for ORC stripe level merging (for internal use only)"), + "org.apache.hadoop.hive.ql.io.orc.OrcFileStripeMergeInputFormat", + "Input file format to use for ORC stripe level merging (for internal use only)"), HIVEMERGECURRENTJOBHASDYNAMICPARTITIONS( "hive.merge.current.job.has.dynamic.partitions", false, ""), @@ -799,7 +799,7 @@ HIVE_RCFILE_TOLERATE_CORRUPTIONS("hive.io.rcfile.tolerate.corruptions", false, ""), HIVE_RCFILE_RECORD_BUFFER_SIZE("hive.io.rcfile.record.buffer.size", 4194304, ""), // 4M - HIVE_ORC_FILE_MEMORY_POOL("hive.exec.orc.memory.pool", 0.5f, + HIVE_ORC_FILE_MEMORY_POOL("hive.exec.orc.memory.pool", 0.5f, "Maximum fraction of heap that can be used by ORC file writers"), HIVE_ORC_WRITE_FORMAT("hive.exec.orc.write.format", null, "Define the version of the file to write"), @@ -1080,8 +1080,8 @@ "The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."), HIVE_STATS_JDBC_TIMEOUT("hive.stats.jdbc.timeout", 30, "Timeout value (number of seconds) used by JDBC connection and statements."), - HIVE_STATS_ATOMIC("hive.stats.atomic", false, - "whether to update metastore stats only if all stats are available"), + HIVE_STATS_ATOMIC("hive.stats.atomic", false, + "whether to update metastore stats only if all stats are available"), HIVE_STATS_RETRIES_MAX("hive.stats.retries.max", 0, "Maximum number of retries when stats publisher/aggregator got an exception updating intermediate database. 
\n" + "Default is no tries on failures."), @@ -1638,7 +1638,7 @@ "Exceeding this will trigger a flush irrelevant of memory pressure condition."), HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT("hive.vectorized.groupby.flush.percent", (float) 0.1, "Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded."), - + HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true, ""), diff --git common/src/java/org/apache/hadoop/hive/conf/Validator.java common/src/java/org/apache/hadoop/hive/conf/Validator.java index cea9c41..c5faabc 100644 --- common/src/java/org/apache/hadoop/hive/conf/Validator.java +++ common/src/java/org/apache/hadoop/hive/conf/Validator.java @@ -31,7 +31,7 @@ String validate(String value); - static class StringSet implements Validator { + class StringSet implements Validator { private final Set expected = new LinkedHashSet(); @@ -50,7 +50,7 @@ public String validate(String value) { } } - static enum RANGE_TYPE { + enum RANGE_TYPE { INT { @Override protected boolean inRange(String value, Object lower, Object upper) { @@ -90,7 +90,7 @@ public static RANGE_TYPE valueOf(Object lower, Object upper) { protected abstract boolean inRange(String value, Object lower, Object upper); } - static class RangeValidator implements Validator { + class RangeValidator implements Validator { private final RANGE_TYPE type; private final Object lower, upper; @@ -117,7 +117,7 @@ public String validate(String value) { } } - static class PatternSet implements Validator { + class PatternSet implements Validator { private final List expected = new ArrayList(); @@ -141,7 +141,7 @@ public String validate(String value) { } } - static class RatioValidator implements Validator { + class RatioValidator implements Validator { @Override public String validate(String value) { diff --git common/src/java/org/apache/hive/common/util/HiveStringUtils.java common/src/java/org/apache/hive/common/util/HiveStringUtils.java index c21c937..6aeebd4 100644 --- common/src/java/org/apache/hive/common/util/HiveStringUtils.java +++ common/src/java/org/apache/hive/common/util/HiveStringUtils.java @@ -51,6 +51,7 @@ @InterfaceStability.Unstable public class HiveStringUtils { + /** * Priority of the StringUtils shutdown hook. */ @@ -636,13 +637,13 @@ public static String getHostname() { catch(UnknownHostException uhe) {return "" + uhe;} } - + /** * The traditional binary prefixes, kilo, mega, ..., exa, * which can be represented by a 64-bit integer. * TraditionalBinaryPrefix symbol are case insensitive. 
*/ - public static enum TraditionalBinaryPrefix { + public enum TraditionalBinaryPrefix { KILO(1024), MEGA(KILO.value << 10), GIGA(MEGA.value << 10), diff --git contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java index fa4074f..2e06cb3 100644 --- contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java +++ contrib/src/java/org/apache/hadoop/hive/contrib/fileformat/base64/Base64TextInputFormat.java @@ -112,7 +112,6 @@ public boolean next(LongWritable key, BytesWritable value) throws IOException { int i; for (i = 0; i < binaryData.length && i < signature.length && binaryData[i] == signature[i]; ++i) { - ; } // return the row only if it's not corrupted diff --git contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java index 0c13f5e..6ef2e77 100644 --- contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java +++ contrib/src/java/org/apache/hadoop/hive/contrib/mr/GenericMR.java @@ -31,21 +31,21 @@ /** * This class attempts to provide a simple framework for writing Hive map/reduce * tasks in java. - * + * * The main benefit is that it deals with grouping the keys together for reduce * tasks. - * + * * Additionally, it deals with all system io... and provides something closer to * the hadoop m/r. - * + * * As an example, here's the wordcount reduce: - * + * * new GenericMR().reduce(System.in, System.out, new Reducer() { public void * reduce(String key, Iterator records, Output output) throws * Exception { int count = 0; - * + * * while (records.hasNext()) { count += Integer.parseInt(records.next()[1]); } - * + * * output.collect(new String[] { key, String.valueOf(count) }); }}); */ public final class GenericMR { @@ -96,7 +96,7 @@ private void handle(final Reader in, final Writer out, } } - private static interface RecordProcessor { + private interface RecordProcessor { void processNext(final RecordReader reader, final Output output) throws Exception; } @@ -169,7 +169,7 @@ private boolean hasNext() { private void close() throws Exception { reader.close(); - + } } diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/ErrorType.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/ErrorType.java index 63a4d62..84f0060 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/ErrorType.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/ErrorType.java @@ -72,7 +72,7 @@ * @param errorCode the error code * @param errorMessage the error message */ - private ErrorType(int errorCode, String errorMessage) { + ErrorType(int errorCode, String errorMessage) { this.errorCode = errorCode; this.errorMessage = errorMessage; } @@ -83,7 +83,7 @@ private ErrorType(int errorCode, String errorMessage) { * @param errorMessage the error message * @param appendCauseMessage should causal exception message be appended to error message */ - private ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage) { + ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage) { this.errorCode = errorCode; this.errorMessage = errorMessage; this.appendCauseMessage = appendCauseMessage; @@ -96,7 +96,7 @@ private ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage * @param appendCauseMessage should causal exception message be appended to error message * @param isRetriable is 
this a retriable error */ - private ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage, boolean isRetriable) { + ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage, boolean isRetriable) { this.errorCode = errorCode; this.errorMessage = errorMessage; this.appendCauseMessage = appendCauseMessage; diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java index cd0f177..e7b7202 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/schema/HCatFieldSchema.java @@ -32,11 +32,11 @@ @InterfaceAudience.Public @InterfaceStability.Evolving public class HCatFieldSchema implements Serializable { -/*the implementation of HCatFieldSchema is a bit messy since with the addition of parametrized -types (e.g. char(7)) we need to represent something richer than an enum but for backwards +/*the implementation of HCatFieldSchema is a bit messy since with the addition of parametrized +types (e.g. char(7)) we need to represent something richer than an enum but for backwards compatibility (and effort required to do full refactoring) this class has both 'type' and 'typeInfo'; similarly for mapKeyType/mapKeyTypeInfo */ - + public enum Type { /*this captures mapping of Hive type names to HCat type names; in the long run * we should just use Hive types directly but that is a larger refactoring effort @@ -54,23 +54,23 @@ CHAR(PrimitiveObjectInspector.PrimitiveCategory.CHAR), VARCHAR(PrimitiveObjectInspector.PrimitiveCategory.VARCHAR), BINARY(PrimitiveObjectInspector.PrimitiveCategory.BINARY), - DATE(PrimitiveObjectInspector.PrimitiveCategory.DATE), - TIMESTAMP(PrimitiveObjectInspector.PrimitiveCategory.TIMESTAMP), + DATE(PrimitiveObjectInspector.PrimitiveCategory.DATE), + TIMESTAMP(PrimitiveObjectInspector.PrimitiveCategory.TIMESTAMP), ARRAY(ObjectInspector.Category.LIST), MAP(ObjectInspector.Category.MAP), STRUCT(ObjectInspector.Category.STRUCT); - + private final ObjectInspector.Category category; private final PrimitiveObjectInspector.PrimitiveCategory primitiveCategory; - private Type(ObjectInspector.Category cat) { + Type(ObjectInspector.Category cat) { category = cat; primitiveCategory = null; - assert category != ObjectInspector.Category.PRIMITIVE : + assert category != ObjectInspector.Category.PRIMITIVE : "This c'tor should be used for complex category types"; } - private Type(PrimitiveObjectInspector.PrimitiveCategory primCat) { + Type(PrimitiveObjectInspector.PrimitiveCategory primCat) { category = ObjectInspector.Category.PRIMITIVE; primitiveCategory = primCat; } @@ -218,7 +218,7 @@ public HCatFieldSchema(String fieldName, Type type, String comment) throws HCatE public HCatFieldSchema(String fieldName, PrimitiveTypeInfo typeInfo, String comment) throws HCatException { this.fieldName = fieldName; - //HCatUtil.assertNotNull(fieldName, "fieldName cannot be null");//seems sometimes it can be + //HCatUtil.assertNotNull(fieldName, "fieldName cannot be null");//seems sometimes it can be // null, for ARRAY types in particular (which may be a complex type) this.category = Category.PRIMITIVE; this.typeInfo = typeInfo; @@ -263,7 +263,7 @@ private void setName(String name) { */ public HCatFieldSchema(String fieldName, Type type, Type mapKeyType, HCatSchema mapValueSchema, String comment) throws HCatException { assertTypeInCategory(type, Category.MAP, 
fieldName); - //Hive only supports primitive map keys: + //Hive only supports primitive map keys: //https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types#LanguageManualTypes-ComplexTypes assertTypeInCategory(mapKeyType, Category.PRIMITIVE, fieldName); this.fieldName = fieldName; @@ -274,16 +274,16 @@ public HCatFieldSchema(String fieldName, Type type, Type mapKeyType, HCatSchema this.subSchema.get(0).setName(null); this.comment = comment; } - public static HCatFieldSchema createMapTypeFieldSchema(String fieldName, PrimitiveTypeInfo mapKeyType, - HCatSchema mapValueSchema, + public static HCatFieldSchema createMapTypeFieldSchema(String fieldName, PrimitiveTypeInfo mapKeyType, + HCatSchema mapValueSchema, String comment) throws HCatException { - HCatFieldSchema mapSchema = new HCatFieldSchema(fieldName, Type.MAP, - Type.getPrimitiveHType(mapKeyType), + HCatFieldSchema mapSchema = new HCatFieldSchema(fieldName, Type.MAP, + Type.getPrimitiveHType(mapKeyType), mapValueSchema, comment); mapSchema.mapKeyTypeInfo = mapKeyType; return mapSchema; } - + public HCatSchema getStructSubSchema() throws HCatException { assertTypeInCategory(this.type, Category.STRUCT, this.fieldName); @@ -395,11 +395,11 @@ public boolean equals(Object obj) { @Override public int hashCode() { - //result could be cached if this object were to be made immutable... + //result could be cached if this object were to be made immutable... int result = 17; result = 31 * result + (category == null ? 0 : category.hashCode()); result = 31 * result + (fieldName == null ? 0 : fieldName.hashCode()); - result = 31 * result + (getTypeString() == null ? 0 : + result = 31 * result + (getTypeString() == null ? 0 : getTypeString().hashCode()); return result; } diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/ReaderContext.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/ReaderContext.java index edf3654..246cb0f 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/ReaderContext.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/ReaderContext.java @@ -36,6 +36,6 @@ * as one slave can be used to read multiple splits. 
* @return number of splits */ - public int numSplits(); + int numSplits(); } diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/state/StateProvider.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/state/StateProvider.java index 06bba68..1545a0a 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/state/StateProvider.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/data/transfer/state/StateProvider.java @@ -31,5 +31,5 @@ * * @return id */ - public int getId(); + int getId(); } diff --git hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java index ae60030..b8b76a5 100644 --- hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java +++ hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/HCatBaseStorer.java @@ -100,7 +100,7 @@ /** * valid values for ON_OOR_VALUE_OPT */ - public static enum OOR_VALUE_OPT_VALUES {Null, Throw} + public enum OOR_VALUE_OPT_VALUES {Null, Throw} protected String sign; //it's key that this is a per HCatStorer instance object private final DataLossLogger dataLossLogger = new DataLossLogger(); @@ -171,7 +171,7 @@ protected HCatSchema convertPigSchemaToHCatSchema(Schema pigSchema, HCatSchema t throw new FrontendException(he.getMessage(), PigHCatUtil.PIG_EXCEPTION_CODE, he); } } - + HCatSchema s = new HCatSchema(fieldSchemas); LOG.debug("convertPigSchemaToHCatSchema(computed)=(" + s + ")"); return s; @@ -189,7 +189,7 @@ public static boolean removeTupleFromBag(HCatFieldSchema hcatFieldSchema, FieldS return false; } /** - * Here we are processing HCat table schema as derived from metastore, + * Here we are processing HCat table schema as derived from metastore, * thus it should have information about all fields/sub-fields, but not for partition columns */ private HCatFieldSchema getHCatFSFromPigFS(FieldSchema fSchema, HCatFieldSchema hcatFieldSchema, @@ -273,7 +273,7 @@ private HCatFieldSchema getHCatFSFromPigFS(FieldSchema fSchema, HCatFieldSchema List valFSList = new ArrayList(1); if (hcatFieldSchema != null) { - return HCatFieldSchema.createMapTypeFieldSchema(fSchema.alias, hcatFieldSchema.getMapKeyTypeInfo(), + return HCatFieldSchema.createMapTypeFieldSchema(fSchema.alias, hcatFieldSchema.getMapKeyTypeInfo(), hcatFieldSchema.getMapValueSchema(), ""); } @@ -402,7 +402,7 @@ private Object getJavaObj(Object pigObj, HCatFieldSchema hcatFS) throws HCatExce return HiveDecimal.create(bd); case CHAR: String charVal = (String)pigObj; - CharTypeInfo cti = (CharTypeInfo)hcatFS.getTypeInfo(); + CharTypeInfo cti = (CharTypeInfo)hcatFS.getTypeInfo(); if(charVal.length() > cti.getLength()) { handleOutOfRangeValue(pigObj, hcatFS); return null; @@ -454,7 +454,7 @@ private void handleOutOfRangeValue(Object pigObj, HCatFieldSchema hcatFS) throws /** * depending on user config, throws an exception or logs a msg if the incoming Pig value is * out-of-range for target type. 
- * @param additionalMsg may be {@code null} + * @param additionalMsg may be {@code null} */ private void handleOutOfRangeValue(Object pigObj, HCatFieldSchema hcatFS, String additionalMsg) throws BackendException { String msg = "Pig value '" + pigObj + "' is outside the bounds of column " + hcatFS.getName() + @@ -507,8 +507,8 @@ protected void doSchemaValidations(Schema pigSchema, HCatSchema tblSchema) throw * @throws HCatException * @throws FrontendException */ - private void validateSchema(FieldSchema pigField, HCatFieldSchema hcatField, - Schema topLevelPigSchema, HCatSchema topLevelHCatSchema, + private void validateSchema(FieldSchema pigField, HCatFieldSchema hcatField, + Schema topLevelPigSchema, HCatSchema topLevelHCatSchema, int columnPos) throws HCatException, FrontendException { validateAlias(pigField.alias); @@ -528,7 +528,7 @@ private void validateSchema(FieldSchema pigField, HCatFieldSchema hcatField, case DataType.BAG: HCatSchema arrayElementSchema = hcatField == null ? null : hcatField.getArrayElementSchema(); for (FieldSchema innerField : pigField.schema.getField(0).schema.getFields()) { - validateSchema(innerField, getColFromSchema(pigField.alias, arrayElementSchema), + validateSchema(innerField, getColFromSchema(pigField.alias, arrayElementSchema), topLevelPigSchema, topLevelHCatSchema, columnPos); } break; @@ -564,7 +564,7 @@ else if(hcatField != null) { throwTypeMismatchException(type, Lists.newArrayList(Type.BOOLEAN), hcatField, columnPos); break; case DataType.CHARARRAY: - throwTypeMismatchException(type, Lists.newArrayList(Type.STRING, Type.CHAR, Type.VARCHAR), + throwTypeMismatchException(type, Lists.newArrayList(Type.STRING, Type.CHAR, Type.VARCHAR), hcatField, columnPos); break; case DataType.DOUBLE: @@ -574,15 +574,15 @@ else if(hcatField != null) { throwTypeMismatchException(type, Lists.newArrayList(Type.FLOAT), hcatField, columnPos); break; case DataType.INTEGER: - throwTypeMismatchException(type, Lists.newArrayList(Type.INT, Type.BIGINT, + throwTypeMismatchException(type, Lists.newArrayList(Type.INT, Type.BIGINT, Type.TINYINT, Type.SMALLINT), hcatField, columnPos); break; case DataType.LONG: throwTypeMismatchException(type, Lists.newArrayList(Type.BIGINT), hcatField, columnPos); break; default: - throw new FrontendException("'" + type + - "' Pig datatype in column " + columnPos + "(0-based) is not supported by HCat", + throw new FrontendException("'" + type + + "' Pig datatype in column " + columnPos + "(0-based) is not supported by HCat", PigHCatUtil.PIG_EXCEPTION_CODE); } } @@ -596,12 +596,12 @@ else if(hcatField != null) { } } private static void throwTypeMismatchException(byte pigDataType, - List hcatRequiredType, HCatFieldSchema hcatActualField, + List hcatRequiredType, HCatFieldSchema hcatActualField, int columnPos) throws FrontendException { if(!hcatRequiredType.contains(hcatActualField.getType())) { - throw new FrontendException( - "Pig '" + DataType.findTypeName(pigDataType) + "' type in column " + - columnPos + "(0-based) cannot map to HCat '" + + throw new FrontendException( + "Pig '" + DataType.findTypeName(pigDataType) + "' type in column " + + columnPos + "(0-based) cannot map to HCat '" + hcatActualField.getType() + "'type. 
Target filed must be of HCat type {" + StringUtils.join(hcatRequiredType, " or ") + "}"); } @@ -639,7 +639,7 @@ public void storeStatistics(ResourceStatistics stats, String arg1, Job job) thro } /** - * todo: when job is complete, should print the msgCount table to log + * todo: when job is complete, should print the msgCount table to log */ private static final class DataLossLogger { private static final Map msgCount = new HashMap(); diff --git hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java index 4d77057..67a7110 100644 --- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java +++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/HCatEventMessage.java @@ -30,7 +30,7 @@ /** * Enumeration of all supported types of Metastore operations. */ - public static enum EventType { + public enum EventType { CREATE_DATABASE(HCatConstants.HCAT_CREATE_DATABASE_EVENT), DROP_DATABASE(HCatConstants.HCAT_DROP_DATABASE_EVENT), diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java index 24277c4..1fd84a8 100644 --- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java +++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/RecordWriter.java @@ -26,18 +26,18 @@ * @param transactionId the ID of the Txn in which the write occurs * @param record the record to be written */ - public void write(long transactionId, byte[] record) throws StreamingException; + void write(long transactionId, byte[] record) throws StreamingException; /** Flush records from buffer. Invoked by TransactionBatch.commit() */ - public void flush() throws StreamingException; + void flush() throws StreamingException; /** Clear bufferred writes. Invoked by TransactionBatch.abort() */ - public void clear() throws StreamingException; + void clear() throws StreamingException; /** Acquire a new RecordUpdater. Invoked when * StreamingConnection.fetchTransactionBatch() is called */ - public void newBatch(Long minTxnId, Long maxTxnID) throws StreamingException; + void newBatch(Long minTxnId, Long maxTxnID) throws StreamingException; /** Close the RecordUpdater. 
Invoked by TransactionBatch.close() */ - public void closeBatch() throws StreamingException; + void closeBatch() throws StreamingException; } diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java index 25acff0..8d640d6 100644 --- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java +++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/StreamingConnection.java @@ -37,13 +37,12 @@ * @throws StreamingException * @return a batch of transactions */ - public TransactionBatch fetchTransactionBatch(int numTransactionsHint, - RecordWriter writer) + TransactionBatch fetchTransactionBatch(int numTransactionsHint, RecordWriter writer) throws ConnectionError, StreamingException, InterruptedException; /** * Close connection */ - public void close(); + void close(); } diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java index d9a083d..b4603b6 100644 --- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java +++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/TransactionBatch.java @@ -29,46 +29,46 @@ * */ public interface TransactionBatch { - public enum TxnState {INACTIVE, OPEN, COMMITTED, ABORTED } + enum TxnState {INACTIVE, OPEN, COMMITTED, ABORTED } /** * Activate the next available transaction in the current transaction batch * @throws StreamingException if not able to switch to next Txn * @throws InterruptedException if call in interrupted */ - public void beginNextTransaction() throws StreamingException, InterruptedException; + void beginNextTransaction() throws StreamingException, InterruptedException; /** * Get Id of currently open transaction * @return transaction id */ - public Long getCurrentTxnId(); + Long getCurrentTxnId(); /** * get state of current transaction */ - public TxnState getCurrentTransactionState(); + TxnState getCurrentTransactionState(); /** * Commit the currently open transaction * @throws StreamingException if there are errors committing * @throws InterruptedException if call in interrupted */ - public void commit() throws StreamingException, InterruptedException; + void commit() throws StreamingException, InterruptedException; /** * Abort the currently open transaction * @throws StreamingException if there are errors * @throws InterruptedException if call in interrupted */ - public void abort() throws StreamingException, InterruptedException; + void abort() throws StreamingException, InterruptedException; /** * Remaining transactions are the ones that are not committed or aborted or open. * Current open transaction is not considered part of remaining txns. * @return number of transactions remaining this batch. 
*/ - public int remainingTransactions(); + int remainingTransactions(); /** @@ -77,14 +77,14 @@ * @throws StreamingException if there are errors when writing * @throws InterruptedException if call in interrupted */ - public void write(byte[] record) throws StreamingException, InterruptedException; + void write(byte[] record) throws StreamingException, InterruptedException; /** * Write records using RecordWriter * @throws StreamingException if there are errors when writing * @throws InterruptedException if call in interrupted */ - public void write(Collection records) throws StreamingException, InterruptedException; + void write(Collection records) throws StreamingException, InterruptedException; /** @@ -92,12 +92,12 @@ * to keep them from expiring * @throws StreamingException if there are errors */ - public void heartbeat() throws StreamingException; + void heartbeat() throws StreamingException; /** * Close the TransactionBatch * @throws StreamingException if there are errors closing batch * @throws InterruptedException if call in interrupted */ - public void close() throws StreamingException, InterruptedException; + void close() throws StreamingException, InterruptedException; } diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecService.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecService.java index 6f42b7b..6fb98af 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecService.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/ExecService.java @@ -25,11 +25,9 @@ import org.apache.commons.exec.ExecuteException; public interface ExecService { - public ExecBean run(String program, List args, - Map env) + ExecBean run(String program, List args, Map env) throws NotAuthorizedException, BusyException, ExecuteException, IOException; - public ExecBean runUnlimited(String program, List args, - Map env) + ExecBean runUnlimited(String program, List args, Map env) throws NotAuthorizedException, ExecuteException, IOException; } diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java index c7e92cf..d9a5c10 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/LauncherDelegator.java @@ -47,7 +47,7 @@ public class LauncherDelegator extends TempletonDelegator { private static final Log LOG = LogFactory.getLog(LauncherDelegator.class); protected String runAs = null; - static public enum JobType {JAR, STREAMING, PIG, HIVE, SQOOP} + public enum JobType {JAR, STREAMING, PIG, HIVE, SQOOP} private boolean secureMeatastoreAccess = false; private final String HIVE_SHIMS_FILENAME_PATTERN = ".*hive-shims.*"; @@ -90,7 +90,7 @@ public EnqueueBean enqueueController(String user, Map userArgs, if (id == null) { throw new QueueException("Unable to get job id"); } - + registerJob(id, user, callback, userArgs); return new EnqueueBean(id); @@ -246,7 +246,7 @@ public static void addDef(List args, String name, String val) { } /** * This is called by subclasses when they determined that the sumbmitted job requires - * metastore access (e.g. Pig job that uses HCatalog). This then determines if + * metastore access (e.g. Pig job that uses HCatalog). 
This then determines if * secure access is required and causes TempletonControllerJob to set up a delegation token. * @see TempletonControllerJob */ diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java index d2127e1..34c47f8 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java @@ -653,7 +653,7 @@ public EnqueueBean mapReduceStreaming(@FormParam("input") List inputs, verifyParam(inputs, "input"); verifyParam(mapper, "mapper"); verifyParam(reducer, "reducer"); - + Map userArgs = new HashMap(); userArgs.put("user.name", getDoAsUser()); userArgs.put("input", inputs); @@ -680,8 +680,8 @@ public EnqueueBean mapReduceStreaming(@FormParam("input") List inputs, /** * Run a MapReduce Jar job. * Params correspond to the REST api params - * @param usesHcatalog if {@code true}, means the Jar uses HCat and thus needs to access - * metastore, which requires additional steps for WebHCat to perform in a secure cluster. + * @param usesHcatalog if {@code true}, means the Jar uses HCat and thus needs to access + * metastore, which requires additional steps for WebHCat to perform in a secure cluster. * @param callback URL which WebHCat will call when the hive job finishes * @see org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob */ @@ -703,7 +703,7 @@ public EnqueueBean mapReduceJar(@FormParam("jar") String jar, verifyUser(); verifyParam(jar, "jar"); verifyParam(mainClass, "class"); - + Map userArgs = new HashMap(); userArgs.put("user.name", getDoAsUser()); userArgs.put("jar", jar); @@ -729,7 +729,7 @@ public EnqueueBean mapReduceJar(@FormParam("jar") String jar, * Run a Pig job. * Params correspond to the REST api params. If '-useHCatalog' is in the {@code pigArgs, usesHcatalog}, * is interpreted as true. - * @param usesHcatalog if {@code true}, means the Pig script uses HCat and thus needs to access + * @param usesHcatalog if {@code true}, means the Pig script uses HCat and thus needs to access * metastore, which requires additional steps for WebHCat to perform in a secure cluster. * This does nothing to ensure that Pig is installed on target node in the cluster. * @param callback URL which WebHCat will call when the hive job finishes @@ -752,7 +752,7 @@ public EnqueueBean pig(@FormParam("execute") String execute, if (execute == null && srcFile == null) { throw new BadParam("Either execute or file parameter required"); } - + //add all function arguments to a map Map userArgs = new HashMap(); userArgs.put("user.name", getDoAsUser()); @@ -819,7 +819,7 @@ public EnqueueBean sqoop(@FormParam("command") String command, * @param execute SQL statement to run, equivalent to "-e" from hive command line * @param srcFile name of hive script file to run, equivalent to "-f" from hive * command line - * @param hiveArgs additional command line argument passed to the hive command line. + * @param hiveArgs additional command line argument passed to the hive command line. 
* Please check https://cwiki.apache.org/Hive/languagemanual-cli.html * for detailed explanation of command line arguments * @param otherFiles additional files to be shipped to the launcher, such as the jars @@ -846,7 +846,7 @@ public EnqueueBean hive(@FormParam("execute") String execute, if (execute == null && srcFile == null) { throw new BadParam("Either execute or file parameter required"); } - + //add all function arguments to a map Map userArgs = new HashMap(); userArgs.put("user.name", getDoAsUser()); @@ -903,42 +903,42 @@ public QueueStatusBean deleteJobId(@PathParam("jobid") String jobid) * Example usages: * 1. curl -s 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan' * Return all the Job IDs submitted by hsubramaniyan - * 2. curl -s + * 2. curl -s * 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&showall=true' * Return all the Job IDs that are visible to hsubramaniyan * 3. curl -s * 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&jobid=job_201312091733_0003' * Return all the Job IDs for hsubramaniyan after job_201312091733_0003. - * 4. curl -s 'http://localhost:50111/templeton/v1/jobs? + * 4. curl -s 'http://localhost:50111/templeton/v1/jobs? * user.name=hsubramaniyan&jobid=job_201312091733_0003&numrecords=5' - * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after job_201312091733_0003. - * 5. curl -s + * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after job_201312091733_0003. + * 5. curl -s * 'http://localhost:50111/templeton/v1/jobs?user.name=hsubramaniyan&numrecords=5' - * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after sorting the Job ID list + * Return the first 5(atmost) Job IDs submitted by hsubramaniyan after sorting the Job ID list * lexicographically. *
<p>
*
<p>
* Supporting pagination using "jobid" and "numrecords" parameters: * Step 1: Get the start "jobid" = job_xxx_000, "numrecords" = n - * Step 2: Issue a curl command by specifying the user-defined "numrecords" and "jobid" - * Step 3: If list obtained from Step 2 has size equal to "numrecords", retrieve the list's + * Step 2: Issue a curl command by specifying the user-defined "numrecords" and "jobid" + * Step 3: If list obtained from Step 2 has size equal to "numrecords", retrieve the list's * last record and get the Job Id of the last record as job_yyy_k, else quit. * Step 4: set "jobid"=job_yyy_k and go to step 2. - *
<p>
+ *
<p>
* @param fields If "fields" set to "*", the request will return full details of the job. * If "fields" is missing, will only return the job ID. Currently the value can only * be "*", other values are not allowed and will throw exception. * @param showall If "showall" is set to "true", the request will return all jobs the user * has permission to view, not only the jobs belonging to the user. - * @param jobid If "jobid" is present, the records whose Job Id is lexicographically greater - * than "jobid" are only returned. For example, if "jobid" = "job_201312091733_0001", - * the jobs whose Job ID is greater than "job_201312091733_0001" are returned. The number of + * @param jobid If "jobid" is present, the records whose Job Id is lexicographically greater + * than "jobid" are only returned. For example, if "jobid" = "job_201312091733_0001", + * the jobs whose Job ID is greater than "job_201312091733_0001" are returned. The number of * records returned depends on the value of "numrecords". - * @param numrecords If the "jobid" and "numrecords" parameters are present, the top #numrecords - * records appearing after "jobid" will be returned after sorting the Job Id list - * lexicographically. - * If "jobid" parameter is missing and "numrecords" is present, the top #numrecords will - * be returned after lexicographically sorting the Job Id list. If "jobid" parameter is present + * @param numrecords If the "jobid" and "numrecords" parameters are present, the top #numrecords + * records appearing after "jobid" will be returned after sorting the Job Id list + * lexicographically. + * If "jobid" parameter is missing and "numrecords" is present, the top #numrecords will + * be returned after lexicographically sorting the Job Id list. If "jobid" parameter is present * and "numrecords" is missing, all the records whose Job Id is greater than "jobid" are returned. * @return list of job items based on the filter conditions specified by the user. */ @@ -950,7 +950,7 @@ public QueueStatusBean deleteJobId(@PathParam("jobid") String jobid) @QueryParam("jobid") String jobid, @QueryParam("numrecords") String numrecords) throws NotAuthorizedException, BadParam, IOException, InterruptedException { - + verifyUser(); boolean showDetails = false; @@ -971,9 +971,9 @@ public QueueStatusBean deleteJobId(@PathParam("jobid") String jobid) try { if (numrecords != null) { numRecords = Integer.parseInt(numrecords); - if (numRecords <= 0) { - throw new BadParam("numrecords should be an integer > 0"); - } + if (numRecords <= 0) { + throw new BadParam("numrecords should be an integer > 0"); + } } else { numRecords = -1; @@ -983,18 +983,18 @@ public QueueStatusBean deleteJobId(@PathParam("jobid") String jobid) throw new BadParam("Invalid numrecords format: numrecords should be an integer > 0"); } - // Sort the list lexicographically + // Sort the list lexicographically Collections.sort(list); for (String job : list) { // If numRecords = -1, fetch all records. // Hence skip all the below checks when numRecords = -1. 
if (numRecords != -1) { - // If currRecord >= numRecords, we have already fetched the top #numRecords + // If currRecord >= numRecords, we have already fetched the top #numRecords if (currRecord >= numRecords) { break; - } - // If the current record needs to be returned based on the + } + // If the current record needs to be returned based on the // filter conditions specified by the user, increment the counter else if ((jobid != null && job.compareTo(jobid) > 0) || jobid == null) { currRecord++; @@ -1101,7 +1101,7 @@ public void verifyDdlParam(String param, String name) * value of user.name query param, in kerberos mode it's the kinit'ed user. */ private String getRequestingUser() { - if (theSecurityContext == null) { + if (theSecurityContext == null) { return null; } String userName = null; @@ -1114,7 +1114,7 @@ private String getRequestingUser() { if(userName == null) { return null; } - //map hue/foo.bar@something.com->hue since user group checks + //map hue/foo.bar@something.com->hue since user group checks // and config files are in terms of short name return UserGroupInformation.createRemoteUser(userName).getShortUserName(); } @@ -1161,7 +1161,7 @@ private static String getRequestingHost(String requestingUser, HttpServletReques return unkHost; } } - + private void checkEnableLogPrerequisite(boolean enablelog, String statusdir) throws BadParam { if (enablelog && !TempletonUtils.isset(statusdir)) throw new BadParam("enablelog is only applicable when statusdir is set"); diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TableDesc.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TableDesc.java index f7cc3e9..3ddb8f5 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TableDesc.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TableDesc.java @@ -137,7 +137,7 @@ public boolean equals(Object o) { * Ther ASC or DESC sort order. 
*/ @XmlRootElement - public static enum SortDirectionDesc { + public enum SortDirectionDesc { ASC, DESC } diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java index addd0c2..36b64da 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobState.java @@ -169,9 +169,9 @@ public String getParent() throws IOException { String childJobIDs = getField("children"); if (childJobIDs != null) { for (String jobid : childJobIDs.split(",")) { - children.add(new JobState(jobid, config)); + children.add(new JobState(jobid, config)); } - } + } return children; } diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java index ccf6107..f537e63 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java @@ -19,43 +19,42 @@ package org.apache.hive.hcatalog.templeton.tool; public interface JobSubmissionConstants { - public static final String COPY_NAME = "templeton.copy"; - public static final String STATUSDIR_NAME = "templeton.statusdir"; - public static final String ENABLE_LOG = "templeton.enablelog"; - public static final String JOB_TYPE = "templeton.jobtype"; - public static final String JAR_ARGS_NAME = "templeton.args"; - public static final String TEMPLETON_JOB_LAUNCH_TIME_NAME = "templeton.job.launch.time"; - public static final String OVERRIDE_CLASSPATH = "templeton.override-classpath"; - public static final String STDOUT_FNAME = "stdout"; - public static final String STDERR_FNAME = "stderr"; - public static final String EXIT_FNAME = "exit"; - public static final int WATCHER_TIMEOUT_SECS = 10; - public static final int KEEP_ALIVE_MSEC = 60 * 1000; + String COPY_NAME = "templeton.copy"; + String STATUSDIR_NAME = "templeton.statusdir"; + String ENABLE_LOG = "templeton.enablelog"; + String JOB_TYPE = "templeton.jobtype"; + String JAR_ARGS_NAME = "templeton.args"; + String TEMPLETON_JOB_LAUNCH_TIME_NAME = "templeton.job.launch.time"; + String OVERRIDE_CLASSPATH = "templeton.override-classpath"; + String STDOUT_FNAME = "stdout"; + String STDERR_FNAME = "stderr"; + String EXIT_FNAME = "exit"; + int WATCHER_TIMEOUT_SECS = 10; + int KEEP_ALIVE_MSEC = 60 * 1000; /* * The = sign in the string for TOKEN_FILE_ARG_PLACEHOLDER is required because * org.apache.hadoop.util.GenericOptionsParser.preProcessForWindows() prepares * arguments expecting an = sign. It will fail to prepare the arguments correctly * without the = sign present. - */ - public static final String TOKEN_FILE_ARG_PLACEHOLDER = + */ String TOKEN_FILE_ARG_PLACEHOLDER = "__MR_JOB_CREDENTIALS_OPTION=WEBHCAT_TOKEN_FILE_LOCATION__"; - public static final String TOKEN_FILE_ARG_PLACEHOLDER_TEZ = + String TOKEN_FILE_ARG_PLACEHOLDER_TEZ = "__TEZ_CREDENTIALS_OPTION=WEBHCAT_TOKEN_FILE_LOCATION_TEZ__"; // MRv2 job tag used to identify Templeton launcher child jobs. Each child job // will be tagged with the parent jobid so that on launcher task restart, all // previously running child jobs can be killed before the child job is launched // again. 
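For context on the MAPREDUCE_JOB_TAGS constant below, a minimal sketch of how a launcher can tag a child job's configuration; the class, method, and job id shown here are illustrative only, not part of this patch:

    import org.apache.hadoop.conf.Configuration;

    public final class JobTagSketch {
      /** Sketch only: tag a child job so stale children can later be found (and killed) by tag. */
      public static void tagChildJob(Configuration childConf, String parentJobId) {
        // "mapreduce.job.tags" is the MRv2 property named by MAPREDUCE_JOB_TAGS below.
        childConf.set("mapreduce.job.tags", parentJobId);
      }
    }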
- public static final String MAPREDUCE_JOB_TAGS = "mapreduce.job.tags"; - public static final String MAPREDUCE_JOB_TAGS_ARG_PLACEHOLDER = + String MAPREDUCE_JOB_TAGS = "mapreduce.job.tags"; + String MAPREDUCE_JOB_TAGS_ARG_PLACEHOLDER = "__MR_JOB_TAGS_OPTION=MR_JOB_TAGS_JOBID__"; /** * constants needed for Pig job submission * The string values here are what Pig expects to see in it's environment */ - public static interface PigConstants { - public static final String HIVE_HOME = "HIVE_HOME"; - public static final String HCAT_HOME = "HCAT_HOME"; - public static final String PIG_OPTS = "PIG_OPTS"; + interface PigConstants { + String HIVE_HOME = "HIVE_HOME"; + String HCAT_HOME = "HCAT_HOME"; + String PIG_OPTS = "PIG_OPTS"; } } diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LogRetriever.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LogRetriever.java index 6dc27f4..0362c49 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LogRetriever.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LogRetriever.java @@ -75,7 +75,8 @@ static class AttemptInfo { public String id; public URL baseUrl; - public enum AttemptStatus {COMPLETED, FAILED}; + public enum AttemptStatus {COMPLETED, FAILED} + AttemptStatus status; public String startTime; public String endTime; diff --git hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonStorage.java hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonStorage.java index 97d572c..8938182 100644 --- hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonStorage.java +++ hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonStorage.java @@ -44,18 +44,18 @@ */ public interface TempletonStorage { // These are the possible types referenced by 'type' below. - public enum Type { + enum Type { UNKNOWN, JOB, JOBTRACKING, TEMPLETONOVERHEAD } - public static final String STORAGE_CLASS = "templeton.storage.class"; - public static final String STORAGE_ROOT = "templeton.storage.root"; + String STORAGE_CLASS = "templeton.storage.class"; + String STORAGE_ROOT = "templeton.storage.root"; /** * Start the cleanup process for this storage type. * @param config */ - public void startCleanup(Configuration config); + void startCleanup(Configuration config); /** * Save a single key/value pair for a specific job id. @@ -64,7 +64,7 @@ * @param key The name of the field to save * @param val The value of the field to save */ - public void saveField(Type type, String id, String key, String val) + void saveField(Type type, String id, String key, String val) throws NotFoundException; /** @@ -76,7 +76,7 @@ public void saveField(Type type, String id, String key, String val) * @return The value of the field requested, or null if not * found. */ - public String getField(Type type, String id, String key); + String getField(Type type, String id, String key); /** * Get all the name/value pairs stored for this id. @@ -90,7 +90,7 @@ public void saveField(Type type, String id, String key, String val) * @param id The String id of this data grouping (jobid, etc.) * @return A Map of key/value pairs found for this type/id. 
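The storage contract documented in this interface can be illustrated with a short sketch; it is not code from the patch, and `storage` stands for whichever TempletonStorage implementation is configured:

    import org.apache.hive.hcatalog.templeton.tool.TempletonStorage;

    final class StorageSketch {
      /** Sketch only: persist one field for a job id and read it back. */
      static String roundTrip(TempletonStorage storage, String jobId) throws Exception {
        storage.saveField(TempletonStorage.Type.JOB, jobId, "user", "hive"); // write a key/value pair
        return storage.getField(TempletonStorage.Type.JOB, jobId, "user");   // read it back (null if absent)
      }
    }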
*/ - public Map getFields(Type type, String id); + Map getFields(Type type, String id); /** * Delete a data grouping (all data for a jobid, all tracking data @@ -102,14 +102,14 @@ public void saveField(Type type, String id, String key, String val) * @return True if successful, false if not, throws NotFoundException * if the id wasn't found. */ - public boolean delete(Type type, String id) throws NotFoundException; + boolean delete(Type type, String id) throws NotFoundException; /** * Get the id of each data grouping in the storage system. * * @return An ArrayList of ids. */ - public List getAll(); + List getAll(); /** * Get the id of each data grouping of a given type in the storage @@ -117,7 +117,7 @@ public void saveField(Type type, String id, String key, String val) * @param type The data type (as listed above) * @return An ArrayList of ids. */ - public List getAllForType(Type type); + List getAllForType(Type type); /** * Get the id of each data grouping that has the specific key/value @@ -126,7 +126,7 @@ public void saveField(Type type, String id, String key, String val) * @param value The value of the field to search for * @return An ArrayList of ids. */ - public List getAllForKey(String key, String value); + List getAllForKey(String key, String value); /** * Get the id of each data grouping of a given type that has the @@ -136,18 +136,17 @@ public void saveField(Type type, String id, String key, String val) * @param value The value of the field to search for * @return An ArrayList of ids. */ - public List getAllForTypeAndKey(Type type, String key, - String value); + List getAllForTypeAndKey(Type type, String key, String value); /** * For storage methods that require a connection, this is a hint * that it's time to open a connection. */ - public void openStorage(Configuration config) throws IOException; + void openStorage(Configuration config) throws IOException; /** * For storage methods that require a connection, this is a hint * that it's time to close the connection. */ - public void closeStorage() throws IOException; + void closeStorage() throws IOException; } diff --git hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestWebHCatE2e.java hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestWebHCatE2e.java index bf05ba9..0030388 100644 --- hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestWebHCatE2e.java +++ hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestWebHCatE2e.java @@ -263,7 +263,7 @@ String getAssertMsg() { return methodName + " " + submittedURL + " " + responseBody; } } - private static enum HTTP_METHOD_TYPE {GET, POST, DELETE, PUT} + private enum HTTP_METHOD_TYPE {GET, POST, DELETE, PUT} private static MethodCallRetVal doHttpCall(String uri, HTTP_METHOD_TYPE type) throws IOException { return doHttpCall(uri, type, null, null); } diff --git hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java index 0ad8f89..d748401 100644 --- hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java +++ hwi/src/java/org/apache/hadoop/hive/hwi/HWISessionItem.java @@ -53,7 +53,7 @@ /** Represents the state a session item can be in. */ public enum WebSessionItemStatus { NEW, READY, QUERY_SET, QUERY_RUNNING, DESTROY, KILL_QUERY - }; + } /** The Web Interface sessionName this is used to identify the session. 
*/ private final String sessionName; diff --git hwi/web/session_manage.jsp hwi/web/session_manage.jsp index ef65289..60160dd 100644 --- hwi/web/session_manage.jsp +++ hwi/web/session_manage.jsp @@ -19,7 +19,7 @@ --%> <%@page import="org.apache.hadoop.hive.hwi.*" %> <%@page errorPage="error_page.jsp" %> -<% HWISessionManager hs = (HWISessionManager) application.getAttribute("hs");; %> +<% HWISessionManager hs = (HWISessionManager) application.getAttribute("hs"); %> <% HWIAuth auth = (HWIAuth) session.getAttribute("auth"); %> <% if (auth==null) { %> diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAuthorizationApiAuthorizer.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAuthorizationApiAuthorizer.java index e65bf4d..6757c22 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAuthorizationApiAuthorizer.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAuthorizationApiAuthorizer.java @@ -71,7 +71,7 @@ protected static void setup() throws Exception { } interface FunctionInvoker { - public void invoke() throws Exception; + void invoke() throws Exception; } /** diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 130fd67..622a288 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -349,7 +349,7 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf // Test append_partition_by_name client.appendPartition(dbName, tblName, partName); Partition part5 = client.getPartition(dbName, tblName, part.getValues()); - assertTrue("Append partition by name failed", part5.getValues().equals(vals));; + assertTrue("Append partition by name failed", part5.getValues().equals(vals)); Path part5Path = new Path(part5.getSd().getLocation()); assertTrue(fs.exists(part5Path)); diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/security/DummyHiveMetastoreAuthorizationProvider.java itests/util/src/main/java/org/apache/hadoop/hive/ql/security/DummyHiveMetastoreAuthorizationProvider.java index ed4b441..92d455b 100644 --- itests/util/src/main/java/org/apache/hadoop/hive/ql/security/DummyHiveMetastoreAuthorizationProvider.java +++ itests/util/src/main/java/org/apache/hadoop/hive/ql/security/DummyHiveMetastoreAuthorizationProvider.java @@ -49,7 +49,7 @@ PARTITION, TABLE_AND_PARTITION, AUTHORIZATION - }; + } class AuthCallContext { @@ -81,7 +81,7 @@ private Configuration conf; public static final Log LOG = LogFactory.getLog( - DummyHiveMetastoreAuthorizationProvider.class);; + DummyHiveMetastoreAuthorizationProvider.class); @Override public Configuration getConf() { diff --git jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java index 5898a6b..ec5e555 100644 --- jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java +++ jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java @@ -160,7 +160,7 @@ public InputStream getBinaryStream(int columnIndex) throws SQLException { } public InputStream getBinaryStream(String columnName) throws SQLException { - return getBinaryStream(findColumn(columnName)); + return getBinaryStream(findColumn(columnName)); } public Blob getBlob(int i) throws SQLException { diff --git 
metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java index d872be5..581b837 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/AlterHandler.java @@ -51,9 +51,8 @@ * @throws MetaException * thrown if there is any other erro */ - public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname, - String name, Table newTable) throws InvalidOperationException, - MetaException; + void alterTable(RawStore msdb, Warehouse wh, String dbname, String name, Table newTable) + throws InvalidOperationException, MetaException; /** * handles alter partition @@ -75,8 +74,8 @@ public abstract void alterTable(RawStore msdb, Warehouse wh, String dbname, * @throws AlreadyExistsException * @throws MetaException */ - public abstract Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, - final String name, final List part_vals, final Partition new_part) + Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, + final String name, final List part_vals, final Partition new_part) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException; @@ -98,8 +97,8 @@ public abstract Partition alterPartition(final RawStore msdb, Warehouse wh, fina * @throws AlreadyExistsException * @throws MetaException */ - public abstract List alterPartitions(final RawStore msdb, Warehouse wh, - final String dbname, final String name, final List new_part) + List alterPartitions(final RawStore msdb, Warehouse wh, final String dbname, + final String name, final List new_part) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException; } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java index 570b358..2d76279 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java @@ -39,7 +39,7 @@ * * @param table new table definition */ - public void preCreateTable(Table table) + void preCreateTable(Table table) throws MetaException; /** @@ -48,7 +48,7 @@ public void preCreateTable(Table table) * * @param table new table definition */ - public void rollbackCreateTable(Table table) + void rollbackCreateTable(Table table) throws MetaException; /** @@ -57,7 +57,7 @@ public void rollbackCreateTable(Table table) * * @param table new table definition */ - public void commitCreateTable(Table table) + void commitCreateTable(Table table) throws MetaException; /** @@ -66,7 +66,7 @@ public void commitCreateTable(Table table) * * @param table table definition */ - public void preDropTable(Table table) + void preDropTable(Table table) throws MetaException; /** @@ -75,7 +75,7 @@ public void preDropTable(Table table) * * @param table table definition */ - public void rollbackDropTable(Table table) + void rollbackDropTable(Table table) throws MetaException; /** @@ -87,6 +87,6 @@ public void rollbackDropTable(Table table) * @param deleteData whether to delete data as well; this should typically * be ignored in the case of an external table */ - public void commitDropTable(Table table, boolean deleteData) + void commitDropTable(Table table, boolean deleteData) throws MetaException; } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java 
metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java index 1cdae9b..4b32b2a 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java @@ -33,7 +33,5 @@ * * @return hook, or null if none registered */ - public HiveMetaHook getHook(Table tbl) throws MetaException; + HiveMetaHook getHook(Table tbl) throws MetaException; } - -// End HiveMetaHookLoader.java diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index a5ca093..d3fcb38 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -286,7 +286,7 @@ protected Formatter initialValue() { } }; - private final void logAuditEvent(String cmd) { + private void logAuditEvent(String cmd) { if (cmd == null) { return; } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IHMSHandler.java metastore/src/java/org/apache/hadoop/hive/metastore/IHMSHandler.java index 1675751..400a50f 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/IHMSHandler.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/IHMSHandler.java @@ -23,5 +23,5 @@ public interface IHMSHandler extends ThriftHiveMetastore.Iface { - public abstract void setConf(Configuration conf); + void setConf(Configuration conf); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index d6e849f..e7e26dc 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -79,14 +79,14 @@ * Returns whether current client is compatible with conf or not * @return */ - public boolean isCompatibleWith(HiveConf conf); + boolean isCompatibleWith(HiveConf conf); /** * Tries to reconnect this MetaStoreClient to the MetaStore. */ - public void reconnect() throws MetaException; + void reconnect() throws MetaException; - public void close(); + void close(); /** * Get the names of all databases in the MetaStore that match the given pattern.
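The IMetaStoreClient hunks that follow are easier to read with a minimal usage sketch in mind; this is illustrative only and assumes a metastore reachable through the local HiveConf:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;

    public class MetaStoreClientSketch {
      public static void main(String[] args) throws Exception {
        // HiveMetaStoreClient is the stock IMetaStoreClient implementation.
        IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        try {
          for (String db : client.getDatabases("*")) {        // databases matching the pattern
            for (String table : client.getAllTables(db)) {    // all table names in that database
              System.out.println(db + "." + table);
            }
          }
        } finally {
          client.close();                                     // drop the Thrift connection
        }
      }
    }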
@@ -95,7 +95,7 @@ * @throws MetaException * @throws TException */ - public List getDatabases(String databasePattern) + List getDatabases(String databasePattern) throws MetaException, TException; /** @@ -104,7 +104,7 @@ * @throws MetaException * @throws TException */ - public List getAllDatabases() + List getAllDatabases() throws MetaException, TException; /** @@ -117,7 +117,7 @@ * @throws TException * @throws UnknownDBException */ - public List getTables(String dbName, String tablePattern) + List getTables(String dbName, String tablePattern) throws MetaException, TException, UnknownDBException; /** @@ -128,7 +128,7 @@ * @throws TException * @throws UnknownDBException */ - public List getAllTables(String dbName) + List getAllTables(String dbName) throws MetaException, TException, UnknownDBException; /** @@ -167,7 +167,7 @@ * The maximum number of tables returned * @return A list of table names that match the desired filter */ - public List listTableNamesByFilter(String dbName, String filter, short maxTables) + List listTableNamesByFilter(String dbName, String filter, short maxTables) throws MetaException, TException, InvalidOperationException, UnknownDBException; @@ -185,9 +185,8 @@ * @throws TException * A thrift communication error occurred */ - public void dropTable(String dbname, String tableName, boolean deleteData, - boolean ignoreUknownTab) throws MetaException, TException, - NoSuchObjectException; + void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUknownTab) + throws MetaException, TException, NoSuchObjectException; /** * Drop the table in the DEFAULT database. @@ -209,14 +208,14 @@ public void dropTable(String dbname, String tableName, boolean deleteData, * This method will be removed in release 0.7.0. */ @Deprecated - public void dropTable(String tableName, boolean deleteData) + void dropTable(String tableName, boolean deleteData) throws MetaException, UnknownTableException, TException, NoSuchObjectException; - public void dropTable(String dbname, String tableName) + void dropTable(String dbname, String tableName) throws MetaException, TException, NoSuchObjectException; - public boolean tableExists(String databaseName, String tableName) throws MetaException, + boolean tableExists(String databaseName, String tableName) throws MetaException, TException, UnknownDBException; /** @@ -230,7 +229,7 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc * This method will be removed in release 0.7.0. */ @Deprecated - public boolean tableExists(String tableName) throws MetaException, + boolean tableExists(String tableName) throws MetaException, TException, UnknownDBException; /** @@ -249,7 +248,7 @@ public boolean tableExists(String tableName) throws MetaException, * This method will be removed in release 0.7.0. */ @Deprecated - public Table getTable(String tableName) throws MetaException, TException, + Table getTable(String tableName) throws MetaException, TException, NoSuchObjectException; /** @@ -260,7 +259,7 @@ public Table getTable(String tableName) throws MetaException, TException, * @throws MetaException Could not fetch the database * @throws TException A thrift communication error occurred */ - public Database getDatabase(String databaseName) + Database getDatabase(String databaseName) throws NoSuchObjectException, MetaException, TException; @@ -279,7 +278,7 @@ public Database getDatabase(String databaseName) * @throws NoSuchObjectException * In case the table wasn't found. 
*/ - public Table getTable(String dbName, String tableName) throws MetaException, + Table getTable(String dbName, String tableName) throws MetaException, TException, NoSuchObjectException; /** @@ -301,7 +300,7 @@ public Table getTable(String dbName, String tableName) throws MetaException, * @throws MetaException * Any other errors */ - public List<Table> getTableObjectsByName(String dbName, List<String> tableNames) + List<Table>
getTableObjectsByName(String dbName, List tableNames) throws MetaException, InvalidOperationException, UnknownDBException, TException; /** @@ -316,11 +315,10 @@ public Table getTable(String dbName, String tableName) throws MetaException, * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String, * java.lang.String, java.util.List) */ - public Partition appendPartition(String tableName, String dbName, - List partVals) throws InvalidObjectException, - AlreadyExistsException, MetaException, TException; + Partition appendPartition(String tableName, String dbName, List partVals) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException; - public Partition appendPartition(String tableName, String dbName, String name) + Partition appendPartition(String tableName, String dbName, String name) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -338,7 +336,7 @@ public Partition appendPartition(String tableName, String dbName, String name) * @throws TException * Thrift exception */ - public Partition add_partition(Partition partition) + Partition add_partition(Partition partition) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; @@ -356,7 +354,7 @@ public Partition add_partition(Partition partition) * @throws TException * Thrift exception */ - public int add_partitions(List partitions) + int add_partitions(List partitions) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -367,8 +365,8 @@ public int add_partitions(List partitions) * @param needResults Whether the results are needed * @return the partitions that were added, or null if !needResults */ - public List add_partitions( - List partitions, boolean ifNotExists, boolean needResults) + List add_partitions(List partitions, boolean ifNotExists, + boolean needResults) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -381,8 +379,7 @@ public int add_partitions(List partitions) * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, * java.lang.String, java.util.List) */ - public Partition getPartition(String tblName, String dbName, - List partVals) throws NoSuchObjectException, MetaException, TException; + Partition getPartition(String tblName, String dbName, List partVals) throws NoSuchObjectException, MetaException, TException; /** * @param partitionSpecs @@ -392,10 +389,9 @@ public Partition getPartition(String tblName, String dbName, * @param destTableName * @return partition object */ - public Partition exchange_partition(Map partitionSpecs, - String sourceDb, String sourceTable, String destdb, - String destTableName) throws MetaException, NoSuchObjectException, - InvalidObjectException, TException; + Partition exchange_partition(Map partitionSpecs, String sourceDb, + String sourceTable, String destdb, String destTableName) + throws MetaException, NoSuchObjectException, InvalidObjectException, TException; /** * @param dbName @@ -407,8 +403,8 @@ public Partition exchange_partition(Map partitionSpecs, * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, * java.lang.String, java.util.List) */ - public Partition getPartition(String dbName, String tblName, - String name) throws MetaException, UnknownTableException, NoSuchObjectException, TException; + Partition getPartition(String dbName, String tblName, String name) + throws MetaException, 
UnknownTableException, NoSuchObjectException, TException; /** @@ -423,8 +419,8 @@ public Partition getPartition(String dbName, String tblName, * @throws NoSuchObjectException * @throws TException */ - public Partition getPartitionWithAuthInfo(String dbName, String tableName, - List pvals, String userName, List groupNames) + Partition getPartitionWithAuthInfo(String dbName, String tableName, List pvals, + String userName, List groupNames) throws MetaException, UnknownTableException, NoSuchObjectException, TException; /** @@ -436,17 +432,17 @@ public Partition getPartitionWithAuthInfo(String dbName, String tableName, * @throws MetaException * @throws TException */ - public List listPartitions(String db_name, String tbl_name, - short max_parts) throws NoSuchObjectException, MetaException, TException; + List listPartitions(String db_name, String tbl_name, short max_parts) + throws NoSuchObjectException, MetaException, TException; - public List listPartitions(String db_name, String tbl_name, - List part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException; + List listPartitions(String db_name, String tbl_name, List part_vals, + short max_parts) throws NoSuchObjectException, MetaException, TException; - public List listPartitionNames(String db_name, String tbl_name, - short max_parts) throws MetaException, TException; + List listPartitionNames(String db_name, String tbl_name, short max_parts) + throws MetaException, TException; - public List listPartitionNames(String db_name, String tbl_name, - List part_vals, short max_parts) + List listPartitionNames(String db_name, String tbl_name, List part_vals, + short max_parts) throws MetaException, TException, NoSuchObjectException; /** @@ -463,8 +459,8 @@ public Partition getPartitionWithAuthInfo(String dbName, String tableName, * @throws NoSuchObjectException * @throws TException */ - public List listPartitionsByFilter(String db_name, String tbl_name, - String filter, short max_parts) throws MetaException, + List listPartitionsByFilter(String db_name, String tbl_name, String filter, + short max_parts) throws MetaException, NoSuchObjectException, TException; @@ -480,8 +476,8 @@ public Partition getPartitionWithAuthInfo(String dbName, String tableName, * @param result the resulting list of partitions * @return whether the resulting list contains partitions which may or may not match the expr */ - public boolean listPartitionsByExpr(String db_name, String tbl_name, - byte[] expr, String default_partition_name, short max_parts, List result) + boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr, + String default_partition_name, short max_parts, List result) throws TException; /** @@ -493,8 +489,8 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, * @return the list of partitions * @throws NoSuchObjectException */ - public List listPartitionsWithAuthInfo(String dbName, - String tableName, short s, String userName, List groupNames) + List listPartitionsWithAuthInfo(String dbName, String tableName, short s, + String userName, List groupNames) throws MetaException, TException, NoSuchObjectException; /** @@ -507,8 +503,8 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, * @throws MetaException * @throws TException */ - public List getPartitionsByNames(String db_name, String tbl_name, - List part_names) throws NoSuchObjectException, MetaException, TException; + List getPartitionsByNames(String db_name, String tbl_name, List part_names) + throws NoSuchObjectException, 
MetaException, TException; /** * @param dbName @@ -520,9 +516,9 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, * @return the list of paritions * @throws NoSuchObjectException */ - public List listPartitionsWithAuthInfo(String dbName, - String tableName, List partialPvals, short s, String userName, - List groupNames) throws MetaException, TException, NoSuchObjectException; + List listPartitionsWithAuthInfo(String dbName, String tableName, + List partialPvals, short s, String userName, List groupNames) + throws MetaException, TException, NoSuchObjectException; /** * @param db_name @@ -537,9 +533,10 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, * @throws UnknownPartitionException * @throws InvalidPartitionException */ - public void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, - PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, - UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; + void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, + PartitionEventType eventType) + throws MetaException, NoSuchObjectException, TException, UnknownTableException, + UnknownDBException, UnknownPartitionException, InvalidPartitionException; /** * @param db_name @@ -554,16 +551,17 @@ public void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, - PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, - UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; + boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs, + PartitionEventType eventType) + throws MetaException, NoSuchObjectException, TException, UnknownTableException, + UnknownDBException, UnknownPartitionException, InvalidPartitionException; /** * @param partVals * @throws TException * @throws MetaException */ - public void validatePartitionNameCharacters(List partVals) + void validatePartitionNameCharacters(List partVals) throws TException, MetaException; @@ -577,25 +575,25 @@ public void validatePartitionNameCharacters(List partVals) * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table) */ - public void createTable(Table tbl) throws AlreadyExistsException, + void createTable(Table tbl) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException; - public void alter_table(String defaultDatabaseName, String tblName, - Table table) throws InvalidOperationException, MetaException, TException; + void alter_table(String defaultDatabaseName, String tblName, Table table) + throws InvalidOperationException, MetaException, TException; - public void createDatabase(Database db) + void createDatabase(Database db) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; - public void dropDatabase(String name) + void dropDatabase(String name) throws NoSuchObjectException, InvalidOperationException, MetaException, TException; - public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) + void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) throws NoSuchObjectException, InvalidOperationException, MetaException, TException; - public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade) + void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, 
boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException, TException; - public void alterDatabase(String name, Database db) + void alterDatabase(String name, Database db) throws NoSuchObjectException, MetaException, TException; /** @@ -611,17 +609,15 @@ public void alterDatabase(String name, Database db) * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, * java.lang.String, java.util.List, boolean) */ - public boolean dropPartition(String db_name, String tbl_name, - List part_vals, boolean deleteData) throws NoSuchObjectException, - MetaException, TException; + boolean dropPartition(String db_name, String tbl_name, List part_vals, boolean deleteData) + throws NoSuchObjectException, MetaException, TException; List dropPartitions(String dbName, String tblName, List> partExprs, boolean deleteData, boolean ignoreProtection, boolean ifExists) throws NoSuchObjectException, MetaException, TException; - public boolean dropPartition(String db_name, String tbl_name, - String name, boolean deleteData) throws NoSuchObjectException, - MetaException, TException; + boolean dropPartition(String db_name, String tbl_name, String name, boolean deleteData) + throws NoSuchObjectException, MetaException, TException; /** * updates a partition to new partition * @@ -638,7 +634,7 @@ public boolean dropPartition(String db_name, String tbl_name, * @throws TException * if error in communicating with metastore server */ - public void alter_partition(String dbName, String tblName, Partition newPart) + void alter_partition(String dbName, String tblName, Partition newPart) throws InvalidOperationException, MetaException, TException; /** @@ -657,7 +653,7 @@ public void alter_partition(String dbName, String tblName, Partition newPart) * @throws TException * if error in communicating with metastore server */ - public void alter_partitions(String dbName, String tblName, List newParts) + void alter_partitions(String dbName, String tblName, List newParts) throws InvalidOperationException, MetaException, TException; /** @@ -678,7 +674,8 @@ public void alter_partitions(String dbName, String tblName, List newP * @throws TException * if error in communicating with metastore server */ - public void renamePartition(final String dbname, final String name, final List part_vals, final Partition newPart) + void renamePartition(final String dbname, final String name, final List part_vals, + final Partition newPart) throws InvalidOperationException, MetaException, TException; /** @@ -691,7 +688,7 @@ public void renamePartition(final String dbname, final String name, final List getFields(String db, String tableName) + List getFields(String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException; @@ -705,7 +702,7 @@ public void renamePartition(final String dbname, final String name, final List getSchema(String db, String tableName) + List getSchema(String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException; @@ -718,7 +715,7 @@ public void renamePartition(final String dbname, final String name, final List partitionNameToVals(String name) + List partitionNameToVals(String name) throws MetaException, TException; /** * @@ -739,7 +736,7 @@ public String getConfigValue(String name, String defaultValue) * @throws MetaException * @throws TException */ - public Map partitionNameToSpec(String name) + Map partitionNameToSpec(String name) throws MetaException, TException; /** @@ 
-751,11 +748,11 @@ public String getConfigValue(String name, String defaultValue) * @throws TException * @throws AlreadyExistsException */ - public void createIndex(Index index, Table indexTable) throws InvalidObjectException, + void createIndex(Index index, Table indexTable) throws InvalidObjectException, MetaException, NoSuchObjectException, TException, AlreadyExistsException; - public void alter_index(String dbName, String tblName, String indexName, - Index index) throws InvalidOperationException, MetaException, TException; + void alter_index(String dbName, String tblName, String indexName, Index index) + throws InvalidOperationException, MetaException, TException; /** * @@ -768,7 +765,7 @@ public void alter_index(String dbName, String tblName, String indexName, * @throws NoSuchObjectException * @throws TException */ - public Index getIndex(String dbName, String tblName, String indexName) + Index getIndex(String dbName, String tblName, String indexName) throws MetaException, UnknownTableException, NoSuchObjectException, TException; @@ -783,8 +780,8 @@ public Index getIndex(String dbName, String tblName, String indexName) * @throws MetaException * @throws TException */ - public List listIndexes(String db_name, String tbl_name, - short max) throws NoSuchObjectException, MetaException, TException; + List listIndexes(String db_name, String tbl_name, short max) + throws NoSuchObjectException, MetaException, TException; /** * list all the index names of the give base table. @@ -796,8 +793,8 @@ public Index getIndex(String dbName, String tblName, String indexName) * @throws MetaException * @throws TException */ - public List listIndexNames(String db_name, String tbl_name, - short max) throws MetaException, TException; + List listIndexNames(String db_name, String tbl_name, short max) + throws MetaException, TException; /** * @param db_name @@ -809,9 +806,8 @@ public Index getIndex(String dbName, String tblName, String indexName) * @throws MetaException * @throws TException */ - public boolean dropIndex(String db_name, String tbl_name, - String name, boolean deleteData) throws NoSuchObjectException, - MetaException, TException; + boolean dropIndex(String db_name, String tbl_name, String name, boolean deleteData) + throws NoSuchObjectException, MetaException, TException; /** * Write table level column statistics to persistent store @@ -824,7 +820,7 @@ public boolean dropIndex(String db_name, String tbl_name, * @throws InvalidInputException */ - public boolean updateTableColumnStatistics(ColumnStatistics statsObj) + boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; @@ -839,7 +835,7 @@ public boolean updateTableColumnStatistics(ColumnStatistics statsObj) * @throws InvalidInputException */ - public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) + boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; @@ -847,15 +843,15 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) * Get table column statistics given dbName, tableName and multiple colName-s * @return ColumnStatistics struct for a given db, table and columns */ - public List getTableColumnStatistics(String dbName, String tableName, - List colNames) throws NoSuchObjectException, MetaException, TException; + List getTableColumnStatistics(String dbName, String tableName, + 
List colNames) throws NoSuchObjectException, MetaException, TException; /** * Get partitions column statistics given dbName, tableName, multiple partitions and colName-s * @return ColumnStatistics struct for a given db, table and columns */ - public Map> getPartitionColumnStatistics(String dbName, - String tableName, List partNames, List colNames) + Map> getPartitionColumnStatistics(String dbName, + String tableName, List partNames, List colNames) throws NoSuchObjectException, MetaException, TException; /** @@ -872,8 +868,8 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) * @throws InvalidInputException */ - public boolean deletePartitionColumnStatistics(String dbName, String tableName, - String partName, String colName) throws NoSuchObjectException, MetaException, + boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, + String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; /** @@ -889,7 +885,7 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, * @throws InvalidInputException */ - public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws + boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; /** @@ -899,7 +895,7 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri * @throws MetaException * @throws TException */ - public boolean create_role(Role role) + boolean create_role(Role role) throws MetaException, TException; /** @@ -910,7 +906,7 @@ public boolean create_role(Role role) * @throws MetaException * @throws TException */ - public boolean drop_role(String role_name) throws MetaException, TException; + boolean drop_role(String role_name) throws MetaException, TException; /** * list all role names @@ -918,7 +914,7 @@ public boolean create_role(Role role) * @throws TException * @throws MetaException */ - public List listRoleNames() throws MetaException, TException; + List listRoleNames() throws MetaException, TException; /** * @@ -932,9 +928,9 @@ public boolean create_role(Role role) * @throws MetaException * @throws TException */ - public boolean grant_role(String role_name, String user_name, - PrincipalType principalType, String grantor, PrincipalType grantorType, - boolean grantOption) throws MetaException, TException; + boolean grant_role(String role_name, String user_name, PrincipalType principalType, + String grantor, PrincipalType grantorType, boolean grantOption) + throws MetaException, TException; /** * @param role_name @@ -947,8 +943,8 @@ public boolean grant_role(String role_name, String user_name, * @throws MetaException * @throws TException */ - public boolean revoke_role(String role_name, String user_name, - PrincipalType principalType, boolean grantOption) throws MetaException, TException; + boolean revoke_role(String role_name, String user_name, PrincipalType principalType, + boolean grantOption) throws MetaException, TException; /** * @@ -958,7 +954,7 @@ public boolean revoke_role(String role_name, String user_name, * @throws MetaException * @throws TException */ - public List list_roles(String principalName, PrincipalType principalType) + List list_roles(String principalName, PrincipalType principalType) throws MetaException, TException; /** @@ -971,8 +967,8 @@ public boolean revoke_role(String role_name, 
String user_name, * @throws MetaException * @throws TException */ - public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, - String user_name, List group_names) throws MetaException, + PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, String user_name, + List group_names) throws MetaException, TException; /** @@ -984,8 +980,8 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, * @throws MetaException * @throws TException */ - public List list_privileges(String principal_name, - PrincipalType principal_type, HiveObjectRef hiveObject) + List list_privileges(String principal_name, PrincipalType principal_type, + HiveObjectRef hiveObject) throws MetaException, TException; /** @@ -994,7 +990,7 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, * @throws MetaException * @throws TException */ - public boolean grant_privileges(PrivilegeBag privileges) + boolean grant_privileges(PrivilegeBag privileges) throws MetaException, TException; /** @@ -1003,7 +999,7 @@ public boolean grant_privileges(PrivilegeBag privileges) * @throws MetaException * @throws TException */ - public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) + boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) throws MetaException, TException; /** @@ -1013,7 +1009,7 @@ public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) * @throws MetaException * @throws TException */ - public String getDelegationToken(String owner, String renewerKerberosPrincipalName) + String getDelegationToken(String owner, String renewerKerberosPrincipalName) throws MetaException, TException; /** @@ -1022,28 +1018,28 @@ public String getDelegationToken(String owner, String renewerKerberosPrincipalNa * @throws MetaException * @throws TException */ - public long renewDelegationToken(String tokenStrForm) throws MetaException, TException; + long renewDelegationToken(String tokenStrForm) throws MetaException, TException; /** * @param tokenStrForm * @throws MetaException * @throws TException */ - public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException; + void cancelDelegationToken(String tokenStrForm) throws MetaException, TException; - public void createFunction(Function func) + void createFunction(Function func) throws InvalidObjectException, MetaException, TException; - public void alterFunction(String dbName, String funcName, Function newFunction) + void alterFunction(String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException, TException; - public void dropFunction(String dbName, String funcName) throws MetaException, + void dropFunction(String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, TException; - public Function getFunction(String dbName, String funcName) + Function getFunction(String dbName, String funcName) throws MetaException, TException; - public List getFunctions(String dbName, String pattern) + List getFunctions(String dbName, String pattern) throws MetaException, TException; /** @@ -1051,7 +1047,7 @@ public Function getFunction(String dbName, String funcName) * @return list of valid transactions * @throws TException */ - public ValidTxnList getValidTxns() throws TException; + ValidTxnList getValidTxns() throws TException; /** * Initiate a transaction. 
@@ -1061,7 +1057,7 @@ public Function getFunction(String dbName, String funcName) * @return transaction identifier * @throws TException */ - public long openTxn(String user) throws TException; + long openTxn(String user) throws TException; /** * Initiate a batch of transactions. It is not guaranteed that the @@ -1088,7 +1084,7 @@ public Function getFunction(String dbName, String funcName) * optimistically assuming that the result matches the request. * @throws TException */ - public OpenTxnsResponse openTxns(String user, int numTxns) throws TException; + OpenTxnsResponse openTxns(String user, int numTxns) throws TException; /** * Rollback a transaction. This will also unlock any locks associated with @@ -1099,7 +1095,7 @@ public Function getFunction(String dbName, String funcName) * deleted. * @throws TException */ - public void rollbackTxn(long txnid) throws NoSuchTxnException, TException; + void rollbackTxn(long txnid) throws NoSuchTxnException, TException; /** * Commit a transaction. This will also unlock any locks associated with @@ -1112,7 +1108,7 @@ public Function getFunction(String dbName, String funcName) * aborted. This can result from the transaction timing out. * @throws TException */ - public void commitTxn(long txnid) + void commitTxn(long txnid) throws NoSuchTxnException, TxnAbortedException, TException; /** @@ -1122,7 +1118,7 @@ public void commitTxn(long txnid) * @return List of currently opened transactions, included aborted ones. * @throws TException */ - public GetOpenTxnsInfoResponse showTxns() throws TException; + GetOpenTxnsInfoResponse showTxns() throws TException; /** * Request a set of locks. All locks needed for a particular query, DML, @@ -1152,7 +1148,7 @@ public void commitTxn(long txnid) * aborted. This can result from the transaction timing out. * @throws TException */ - public LockResponse lock(LockRequest request) + LockResponse lock(LockRequest request) throws NoSuchTxnException, TxnAbortedException, TException; /** @@ -1176,7 +1172,7 @@ public LockResponse lock(LockRequest request) * This can result from the lock timing out and being unlocked by the system. * @throws TException */ - public LockResponse checkLock(long lockid) + LockResponse checkLock(long lockid) throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, TException; @@ -1191,7 +1187,7 @@ public LockResponse checkLock(long lockid) * transaction. * @throws TException */ - public void unlock(long lockid) + void unlock(long lockid) throws NoSuchLockException, TxnOpenException, TException; /** @@ -1199,7 +1195,7 @@ public void unlock(long lockid) * @return List of currently held and waiting locks. * @throws TException */ - public ShowLocksResponse showLocks() throws TException; + ShowLocksResponse showLocks() throws TException; /** * Send a heartbeat to indicate that the client holding these locks (if @@ -1221,7 +1217,7 @@ public void unlock(long lockid) * This can result from the lock timing out and being unlocked by the system. * @throws TException */ - public void heartbeat(long txnid, long lockid) + void heartbeat(long txnid, long lockid) throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, TException; @@ -1235,7 +1231,7 @@ public void heartbeat(long txnid, long lockid) * have already been closed) and which were aborted. 
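A compact sketch of the openTxn/commitTxn/rollbackTxn contract documented in the hunks above; it is illustrative only, `client` is assumed to be an already-connected IMetaStoreClient, and the user name is a placeholder:

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;

    final class TxnSketch {
      /** Sketch only: run one unit of work inside a metastore transaction. */
      static void runInTxn(IMetaStoreClient client, Runnable work) throws Exception {
        long txnId = client.openTxn("hive");   // "hive" is a placeholder user name
        try {
          work.run();                          // the writes that belong to this transaction
          client.commitTxn(txnId);             // commit also releases locks tied to the txn
        } catch (Exception e) {
          client.rollbackTxn(txnId);           // rollback likewise releases its locks
          throw e;
        }
      }
    }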
* @throws TException */ - public HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) throws TException; + HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) throws TException; /** * Send a request to compact a table or partition. This will not block until the compaction is @@ -1250,7 +1246,7 @@ public void heartbeat(long txnid, long lockid) * @param type Whether this is a major or minor compaction. * @throws TException */ - public void compact(String dbname, String tableName, String partitionName, CompactionType type) + void compact(String dbname, String tableName, String partitionName, CompactionType type) throws TException; /** @@ -1259,9 +1255,9 @@ public void compact(String dbname, String tableName, String partitionName, Comp * in progress, and finished but waiting to clean the existing files. * @throws TException */ - public ShowCompactResponse showCompactions() throws TException; + ShowCompactResponse showCompactions() throws TException; - public class IncompatibleMetastoreException extends MetaException { + class IncompatibleMetastoreException extends MetaException { public IncompatibleMetastoreException(String message) { super(message); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index a23d122..8ce292d 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -739,7 +739,7 @@ protected boolean shouldStop() { return filterBuffer.hasError(); } - private static enum FilterType { + private enum FilterType { Integral, String, Date, diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java index a141793..b284089 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java @@ -36,7 +36,7 @@ * @return true on success * @throws MetaException */ - public boolean deleteDir(FileSystem fs, Path f, boolean recursive, - Configuration conf) throws MetaException; + boolean deleteDir(FileSystem fs, Path f, boolean recursive, Configuration conf) + throws MetaException; } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 4f186f4..5c7b663 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -150,7 +150,7 @@ private static Lock pmfPropLock = new ReentrantLock(); private static final Log LOG = LogFactory.getLog(ObjectStore.class.getName()); - private static enum TXN_STATUS { + private enum TXN_STATUS { NO_STATE, OPEN, COMMITED, ROLLBACK } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java index 0787775..12d4ce8 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java @@ -33,7 +33,7 @@ * @param expr Serialized expression. * @return The filter string. 
*/ - public String convertExprToFilter(byte[] expr) throws MetaException; + String convertExprToFilter(byte[] expr) throws MetaException; /** * Filters the partition names via serialized Hive expression. @@ -43,6 +43,6 @@ * @param partitionNames Partition names; the list is modified in place. * @return Whether there were any unknown partitions preserved in the name list. */ - public boolean filterPartitionsByExpr(List columnNames, byte[] expr, - String defaultPartitionName, List partitionNames) throws MetaException; + boolean filterPartitionsByExpr(List columnNames, byte[] expr, String defaultPartitionName, + List partitionNames) throws MetaException; } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index 2379ce7..5a98d41 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -63,10 +63,10 @@ */ @Target(value = ElementType.METHOD) @Retention(value = RetentionPolicy.RUNTIME) - public @interface CanNotRetry { + @interface CanNotRetry { } - public abstract void shutdown(); + void shutdown(); /** * Opens a new one or the one already created Every call of this function must @@ -75,7 +75,7 @@ * @return an active transaction */ - public abstract boolean openTransaction(); + boolean openTransaction(); /** * if this is the commit of the first open call then an actual commit is @@ -84,66 +84,65 @@ * @return true or false */ @CanNotRetry - public abstract boolean commitTransaction(); + boolean commitTransaction(); /** * Rolls back the current transaction if it is active */ @CanNotRetry - public abstract void rollbackTransaction(); + void rollbackTransaction(); - public abstract void createDatabase(Database db) + void createDatabase(Database db) throws InvalidObjectException, MetaException; - public abstract Database getDatabase(String name) + Database getDatabase(String name) throws NoSuchObjectException; - public abstract boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException; + boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException; - public abstract boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, MetaException; + boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, MetaException; - public abstract List getDatabases(String pattern) throws MetaException; + List getDatabases(String pattern) throws MetaException; - public abstract List getAllDatabases() throws MetaException; + List getAllDatabases() throws MetaException; - public abstract boolean createType(Type type); + boolean createType(Type type); - public abstract Type getType(String typeName); + Type getType(String typeName); - public abstract boolean dropType(String typeName); + boolean dropType(String typeName); - public abstract void createTable(Table tbl) throws InvalidObjectException, + void createTable(Table tbl) throws InvalidObjectException, MetaException; - public abstract boolean dropTable(String dbName, String tableName) + boolean dropTable(String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - public abstract Table getTable(String dbName, String tableName) + Table getTable(String dbName, String tableName) throws MetaException; - public abstract boolean addPartition(Partition part) + boolean addPartition(Partition part) throws InvalidObjectException, MetaException; - 
public abstract boolean addPartitions(String dbName, String tblName, List parts) + boolean addPartitions(String dbName, String tblName, List parts) throws InvalidObjectException, MetaException; - public abstract Partition getPartition(String dbName, String tableName, - List part_vals) throws MetaException, NoSuchObjectException; + Partition getPartition(String dbName, String tableName, List part_vals) + throws MetaException, NoSuchObjectException; - public abstract boolean doesPartitionExist(String dbName, String tableName, - List part_vals) throws MetaException, NoSuchObjectException; + boolean doesPartitionExist(String dbName, String tableName, List part_vals) + throws MetaException, NoSuchObjectException; - public abstract boolean dropPartition(String dbName, String tableName, - List part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException, - InvalidInputException; + boolean dropPartition(String dbName, String tableName, List part_vals) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - public abstract List getPartitions(String dbName, - String tableName, int max) throws MetaException, NoSuchObjectException; + List getPartitions(String dbName, String tableName, int max) + throws MetaException, NoSuchObjectException; - public abstract void alterTable(String dbname, String name, Table newTable) + void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException, MetaException; - public List getTables(String dbName, String pattern) + List getTables(String dbName, String pattern) throws MetaException; /** @@ -156,10 +155,10 @@ public abstract void alterTable(String dbname, String name, Table newTable) * If there are duplicate names, only one instance of the table will be returned * @throws MetaException */ - public List
getTableObjectsByName(String dbname, List tableNames) + List
getTableObjectsByName(String dbname, List tableNames) throws MetaException, UnknownDBException; - public List getAllTables(String dbName) throws MetaException; + List getAllTables(String dbName) throws MetaException; /** * Gets a list of tables based on a filter string and filter type. @@ -173,116 +172,115 @@ public abstract void alterTable(String dbname, String name, Table newTable) * @throws MetaException * @throws UnknownDBException */ - public abstract List listTableNamesByFilter(String dbName, - String filter, short max_tables) throws MetaException, UnknownDBException; + List listTableNamesByFilter(String dbName, String filter, short max_tables) + throws MetaException, UnknownDBException; - public abstract List listPartitionNames(String db_name, - String tbl_name, short max_parts) throws MetaException; + List listPartitionNames(String db_name, String tbl_name, short max_parts) + throws MetaException; - public abstract List listPartitionNamesByFilter(String db_name, - String tbl_name, String filter, short max_parts) throws MetaException; + List listPartitionNamesByFilter(String db_name, String tbl_name, String filter, + short max_parts) throws MetaException; - public abstract void alterPartition(String db_name, String tbl_name, List part_vals, - Partition new_part) throws InvalidObjectException, MetaException; + void alterPartition(String db_name, String tbl_name, List part_vals, Partition new_part) + throws InvalidObjectException, MetaException; - public abstract void alterPartitions(String db_name, String tbl_name, - List> part_vals_list, List new_parts) + void alterPartitions(String db_name, String tbl_name, List> part_vals_list, + List new_parts) throws InvalidObjectException, MetaException; - public abstract boolean addIndex(Index index) + boolean addIndex(Index index) throws InvalidObjectException, MetaException; - public abstract Index getIndex(String dbName, String origTableName, String indexName) throws MetaException; + Index getIndex(String dbName, String origTableName, String indexName) throws MetaException; - public abstract boolean dropIndex(String dbName, String origTableName, String indexName) throws MetaException; + boolean dropIndex(String dbName, String origTableName, String indexName) throws MetaException; - public abstract List getIndexes(String dbName, - String origTableName, int max) throws MetaException; + List getIndexes(String dbName, String origTableName, int max) throws MetaException; - public abstract List listIndexNames(String dbName, - String origTableName, short max) throws MetaException; + List listIndexNames(String dbName, String origTableName, short max) throws MetaException; - public abstract void alterIndex(String dbname, String baseTblName, String name, Index newIndex) + void alterIndex(String dbname, String baseTblName, String name, Index newIndex) throws InvalidObjectException, MetaException; - public abstract List getPartitionsByFilter( - String dbName, String tblName, String filter, short maxParts) + List getPartitionsByFilter(String dbName, String tblName, String filter, + short maxParts) throws MetaException, NoSuchObjectException; - public abstract boolean getPartitionsByExpr(String dbName, String tblName, - byte[] expr, String defaultPartitionName, short maxParts, List result) + boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + String defaultPartitionName, short maxParts, List result) throws TException; - public abstract List getPartitionsByNames( - String dbName, String tblName, List partNames) + List 
getPartitionsByNames(String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException; - public abstract Table markPartitionForEvent(String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + Table markPartitionForEvent(String dbName, String tblName, Map partVals, + PartitionEventType evtType) + throws MetaException, UnknownTableException, InvalidPartitionException, + UnknownPartitionException; - public abstract boolean isPartitionMarkedForEvent(String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + boolean isPartitionMarkedForEvent(String dbName, String tblName, Map partName, + PartitionEventType evtType) + throws MetaException, UnknownTableException, InvalidPartitionException, + UnknownPartitionException; - public abstract boolean addRole(String rowName, String ownerName) + boolean addRole(String rowName, String ownerName) throws InvalidObjectException, MetaException, NoSuchObjectException; - public abstract boolean removeRole(String roleName) throws MetaException, NoSuchObjectException; + boolean removeRole(String roleName) throws MetaException, NoSuchObjectException; - public abstract boolean grantRole(Role role, String userName, PrincipalType principalType, - String grantor, PrincipalType grantorType, boolean grantOption) + boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor, + PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException, InvalidObjectException; - public abstract boolean revokeRole(Role role, String userName, PrincipalType principalType, - boolean grantOption) throws MetaException, NoSuchObjectException; + boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption) + throws MetaException, NoSuchObjectException; - public abstract PrincipalPrivilegeSet getUserPrivilegeSet(String userName, - List groupNames) throws InvalidObjectException, MetaException; + PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List groupNames) + throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName, - List groupNames) throws InvalidObjectException, MetaException; + PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, List groupNames) + throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName, - String userName, List groupNames) throws InvalidObjectException, MetaException; + PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName, String userName, + List groupNames) throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName, - String partition, String userName, List groupNames) throws InvalidObjectException, MetaException; + PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName, String partition, + String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, - String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException; + PrincipalPrivilegeSet 
getColumnPrivilegeSet(String dbName, String tableName, String partitionName, + String columnName, String userName, List groupNames) + throws InvalidObjectException, MetaException; - public abstract List listPrincipalGlobalGrants(String principalName, - PrincipalType principalType); + List listPrincipalGlobalGrants(String principalName, + PrincipalType principalType); - public abstract List listPrincipalDBGrants(String principalName, - PrincipalType principalType, String dbName); + List listPrincipalDBGrants(String principalName, PrincipalType principalType, + String dbName); - public abstract List listAllTableGrants( - String principalName, PrincipalType principalType, String dbName, - String tableName); + List listAllTableGrants(String principalName, PrincipalType principalType, + String dbName, String tableName); - public abstract List listPrincipalPartitionGrants( - String principalName, PrincipalType principalType, String dbName, - String tableName, String partName); + List listPrincipalPartitionGrants(String principalName, + PrincipalType principalType, String dbName, String tableName, String partName); - public abstract List listPrincipalTableColumnGrants( - String principalName, PrincipalType principalType, String dbName, - String tableName, String columnName); + List listPrincipalTableColumnGrants(String principalName, + PrincipalType principalType, String dbName, String tableName, String columnName); - public abstract List listPrincipalPartitionColumnGrants( - String principalName, PrincipalType principalType, String dbName, - String tableName, String partName, String columnName); + List listPrincipalPartitionColumnGrants(String principalName, + PrincipalType principalType, String dbName, String tableName, String partName, + String columnName); - public abstract boolean grantPrivileges (PrivilegeBag privileges) + boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException, MetaException, NoSuchObjectException; - public abstract boolean revokePrivileges (PrivilegeBag privileges, boolean grantOption) + boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) throws InvalidObjectException, MetaException, NoSuchObjectException; - public abstract org.apache.hadoop.hive.metastore.api.Role getRole( - String roleName) throws NoSuchObjectException; + org.apache.hadoop.hive.metastore.api.Role getRole(String roleName) throws NoSuchObjectException; - public List listRoleNames(); + List listRoleNames(); - public List listRoles(String principalName, - PrincipalType principalType); + List listRoles(String principalName, PrincipalType principalType); /** @@ -290,15 +288,15 @@ public abstract boolean revokePrivileges (PrivilegeBag privileges, boolean gran * @param roleName * @return */ - public List listRoleMembers(String roleName); + List listRoleMembers(String roleName); - public abstract Partition getPartitionWithAuth(String dbName, String tblName, - List partVals, String user_name, List group_names) + Partition getPartitionWithAuth(String dbName, String tblName, List partVals, + String user_name, List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException; - public abstract List getPartitionsWithAuth(String dbName, - String tblName, short maxParts, String userName, List groupNames) + List getPartitionsWithAuth(String dbName, String tblName, short maxParts, + String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException; /** @@ -316,8 +314,8 @@ public abstract Partition getPartitionWithAuth(String 
dbName, String tblName, * @throws MetaException * @throws NoSuchObjectException */ - public abstract List listPartitionNamesPs(String db_name, String tbl_name, - List part_vals, short max_parts) + List listPartitionNamesPs(String db_name, String tbl_name, List part_vals, + short max_parts) throws MetaException, NoSuchObjectException; /** @@ -341,8 +339,8 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName, * @throws NoSuchObjectException * @throws InvalidObjectException */ - public abstract List listPartitionsPsWithAuth(String db_name, String tbl_name, - List part_vals, short max_parts, String userName, List groupNames) + List listPartitionsPsWithAuth(String db_name, String tbl_name, List part_vals, + short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException; /** Persists the given column statistics object to the metastore @@ -356,7 +354,7 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName, * @throws InvalidObjectException * @throws InvalidInputException */ - public abstract boolean updateTableColumnStatistics(ColumnStatistics colStats) + boolean updateTableColumnStatistics(ColumnStatistics colStats) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; /** Persists the given column statistics object to the metastore @@ -370,8 +368,7 @@ public abstract boolean updateTableColumnStatistics(ColumnStatistics colStats) * @throws InvalidObjectException * @throws InvalidInputException */ - public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, - List partVals) + boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, List partVals) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; /** @@ -387,15 +384,15 @@ public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsOb * @throws InvalidInputException * */ - public abstract ColumnStatistics getTableColumnStatistics(String dbName, String tableName, - List colName) throws MetaException, NoSuchObjectException; + ColumnStatistics getTableColumnStatistics(String dbName, String tableName, List colName) + throws MetaException, NoSuchObjectException; /** * Returns the relevant column statistics for given columns in given partitions in a given * table in a given database if such statistics exist. 
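The column-statistics methods being reflowed here read more easily with a concrete call in mind. A small sketch that fetches table-level statistics through a RawStore; the database, table and column names are invented, and note that this rendering of the patch drops generic arguments such as List<String>, which the real signatures carry:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

    public class ColumnStatsSketch {
      /** Print whatever table-level stats the metastore holds for a couple of columns. */
      static void printStats(RawStore store) throws MetaException, NoSuchObjectException {
        ColumnStatistics stats = store.getTableColumnStatistics(
            "default", "web_logs", Arrays.asList("views", "referrer"));
        if (stats == null) {
          return;                                  // nothing recorded for this table
        }
        for (ColumnStatisticsObj col : stats.getStatsObj()) {
          System.out.println(col.getColName() + " (" + col.getColType() + "): "
              + col.getStatsData());
        }
      }
    }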
*/ - public abstract List getPartitionColumnStatistics( - String dbName, String tblName, List partNames, List colNames) + List getPartitionColumnStatistics(String dbName, String tblName, + List partNames, List colNames) throws MetaException, NoSuchObjectException; /** @@ -415,8 +412,8 @@ public abstract ColumnStatistics getTableColumnStatistics(String dbName, String * @throws InvalidInputException */ - public abstract boolean deletePartitionColumnStatistics(String dbName, String tableName, - String partName, List partVals, String colName) + boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, + List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; /** @@ -434,34 +431,33 @@ public abstract boolean deletePartitionColumnStatistics(String dbName, String ta * @throws InvalidInputException */ - public abstract boolean deleteTableColumnStatistics(String dbName, String tableName, - String colName) + boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; - public abstract long cleanupEvents(); + long cleanupEvents(); - public abstract boolean addToken(String tokenIdentifier, String delegationToken); + boolean addToken(String tokenIdentifier, String delegationToken); - public abstract boolean removeToken(String tokenIdentifier); + boolean removeToken(String tokenIdentifier); - public abstract String getToken(String tokenIdentifier); + String getToken(String tokenIdentifier); - public abstract List getAllTokenIdentifiers(); + List getAllTokenIdentifiers(); - public abstract int addMasterKey(String key) throws MetaException; + int addMasterKey(String key) throws MetaException; - public abstract void updateMasterKey(Integer seqNo, String key) + void updateMasterKey(Integer seqNo, String key) throws NoSuchObjectException, MetaException; - public abstract boolean removeMasterKey(Integer keySeq); + boolean removeMasterKey(Integer keySeq); - public abstract String[] getMasterKeys(); + String[] getMasterKeys(); - public abstract void verifySchema() throws MetaException; + void verifySchema() throws MetaException; - public abstract String getMetaStoreSchemaVersion() throws MetaException; + String getMetaStoreSchemaVersion() throws MetaException; - public abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException; + void setMetaStoreSchemaVersion(String version, String comment) throws MetaException; void dropPartitions(String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException; @@ -502,7 +498,7 @@ void dropPartitions(String dbName, String tblName, List partNames) * @throws InvalidObjectException * @throws MetaException */ - public void createFunction(Function func) + void createFunction(Function func) throws InvalidObjectException, MetaException; /** @@ -513,7 +509,7 @@ public void createFunction(Function func) * @throws InvalidObjectException * @throws MetaException */ - public void alterFunction(String dbName, String funcName, Function newFunction) + void alterFunction(String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException; /** @@ -526,7 +522,7 @@ public void alterFunction(String dbName, String funcName, Function newFunction) * @throws InvalidObjectException * @throws InvalidInputException */ - public void dropFunction(String dbName, String funcName) + void 
dropFunction(String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; /** @@ -536,7 +532,7 @@ public void dropFunction(String dbName, String funcName) * @return * @throws MetaException */ - public Function getFunction(String dbName, String funcName) throws MetaException; + Function getFunction(String dbName, String funcName) throws MetaException; /** * Retrieve list of function names based on name pattern. @@ -545,6 +541,6 @@ public void dropFunction(String dbName, String funcName) * @return * @throws MetaException */ - public List getFunctions(String dbName, String pattern) throws MetaException; + List getFunctions(String dbName, String pattern) throws MetaException; } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java index 4499485..956d96a 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java @@ -27,7 +27,7 @@ public abstract class PreEventContext { - public static enum PreEventType { + public enum PreEventType { CREATE_TABLE, DROP_TABLE, ALTER_TABLE, diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java index 4fa841b..4b1afc4 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java @@ -36,14 +36,11 @@ * @return the connection URL * @throws Exception */ - public String getJdoConnectionUrl(Configuration conf) - throws Exception; + String getJdoConnectionUrl(Configuration conf) throws Exception; /** * Alerts this that the connection URL was bad. Can be used to collect stats, * etc. - * - * @param url */ - public void notifyBadConnectionUrl(String url); + void notifyBadConnectionUrl(String url); } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java index b8d1afc..306a12d 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java @@ -68,13 +68,13 @@ private final String sqlOp; // private constructor - private Operator(String op){ + Operator(String op){ this.op = op; this.jdoOp = op; this.sqlOp = op; } - private Operator(String op, String jdoOp, String sqlOp){ + Operator(String op, String jdoOp, String sqlOp){ this.op = op; this.jdoOp = jdoOp; this.sqlOp = sqlOp; diff --git ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index 4246d68..dac63d7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -461,7 +461,6 @@ STATS_SKIPPING_BY_ERROR(30017, "Skipping stats aggregation by error {0}", true), ORC_CORRUPTED_READ(30018, "Corruption in ORC data encountered. 
To skip reading corrupted " + "data, set " + HiveConf.ConfVars.HIVE_ORC_SKIP_CORRUPT_DATA + " to true"); - ; private int errorCode; private String mesg; @@ -573,20 +572,20 @@ public static String findSQLState(String mesg) { return error.getSQLState(); } - private ErrorMsg(int errorCode, String mesg) { + ErrorMsg(int errorCode, String mesg) { this(errorCode, mesg, "42000", false); } - private ErrorMsg(int errorCode, String mesg, boolean format) { + ErrorMsg(int errorCode, String mesg, boolean format) { // 42000 is the generic SQLState for syntax error. this(errorCode, mesg, "42000", format); } - private ErrorMsg(int errorCode, String mesg, String sqlState) { + ErrorMsg(int errorCode, String mesg, String sqlState) { this(errorCode, mesg, sqlState, false); } - private ErrorMsg(int errorCode, String mesg, String sqlState, boolean format) { + ErrorMsg(int errorCode, String mesg, String sqlState, boolean format) { this.errorCode = errorCode; this.mesg = mesg; this.sqlState = sqlState; diff --git ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHook.java ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHook.java index 4dafeac..72d6d38 100644 --- ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHook.java +++ ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHook.java @@ -34,13 +34,11 @@ * Invoked before Hive begins any processing of a command in the Driver, * notably before compilation and any customizable performance logging. */ - public void preDriverRun( - HiveDriverRunHookContext hookContext) throws Exception; + void preDriverRun(HiveDriverRunHookContext hookContext) throws Exception; /** * Invoked after Hive performs any processing of a command, just before a * response is returned to the entity calling the Driver. */ - public void postDriverRun( - HiveDriverRunHookContext hookContext) throws Exception; + void postDriverRun(HiveDriverRunHookContext hookContext) throws Exception; } diff --git ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHookContext.java ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHookContext.java index 777730b..7fa86ef 100644 --- ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHookContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/HiveDriverRunHookContext.java @@ -25,6 +25,6 @@ * HiveDriverRunHook. 
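HiveDriverRunHook, cleaned up just above, is the extension point its javadoc describes: preDriverRun fires before compilation, postDriverRun just before the response is returned to the caller. A minimal implementation sketch — the class name is invented, and such a hook is normally wired in through the hive.exec.driver.run.hooks property:

    import org.apache.hadoop.hive.ql.HiveDriverRunHook;
    import org.apache.hadoop.hive.ql.HiveDriverRunHookContext;

    /** Logs every command once before the Driver compiles it and once after it finishes. */
    public class LoggingDriverRunHook implements HiveDriverRunHook {

      @Override
      public void preDriverRun(HiveDriverRunHookContext hookContext) throws Exception {
        System.err.println("driver starting: " + hookContext.getCommand());
      }

      @Override
      public void postDriverRun(HiveDriverRunHookContext hookContext) throws Exception {
        System.err.println("driver finished: " + hookContext.getCommand());
      }
    }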
*/ public interface HiveDriverRunHookContext extends Configurable{ - public String getCommand(); - public void setCommand(String command); + String getCommand(); + void setCommand(String command); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/BucketMatcher.java ql/src/java/org/apache/hadoop/hive/ql/exec/BucketMatcher.java index 5d78ce0..de7301b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/BucketMatcher.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/BucketMatcher.java @@ -25,12 +25,12 @@ public interface BucketMatcher { - public List getAliasBucketFiles(String currentInputFile, String refTableAlias, String alias); + List getAliasBucketFiles(String currentInputFile, String refTableAlias, String alias); - public void setAliasBucketFileNameMapping( - Map>> aliasBucketFileNameMapping); + void setAliasBucketFileNameMapping( + Map>> aliasBucketFileNameMapping); - public Map getBucketFileNameMapping(); + Map getBucketFileNameMapping(); - public void setBucketFileNameMapping(Map bucketFileNameMapping); + void setBucketFileNameMapping(Map bucketFileNameMapping); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index f15755f..e64b470 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -3640,7 +3640,7 @@ private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws Hi console.printInfo("Dropped the partition " + partition.getName()); // We have already locked the table, don't lock the partitions. work.getOutputs().add(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK)); - }; + } } private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FetchFormatter.java ql/src/java/org/apache/hadoop/hive/ql/exec/FetchFormatter.java index c2ed0d6..1216467 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FetchFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FetchFormatter.java @@ -40,7 +40,7 @@ T convert(Object row, ObjectInspector rowOI) throws Exception; - public static class ThriftFormatter implements FetchFormatter { + class ThriftFormatter implements FetchFormatter { int protocol; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 1dde78e..ca38dc3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -105,7 +105,7 @@ * RecordWriter. * */ - public static interface RecordWriter { + public interface RecordWriter { void write(Writable w) throws IOException; void close(boolean abort) throws IOException; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java index 516ba42..01d2084 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java @@ -38,11 +38,7 @@ private static final long serialVersionUID = 1L; - /** - * Counter. 
- * - */ - public static enum Counter { + public enum Counter { FILTERED, PASSED } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java index c747099..d5b1d2c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java @@ -44,11 +44,7 @@ private transient SkewJoinHandler skewJoinKeyContext = null; - /** - * SkewkeyTableCounter. - * - */ - public static enum SkewkeyTableCounter { + public enum SkewkeyTableCounter { SKEWJOINFOLLOWUPJOBS } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java index d5de58e..0c0c6b0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java @@ -68,11 +68,7 @@ private static final long serialVersionUID = 1L; - /** - * Counter. - * - */ - public static enum Counter { + public enum Counter { DESERIALIZE_ERRORS } @@ -180,7 +176,7 @@ private MapOpCtx initObjectInspector(Configuration hconf, MapInputPath ctx, PartitionDesc pd = ctx.partDesc; TableDesc td = pd.getTableDesc(); - + MapOpCtx opCtx = new MapOpCtx(); // Use table properties in case of unpartitioned tables, // and the union of table properties and partition properties, with partition @@ -204,42 +200,42 @@ private MapOpCtx initObjectInspector(Configuration hconf, MapInputPath ctx, opCtx.partTblObjectInspectorConverter = ObjectInspectorConverters.getConverter( partRawRowObjectInspector, opCtx.tblRawRowObjectInspector); - + // Next check if this table has partitions and if so // get the list of partition names as well as allocate // the serdes for the partition columns String pcols = overlayedProps.getProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS); - + if (pcols != null && pcols.length() > 0) { String[] partKeys = pcols.trim().split("/"); String pcolTypes = overlayedProps .getProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES); String[] partKeyTypes = pcolTypes.trim().split(":"); - + if (partKeys.length > partKeyTypes.length) { throw new HiveException("Internal error : partKeys length, " +partKeys.length + " greater than partKeyTypes length, " + partKeyTypes.length); } - + List partNames = new ArrayList(partKeys.length); Object[] partValues = new Object[partKeys.length]; List partObjectInspectors = new ArrayList(partKeys.length); - + for (int i = 0; i < partKeys.length; i++) { String key = partKeys[i]; partNames.add(key); ObjectInspector oi = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector (TypeInfoFactory.getPrimitiveTypeInfo(partKeyTypes[i])); - + // Partitions do not exist for this table if (partSpec == null) { // for partitionless table, initialize partValue to null partValues[i] = null; } else { - partValues[i] = + partValues[i] = ObjectInspectorConverters. getConverter(PrimitiveObjectInspectorFactory. 
- javaStringObjectInspector, oi).convert(partSpec.get(key)); + javaStringObjectInspector, oi).convert(partSpec.get(key)); } partObjectInspectors.add(oi); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index a2975cb..0170079 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -63,11 +63,7 @@ public class MoveTask extends Task implements Serializable { private static final long serialVersionUID = 1L; - private static transient final Log LOG = LogFactory.getLog(MoveTask.class); - - public MoveTask() { - super(); - } + private static final transient Log LOG = LogFactory.getLog(MoveTask.class); private void moveFile(Path sourcePath, Path targetPath, boolean isDfsDir) throws Exception { @@ -350,15 +346,15 @@ public int execute(DriverContext driverContext) { db.loadDynamicPartitions( tbd.getSourcePath(), tbd.getTable().getTableName(), - tbd.getPartitionSpec(), - tbd.getReplace(), - dpCtx.getNumDPCols(), - tbd.getHoldDDLTime(), - isSkewedStoredAsDirs(tbd)); + tbd.getPartitionSpec(), + tbd.getReplace(), + dpCtx.getNumDPCols(), + tbd.getHoldDDLTime(), + isSkewedStoredAsDirs(tbd)); if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) { throw new HiveException("This query creates no partitions." + - " To turn off this error, set hive.error.on.empty.partition=false."); + " To turn off this error, set hive.error.on.empty.partition=false."); } // for each partition spec, get the partition @@ -412,13 +408,13 @@ public int execute(DriverContext driverContext) { numBuckets, sortCols); } - dc = new DataContainer(table.getTTable(), partn.getTPartition()); - // add this partition to post-execution hook - if (work.getOutputs() != null) { - work.getOutputs().add(new WriteEntity(partn, + dc = new DataContainer(table.getTTable(), partn.getTPartition()); + // add this partition to post-execution hook + if (work.getOutputs() != null) { + work.getOutputs().add(new WriteEntity(partn, (tbd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE : WriteEntity.WriteType.INSERT))); - } + } } } if (SessionState.get() != null && dc != null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java index 5aae311..92fb5b5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/NodeUtils.java @@ -74,7 +74,7 @@ } } - public static interface Function { + public interface Function { void apply(T argument); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ObjectCache.java ql/src/java/org/apache/hadoop/hive/ql/exec/ObjectCache.java index f2b374c..62afb54 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/ObjectCache.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ObjectCache.java @@ -26,12 +26,12 @@ * @param key * @param value */ - public void cache(String key, Object value); + void cache(String key, Object value); /** * Retrieve object from cache. * @param key * @return the last cached object with the key, null if none. */ - public Object retrieve(String key); + Object retrieve(String key); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java index db94271..34ccfba 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java @@ -78,7 +78,7 @@ * State. 
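ObjectCache, shown a little earlier in this hunk, is just a string-keyed cache with cache() and retrieve(); retrieve() returns null when nothing has been stored under the key. The usual pattern around it is get-or-compute. A sketch, where buildExpensiveObject() is a hypothetical stand-in for whatever the caller would otherwise rebuild per task:

    import org.apache.hadoop.hive.ql.exec.ObjectCache;

    public class ObjectCacheSketch {
      /** Return the cached value for key, computing and caching it on the first call. */
      static Object getOrCompute(ObjectCache cache, String key) {
        Object value = cache.retrieve(key);   // null if nothing was cached under this key
        if (value == null) {
          value = buildExpensiveObject();     // hypothetical stand-in for the real work
          cache.cache(key, value);
        }
        return value;
      }

      private static Object buildExpensiveObject() {
        return new Object();                  // placeholder; real callers build a plan, serde, etc.
      }
    }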
* */ - public static enum State { + public enum State { UNINIT, // initialize() has not been called INIT, // initialize() has been called and close() has not been called, // or close() has been called but one of its parent is not closed. @@ -827,7 +827,7 @@ public void reset(){ * OperatorFunc. * */ - public static interface OperatorFunc { + public interface OperatorFunc { void func(Operator op); } @@ -1254,15 +1254,15 @@ public Statistics getStatistics() { } return null; } - + public OpTraits getOpTraits() { if (conf != null) { return conf.getOpTraits(); } - + return null; } - + public void setOpTraits(OpTraits metaInfo) { if (LOG.isDebugEnabled()) { LOG.debug("Setting traits ("+metaInfo+") on "+this); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java index e917cdf..f5df5cf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/PTFOperator.java @@ -45,61 +45,61 @@ public class PTFOperator extends Operator implements Serializable { - private static final long serialVersionUID = 1L; - boolean isMapOperator; - - transient KeyWrapperFactory keyWrapperFactory; - protected transient KeyWrapper currentKeys; - protected transient KeyWrapper newKeys; - /* - * for map-side invocation of PTFs, we cannot utilize the currentkeys null check - * to decide on invoking startPartition in streaming mode. Hence this extra flag. - */ - transient boolean firstMapRow; - transient Configuration hiveConf; - transient PTFInvocation ptfInvocation; - - /* - * 1. Find out if the operator is invoked at Map-Side or Reduce-side - * 2. Get the deserialized QueryDef - * 3. Reconstruct the transient variables in QueryDef - * 4. Create input partition to store rows coming from previous operator - */ - @Override - protected void initializeOp(Configuration jobConf) throws HiveException { - hiveConf = jobConf; - // if the parent is ExtractOperator, this invocation is from reduce-side - isMapOperator = conf.isMapSide(); - - reconstructQueryDef(hiveConf); - - if (isMapOperator) { - PartitionedTableFunctionDef tDef = conf.getStartOfChain(); - outputObjInspector = tDef.getRawInputShape().getOI(); - } else { - outputObjInspector = conf.getFuncDef().getOutputShape().getOI(); - } - - setupKeysWrapper(inputObjInspectors[0]); - - ptfInvocation = setupChain(); - ptfInvocation.initializeStreaming(jobConf, isMapOperator); - firstMapRow = true; - - super.initializeOp(jobConf); - } - - @Override - protected void closeOp(boolean abort) throws HiveException { - super.closeOp(abort); + private static final long serialVersionUID = 1L; + boolean isMapOperator; + + transient KeyWrapperFactory keyWrapperFactory; + protected transient KeyWrapper currentKeys; + protected transient KeyWrapper newKeys; + /* + * for map-side invocation of PTFs, we cannot utilize the currentkeys null check + * to decide on invoking startPartition in streaming mode. Hence this extra flag. + */ + transient boolean firstMapRow; + transient Configuration hiveConf; + transient PTFInvocation ptfInvocation; + + /* + * 1. Find out if the operator is invoked at Map-Side or Reduce-side + * 2. Get the deserialized QueryDef + * 3. Reconstruct the transient variables in QueryDef + * 4. 
Create input partition to store rows coming from previous operator + */ + @Override + protected void initializeOp(Configuration jobConf) throws HiveException { + hiveConf = jobConf; + // if the parent is ExtractOperator, this invocation is from reduce-side + isMapOperator = conf.isMapSide(); + + reconstructQueryDef(hiveConf); + + if (isMapOperator) { + PartitionedTableFunctionDef tDef = conf.getStartOfChain(); + outputObjInspector = tDef.getRawInputShape().getOI(); + } else { + outputObjInspector = conf.getFuncDef().getOutputShape().getOI(); + } + + setupKeysWrapper(inputObjInspectors[0]); + + ptfInvocation = setupChain(); + ptfInvocation.initializeStreaming(jobConf, isMapOperator); + firstMapRow = true; + + super.initializeOp(jobConf); + } + + @Override + protected void closeOp(boolean abort) throws HiveException { + super.closeOp(abort); ptfInvocation.finishPartition(); ptfInvocation.close(); } - @Override - public void processOp(Object row, int tag) throws HiveException - { - if (!isMapOperator ) { + @Override + public void processOp(Object row, int tag) throws HiveException + { + if (!isMapOperator ) { /* * checkif current row belongs to the current accumulated Partition: * - If not: @@ -129,51 +129,51 @@ public void processOp(Object row, int tag) throws HiveException } ptfInvocation.processRow(row); - } - - /** - * Initialize the visitor to use the QueryDefDeserializer Use the order - * defined in QueryDefWalker to visit the QueryDef - * - * @param hiveConf - * @throws HiveException - */ - protected void reconstructQueryDef(Configuration hiveConf) throws HiveException { - - PTFDeserializer dS = - new PTFDeserializer(conf, (StructObjectInspector)inputObjInspectors[0], hiveConf); - dS.initializePTFChain(conf.getFuncDef()); - } - - protected void setupKeysWrapper(ObjectInspector inputOI) throws HiveException { - PartitionDef pDef = conf.getStartOfChain().getPartition(); - List exprs = pDef.getExpressions(); - int numExprs = exprs.size(); - ExprNodeEvaluator[] keyFields = new ExprNodeEvaluator[numExprs]; - ObjectInspector[] keyOIs = new ObjectInspector[numExprs]; - ObjectInspector[] currentKeyOIs = new ObjectInspector[numExprs]; - - for(int i=0; i exprs = pDef.getExpressions(); + int numExprs = exprs.size(); + ExprNodeEvaluator[] keyFields = new ExprNodeEvaluator[numExprs]; + ObjectInspector[] keyOIs = new ObjectInspector[numExprs]; + ObjectInspector[] currentKeyOIs = new ObjectInspector[numExprs]; + + for(int i=0; i fnDefs = new Stack(); PTFInputDef iDef = conf.getFuncDef(); @@ -197,9 +197,9 @@ private PTFInvocation setupChain() { fnDefs.push((PartitionedTableFunctionDef) iDef); iDef = ((PartitionedTableFunctionDef) iDef).getInput(); } - + PTFInvocation curr = null, first = null; - + while(!fnDefs.isEmpty()) { PartitionedTableFunctionDef currFn = fnDefs.pop(); curr = new PTFInvocation(curr, currFn.getTFunction()); @@ -222,26 +222,26 @@ public static void connectLeadLagFunctionsToPartition(PTFDesc ptfDesc, llFn.setpItr(pItr); } } - + /* * Responsible for the flow of rows through the PTF Chain. - * An Invocation wraps a TableFunction. - * The PTFOp hands the chain each row through the processRow call. + * An Invocation wraps a TableFunction. + * The PTFOp hands the chain each row through the processRow call. * It also notifies the chain of when a Partition starts/finishes. - * + * * There are several combinations depending * whether the TableFunction and its successor support Streaming or Batch mode. 
- * + * * Combination 1: Streaming + Streaming * - Start Partition: invoke startPartition on tabFn. - * - Process Row: invoke process Row on tabFn. + * - Process Row: invoke process Row on tabFn. * Any output rows hand to next tabFn in chain or forward to next Operator. * - Finish Partition: invoke finishPartition on tabFn. * Any output rows hand to next tabFn in chain or forward to next Operator. - * + * * Combination 2: Streaming + Batch * same as Combination 1 - * + * * Combination 3: Batch + Batch * - Start Partition: create or reset the Input Partition for the tabFn * caveat is: if prev is also batch and it is not providing an Output Iterator @@ -251,22 +251,22 @@ public static void connectLeadLagFunctionsToPartition(PTFDesc ptfDesc, * If function gives an Output Partition: set it on next Invocation's Input Partition * If function gives an Output Iterator: iterate and call processRow on next Invocation. * For last Invocation in chain: forward rows to next Operator. - * + * * Combination 3: Batch + Stream * Similar to Combination 3, except Finish Partition behavior slightly different * - Finish Partition : invoke evaluate on tabFn on Input Partition * iterate output rows: hand to next tabFn in chain or forward to next Operator. - * + * */ class PTFInvocation { - + PTFInvocation prev; PTFInvocation next; TableFunctionEvaluator tabFn; PTFPartition inputPart; PTFPartition outputPart; Iterator outputPartRowsItr; - + public PTFInvocation(PTFInvocation prev, TableFunctionEvaluator tabFn) { this.prev = prev; this.tabFn = tabFn; @@ -274,19 +274,19 @@ public PTFInvocation(PTFInvocation prev, TableFunctionEvaluator tabFn) { prev.next = this; } } - + boolean isOutputIterator() { return tabFn.canAcceptInputAsStream() || tabFn.canIterateOutput(); } - + boolean isStreaming() { return tabFn.canAcceptInputAsStream(); } - + void initializeStreaming(Configuration cfg, boolean isMapSide) throws HiveException { PartitionedTableFunctionDef tabDef = tabFn.getTableDef(); PTFInputDef inputDef = tabDef.getInput(); - ObjectInspector inputOI = conf.getStartOfChain() == tabDef ? + ObjectInspector inputOI = conf.getStartOfChain() == tabDef ? inputObjInspectors[0] : inputDef.getOutputShape().getOI(); tabFn.initializeStreaming(cfg, (StructObjectInspector) inputOI, isMapSide); @@ -295,7 +295,7 @@ void initializeStreaming(Configuration cfg, boolean isMapSide) throws HiveExcept next.initializeStreaming(cfg, isMapSide); } } - + void startPartition() throws HiveException { if ( isStreaming() ) { tabFn.startPartition(); @@ -312,7 +312,7 @@ void startPartition() throws HiveException { next.startPartition(); } } - + void processRow(Object row) throws HiveException { if ( isStreaming() ) { handleOutputRows(tabFn.processRow(row)); @@ -320,7 +320,7 @@ void processRow(Object row) throws HiveException { inputPart.append(row); } } - + void handleOutputRows(List outRows) throws HiveException { if ( outRows != null ) { for (Object orow : outRows ) { @@ -332,7 +332,7 @@ void handleOutputRows(List outRows) throws HiveException { } } } - + void finishPartition() throws HiveException { if ( isStreaming() ) { handleOutputRows(tabFn.finishPartition()); @@ -353,7 +353,7 @@ void finishPartition() throws HiveException { } } } - + if ( next != null ) { next.finishPartition(); } else { @@ -364,7 +364,7 @@ void finishPartition() throws HiveException { } } } - + /** * Create a new Partition. 
* A partition has 2 OIs: the OI for the rows being put in and the OI for the rows @@ -388,7 +388,7 @@ void finishPartition() throws HiveException { private void createInputPartition() throws HiveException { PartitionedTableFunctionDef tabDef = tabFn.getTableDef(); PTFInputDef inputDef = tabDef.getInput(); - ObjectInspector inputOI = conf.getStartOfChain() == tabDef ? + ObjectInspector inputOI = conf.getStartOfChain() == tabDef ? inputObjInspectors[0] : inputDef.getOutputShape().getOI(); SerDe serde = conf.isMapSide() ? tabDef.getInput().getOutputShape().getSerde() : @@ -400,7 +400,7 @@ private void createInputPartition() throws HiveException { (StructObjectInspector) inputOI, outputOI); } - + void close() { if ( inputPart != null ) { inputPart.close(); @@ -411,5 +411,5 @@ void close() { } } } - + } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java index 21d85f1..cd18e0a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java @@ -53,7 +53,7 @@ protected PTFPartition(Configuration cfg, throws HiveException { this(cfg, serDe, inputOI, outputOI, true); } - + protected PTFPartition(Configuration cfg, SerDe serDe, StructObjectInspector inputOI, StructObjectInspector outputOI, @@ -212,14 +212,14 @@ public PTFPartition getPartition() { public void reset() { idx = start; } - }; + } /* * provide an Iterator on the rows in a Partiton. * Iterator exposes the index of the next location. * Client can invoke lead/lag relative to the next location. */ - public static interface PTFPartitionIterator extends Iterator { + public interface PTFPartitionIterator extends Iterator { int getIndex(); T lead(int amt) throws HiveException; @@ -245,7 +245,7 @@ public static PTFPartition create(Configuration cfg, throws HiveException { return new PTFPartition(cfg, serDe, inputOI, outputOI); } - + public static PTFRollingPartition createRolling(Configuration cfg, SerDe serDe, StructObjectInspector inputOI, diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/PTFUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/PTFUtils.java index 6c11637..18f2fca 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/PTFUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/PTFUtils.java @@ -127,9 +127,7 @@ public void remove() public static abstract class Predicate { public abstract boolean apply(T obj); - }; - - + } /* * serialization functions diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java index 166461a..754e524 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java @@ -46,7 +46,7 @@ public class PartitionKeySampler implements OutputCollector { public static final Comparator C = new Comparator() { - public final int compare(byte[] o1, byte[] o2) { + public int compare(byte[] o1, byte[] o2) { return WritableComparator.compareBytes(o1, 0, o1.length, o2, 0, o2.length); } }; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionTableFunctionDescription.java ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionTableFunctionDescription.java index bd48531..90afc61 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionTableFunctionDescription.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionTableFunctionDescription.java @@ -27,14 +27,14 @@ import 
org.apache.hadoop.hive.ql.udf.ptf.WindowingTableFunction; @Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE}) +@Target(ElementType.TYPE) @Documented -public @interface PartitionTableFunctionDescription -{ - Description description (); +public @interface PartitionTableFunctionDescription { + Description description (); - /** - * if true it is not usable in the language. {@link WindowingTableFunction} is the only internal function. - */ - boolean isInternal() default false; + /** + * if true it is not usable in the language. {@link WindowingTableFunction} + * is the only internal function. + */ + boolean isInternal() default false; } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java index 5b857e2..cb2e7ab 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java @@ -60,11 +60,7 @@ private static final long serialVersionUID = 1L; - /** - * Counter. - * - */ - public static enum Counter { + public enum Counter { DESERIALIZE_ERRORS, SERIALIZE_ERRORS } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java index d7418db..cf11aa1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java @@ -95,7 +95,7 @@ private transient boolean fetchSource; - public static enum FeedType { + public enum FeedType { DYNAMIC_PARTITIONS, // list of dynamic partitions } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java index 484006a..093c4a9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/TopNHash.java @@ -49,8 +49,8 @@ * For interaction between operator and top-n hash. * Currently only used to forward key/values stored in hash. */ - public static interface BinaryCollector { - public void collect(byte[] key, byte[] value, int hash) throws IOException; + public interface BinaryCollector { + void collect(byte[] key, byte[] value, int hash) throws IOException; } public static final int FORWARD = -1; // Forward the row to reducer as is. @@ -202,7 +202,7 @@ public void tryStoreVectorizedKey(HiveKey key, boolean partColsIsNull, int batch Integer collisionIndex = indexes.store(index); if (null != collisionIndex) { /* - * since there is a collision index will be used for the next value + * since there is a collision index will be used for the next value * so have the map point back to original index. */ if ( indexes instanceof HashForGroup ) { @@ -286,7 +286,7 @@ public int getVectorizedKeyDistLength(int batchIndex) { public int getVectorizedKeyHashCode(int batchIndex) { return hashes[batchIndexToResult[batchIndex]]; } - + /** * Stores the value for the key in the heap. * @param index The index, either from tryStoreKey or from tryStoreVectorizedKey result. 
@@ -377,7 +377,7 @@ private void flushInternal() throws IOException, HiveException { } excluded = 0; } - + private interface IndexStore { int size(); /** diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 4cf4522..24bc0ba 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -206,9 +206,9 @@ * KEY: record key * VALUE: record value */ - public static enum ReduceField { + public enum ReduceField { KEY, VALUE - }; + } public static List reduceFieldNameList; static { @@ -976,7 +976,7 @@ protected synchronized Kryo initialValue() { removeField(kryo, MapWork.class, "opParseCtxMap"); removeField(kryo, MapWork.class, "joinTree"); return kryo; - }; + } }; @SuppressWarnings("rawtypes") protected static void removeField(Kryo kryo, Class type, String fieldName) { @@ -995,7 +995,7 @@ protected synchronized Kryo initialValue() { kryo.register(Path.class, new PathSerializer()); kryo.setInstantiatorStrategy(new StdInstantiatorStrategy()); return kryo; - }; + } }; public static TableDesc defaultTd; @@ -1185,7 +1185,7 @@ public static String abbreviate(String str, int max) { * StreamStatus. * */ - public static enum StreamStatus { + public enum StreamStatus { EOF, TERMINATED } @@ -1355,14 +1355,13 @@ public static String getFileExtension(JobConf jc, boolean isCompressed, throws IOException { CompressionCodec codec = null; CompressionType compressionType = CompressionType.NONE; - Class codecClass = null; if (isCompressed) { compressionType = SequenceFileOutputFormat.getOutputCompressionType(jc); - codecClass = FileOutputFormat.getOutputCompressorClass(jc, DefaultCodec.class); + Class codecClass = FileOutputFormat.getOutputCompressorClass(jc, DefaultCodec.class); codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, jc); } - return (SequenceFile.createWriter(fs, jc, file, keyClass, valClass, compressionType, codec, - progressable)); + return SequenceFile.createWriter(fs, jc, file, keyClass, valClass, compressionType, codec, + progressable); } @@ -3427,7 +3426,7 @@ public static boolean createDirsWithPermission(Configuration conf, Path mkdir, return createDirsWithPermission(conf, mkdir, fsPermission, recursive); } - private static void resetConfAndCloseFS (Configuration conf, boolean unsetUmask, + private static void resetConfAndCloseFS (Configuration conf, boolean unsetUmask, String origUmask, FileSystem fs) throws IOException { if (unsetUmask) { if (origUmask != null) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionDescription.java ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionDescription.java index f61eab4..b2be226 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionDescription.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionDescription.java @@ -28,39 +28,38 @@ import org.apache.hadoop.hive.ql.udf.ptf.WindowingTableFunction; @Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.TYPE}) +@Target(ElementType.TYPE) @Documented -public @interface WindowFunctionDescription -{ - Description description (); - /** - * controls whether this function can be applied to a Window. - *
- * Ranking function: Rank, Dense_Rank, Percent_Rank and Cume_Dist don't operate on Windows. - * Why? a window specification implies a row specific range i.e. every row gets its own set of rows to process the UDAF on. - * For ranking defining a set of rows for every row makes no sense. - *
- * All other UDAFs can be computed for a Window. - */ - boolean supportsWindow() default true; - /** - * A WindowFunc is implemented as {@link GenericUDAFResolver2}. It returns only one value. - * If this is true then the function must return a List which is taken to be the column for this function in the Output table returned by the - * {@link WindowingTableFunction}. Otherwise the output is assumed to be a single value, the column of the Output will contain the same value - * for all the rows. - */ - boolean pivotResult() default false; +public @interface WindowFunctionDescription { + Description description (); + /** + * controls whether this function can be applied to a Window. + *
+ * Ranking function: Rank, Dense_Rank, Percent_Rank and Cume_Dist don't operate on Windows. + * Why? a window specification implies a row specific range i.e. every row gets its own set of rows to process the UDAF on. + * For ranking defining a set of rows for every row makes no sense. + *
+ * All other UDAFs can be computed for a Window. + */ + boolean supportsWindow() default true; + /** + * A WindowFunc is implemented as {@link GenericUDAFResolver2}. It returns only one value. + * If this is true then the function must return a List which is taken to be the column for this function in the Output table returned by the + * {@link WindowingTableFunction}. Otherwise the output is assumed to be a single value, the column of the Output will contain the same value + * for all the rows. + */ + boolean pivotResult() default false; - /** - * Used in translations process to validate arguments - * @return true if ranking function - */ - boolean rankingFunction() default false; + /** + * Used in translations process to validate arguments + * @return true if ranking function + */ + boolean rankingFunction() default false; - /** - * Using in analytical functions to specify that UDF implies an ordering - * @return true if the function implies order - */ - boolean impliesOrder() default false; + /** + * Using in analytical functions to specify that UDF implies an ordering + * @return true if the function implies order + */ + boolean impliesOrder() default false; } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionInfo.java ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionInfo.java index 9b2d787..d5219b9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionInfo.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/WindowFunctionInfo.java @@ -22,45 +22,41 @@ import org.apache.hive.common.util.AnnotationUtils; @SuppressWarnings("deprecation") -public class WindowFunctionInfo implements CommonFunctionInfo -{ - boolean supportsWindow = true; - boolean pivotResult = false; - boolean impliesOrder = false; - FunctionInfo fInfo; +public class WindowFunctionInfo implements CommonFunctionInfo { - WindowFunctionInfo(FunctionInfo fInfo) - { - assert fInfo.isGenericUDAF(); - this.fInfo = fInfo; - Class wfnCls = fInfo.getGenericUDAFResolver().getClass(); - WindowFunctionDescription def = - AnnotationUtils.getAnnotation(wfnCls, WindowFunctionDescription.class); - if ( def != null) - { - supportsWindow = def.supportsWindow(); - pivotResult = def.pivotResult(); - impliesOrder = def.impliesOrder(); - } - } + boolean supportsWindow = true; + boolean pivotResult = false; + boolean impliesOrder = false; + FunctionInfo fInfo; - public boolean isSupportsWindow() - { - return supportsWindow; - } + WindowFunctionInfo(FunctionInfo fInfo) { + assert fInfo.isGenericUDAF(); + this.fInfo = fInfo; + Class wfnCls = fInfo.getGenericUDAFResolver().getClass(); + WindowFunctionDescription def = + AnnotationUtils.getAnnotation(wfnCls, WindowFunctionDescription.class); + if (def != null) { + supportsWindow = def.supportsWindow(); + pivotResult = def.pivotResult(); + impliesOrder = def.impliesOrder(); + } + } + + public boolean isSupportsWindow() { + return supportsWindow; + } - public boolean isPivotResult() - { - return pivotResult; - } + public boolean isPivotResult() { + return pivotResult; + } - public boolean isImpliesOrder(){ - return impliesOrder; - } - public FunctionInfo getfInfo() - { - return fInfo; - } + public boolean isImpliesOrder() { + return impliesOrder; + } + + public FunctionInfo getfInfo() { + return fInfo; + } @Override public Class getFunctionClass() { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java index 905e65e..547c2bc 100644 
--- ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java @@ -63,7 +63,7 @@ public MapJoinMemoryExhaustionHandler(LogHelper console, double maxMemoryUsage) if(maxHeapSize == -1) { this.maxHeapSize = 200L * 1024L * 1024L; LOG.warn("MemoryMXBean.getHeapMemoryUsage().getMax() returned -1, " + - "defaulting maxHeapSize to 200MB"); + "defaulting maxHeapSize to 200MB"); } else { this.maxHeapSize = maxHeapSize; } @@ -91,4 +91,4 @@ public void checkMemoryStatus(long tableContainerSize, long numRows) throw new MapJoinMemoryExhaustionException(msg); } } -} \ No newline at end of file +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHook.java ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHook.java index bb501c6..dbe8063 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHook.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHook.java @@ -25,8 +25,8 @@ @SuppressWarnings("deprecation") public interface HadoopJobExecHook { - - public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg); - public void logPlanProgress(SessionState ss) throws IOException; - + + boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg); + void logPlanProgress(SessionState ss) throws IOException; + } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/AbstractRowContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/AbstractRowContainer.java index 7ef5ebd..d22c312 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/AbstractRowContainer.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/AbstractRowContainer.java @@ -22,27 +22,27 @@ public interface AbstractRowContainer { - public interface RowIterator { - public ROW first() throws HiveException; - public ROW next() throws HiveException; + interface RowIterator { + ROW first() throws HiveException; + ROW next() throws HiveException; } - public RowIterator rowIter() throws HiveException; + RowIterator rowIter() throws HiveException; /** * add a row into the RowContainer * * @param t row */ - public void addRow(ROW t) throws HiveException; + void addRow(ROW t) throws HiveException; /** * @return number of elements in the RowContainer */ - public int rowCount() throws HiveException; + int rowCount() throws HiveException; /** * Remove all elements in the RowContainer. */ - public void clearRows() throws HiveException; + void clearRows() throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java index cdb5dc5..c00c7e7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java @@ -166,19 +166,19 @@ public BytesBytesMultiHashMap(int initialCapacity, float loadFactor, int wbSize) } /** The source of keys and values to put into hashtable; avoids byte copying. */ - public static interface KvSource { + public interface KvSource { /** Write key into output. */ - public void writeKey(RandomAccessOutput dest) throws SerDeException; + void writeKey(RandomAccessOutput dest) throws SerDeException; /** Write value into output. 
*/ - public void writeValue(RandomAccessOutput dest) throws SerDeException; + void writeValue(RandomAccessOutput dest) throws SerDeException; /** * Provide updated value for state byte for a key. * @param previousValue Previous value; null if this is the first call per key. * @return The updated value. */ - public byte updateStateByte(Byte previousValue); + byte updateStateByte(Byte previousValue); } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java index 629457c..d826ef4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java @@ -91,7 +91,7 @@ private LazyBinaryStructObjectInspector createInternalOi( .getLazyBinaryStructObjectInspector(colNames, colOis); } - private static interface KeyValueHelper extends BytesBytesMultiHashMap.KvSource { + private interface KeyValueHelper extends BytesBytesMultiHashMap.KvSource { void setKeyValue(Writable key, Writable val) throws SerDeException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java index ff6e5d4..75f7419 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainer.java @@ -35,7 +35,7 @@ * for each key. "Old" HashMapWrapper will still create/retrieve new objects for java HashMap; * but the optimized one doesn't have to. */ - public interface ReusableGetAdaptor { + interface ReusableGetAdaptor { /** * Changes current rows to which adaptor is referring to the rows corresponding to * the key represented by a VHKW object, and writers and batch used to interpret it. 
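As a usage sketch for the AbstractRowContainer contract tidied a few hunks above: a hypothetical helper that walks a container with the first()/next() iterator. It assumes, as the join operators do, that next() returns null once the rows are exhausted and that the row type is List<Object>; neither detail is spelled out in this hunk.

import java.util.List;

import org.apache.hadoop.hive.ql.exec.persistence.AbstractRowContainer;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public final class RowContainerUtil {
  private RowContainerUtil() {
  }

  // Counts the rows in a container; a real caller would forward each row instead.
  public static int drain(AbstractRowContainer<List<Object>> rows) throws HiveException {
    int n = 0;
    AbstractRowContainer.RowIterator<List<Object>> it = rows.rowIter();
    for (List<Object> row = it.first(); row != null; row = it.next()) {
      n++;
    }
    return n;
  }
}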
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java index a22c3c2..da36848 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java @@ -32,7 +32,7 @@ @SuppressWarnings("deprecation") public class MapJoinTableContainerSerDe { - + private final MapJoinObjectSerDeContext keyContext; private final MapJoinObjectSerDeContext valueContext; public MapJoinTableContainerSerDe(MapJoinObjectSerDeContext keyContext, @@ -70,7 +70,7 @@ public MapJoinPersistableTableContainer load(ObjectInputStream in) } try { Writable keyContainer = keySerDe.getSerializedClass().newInstance(); - Writable valueContainer = valueSerDe.getSerializedClass().newInstance(); + Writable valueContainer = valueSerDe.getSerializedClass().newInstance(); int numKeys = in.readInt(); for (int keyIndex = 0; keyIndex < numKeys; keyIndex++) { MapJoinKeyObject key = new MapJoinKeyObject(); @@ -89,7 +89,7 @@ public MapJoinPersistableTableContainer load(ObjectInputStream in) public void persist(ObjectOutputStream out, MapJoinPersistableTableContainer tableContainer) throws HiveException { int numKeys = tableContainer.size(); - try { + try { out.writeUTF(tableContainer.getClass().getName()); out.writeObject(tableContainer.getMetaData()); out.writeInt(numKeys); @@ -108,7 +108,7 @@ public void persist(ObjectOutputStream out, MapJoinPersistableTableContainer tab throw new ConcurrentModificationException("TableContainer was modified while persisting: " + tableContainer); } } - + public static void persistDummyTable(ObjectOutputStream out) throws IOException { MapJoinPersistableTableContainer tableContainer = new HashMapWrapper(); out.writeUTF(tableContainer.getClass().getName()); @@ -127,8 +127,8 @@ private MapJoinPersistableTableContainer create( return constructor.newInstance(metaData); } catch (Exception e) { String msg = "Error while attemping to create table container" + - " of type: " + name + ", with metaData: " + metaData; + " of type: " + name + ", with metaData: " + metaData; throw new HiveException(msg, e); } } -} \ No newline at end of file +} diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java index 360096b..906d1ee 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java @@ -269,8 +269,8 @@ public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath, Properties tableProperties, Progressable progress) throws IOException { FileSystem fs = finalOutPath.getFileSystem(jc); - final SequenceFile.Writer outStream = Utilities.createSequenceWriter(jc, fs, finalOutPath, - BytesWritable.class, valueClass, isCompressed, progress); + SequenceFile.Writer outStream = Utilities.createSequenceWriter(jc, fs, finalOutPath, + BytesWritable.class, valueClass, isCompressed, progress); return new PTFRecordWriter(outStream); } diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationBufferBatch.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationBufferBatch.java index 630046d..13c8cf7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationBufferBatch.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationBufferBatch.java @@ -62,7 +62,7 @@ /** * Array of indexes for aggregators that have variable size */ - private int[] variableSizeAggregators;; + private int[] variableSizeAggregators; /** * returns True if any of the aggregators has a variable size diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java index 07d7b8b..0857585 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java @@ -88,10 +88,10 @@ /** * Interface for processing mode: global, hash or streaming */ - private static interface IProcessingMode { - public void initialize(Configuration hconf) throws HiveException; - public void processBatch(VectorizedRowBatch batch) throws HiveException; - public void close(boolean aborted) throws HiveException; + private interface IProcessingMode { + void initialize(Configuration hconf) throws HiveException; + void processBatch(VectorizedRowBatch batch) throws HiveException; + void close(boolean aborted) throws HiveException; } /** @@ -150,7 +150,7 @@ protected VectorAggregationBufferRow allocateAggregationBuffer() throws HiveExce private class ProcessingModeGlobalAggregate extends ProcessingModeBase { /** - * In global processing mode there is only one set of aggregation buffers + * In global processing mode there is only one set of aggregation buffers */ private VectorAggregationBufferRow aggregationBuffers; @@ -206,7 +206,7 @@ public void close(boolean aborted) throws HiveException { private long sumBatchSize; /** - * Max number of entries in the vector group by aggregation hashtables. + * Max number of entries in the vector group by aggregation hashtables. * Exceeding this will trigger a flush irrelevant of memory pressure condition. */ private int maxHtEntries = 1000000; @@ -220,12 +220,12 @@ public void close(boolean aborted) throws HiveException { * Percent of entries to flush when memory threshold exceeded. 
*/ private float percentEntriesToFlush = 0.1f; - + /** * A soft reference used to detect memory pressure */ private SoftReference gcCanary = new SoftReference(new Object()); - + /** * Counts the number of time the gcCanary died and was resurrected */ @@ -262,7 +262,7 @@ public void initialize(Configuration hconf) throws HiveException { HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); this.numRowsCompareHashAggr = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL); - } + } else { this.percentEntriesToFlush = HiveConf.ConfVars.HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT.defaultFloatVal; @@ -295,14 +295,14 @@ public void processBatch(VectorizedRowBatch batch) throws HiveException { processAggregators(batch); //Flush if memory limits were reached - // We keep flushing until the memory is under threshold + // We keep flushing until the memory is under threshold int preFlushEntriesCount = numEntriesHashTable; while (shouldFlush(batch)) { flush(false); if(gcCanary.get() == null) { gcCanaryFlushes++; - gcCanary = new SoftReference(new Object()); + gcCanary = new SoftReference(new Object()); } //Validate that some progress is being made @@ -441,7 +441,7 @@ private void flush(boolean all) throws HiveException { mapKeysAggregationBuffers.clear(); numEntriesHashTable = 0; } - + if (all && LOG.isDebugEnabled()) { LOG.debug(String.format("GC canary caused %d flushes", gcCanaryFlushes)); } @@ -468,7 +468,7 @@ private boolean shouldFlush(VectorizedRowBatch batch) { if (gcCanary.get() == null) { return true; } - + return false; } @@ -488,14 +488,14 @@ private void updateAvgVariableSize(VectorizedRowBatch batch) { } /** - * Checks if the HT reduces the number of entries by at least minReductionHashAggr factor + * Checks if the HT reduces the number of entries by at least minReductionHashAggr factor * @throws HiveException */ private void checkHashModeEfficiency() throws HiveException { if (lastModeCheckRowCount > numRowsCompareHashAggr) { lastModeCheckRowCount = 0; if (LOG.isDebugEnabled()) { - LOG.debug(String.format("checkHashModeEfficiency: HT:%d RC:%d MIN:%d", + LOG.debug(String.format("checkHashModeEfficiency: HT:%d RC:%d MIN:%d", numEntriesHashTable, sumBatchSize, (long)(sumBatchSize * minReductionHashAggr))); } if (numEntriesHashTable > sumBatchSize * minReductionHashAggr) { @@ -513,7 +513,7 @@ private void checkHashModeEfficiency() throws HiveException { */ private class ProcessingModeStreaming extends ProcessingModeBase { - /** + /** * The aggreagation buffers used in streaming mode */ private VectorAggregationBufferRow currentStreamingAggregators; @@ -526,19 +526,19 @@ private void checkHashModeEfficiency() throws HiveException { /** * The keys that needs to be flushed at the end of the current batch */ - private final VectorHashKeyWrapper[] keysToFlush = + private final VectorHashKeyWrapper[] keysToFlush = new VectorHashKeyWrapper[VectorizedRowBatch.DEFAULT_SIZE]; /** * The aggregates that needs to be flushed at the end of the current batch */ - private final VectorAggregationBufferRow[] rowsToFlush = + private final VectorAggregationBufferRow[] rowsToFlush = new VectorAggregationBufferRow[VectorizedRowBatch.DEFAULT_SIZE]; /** * A pool of VectorAggregationBufferRow to avoid repeated allocations */ - private VectorUtilBatchObjectPool + private VectorUtilBatchObjectPool streamAggregationBufferRowPool; @Override @@ -693,9 +693,9 @@ protected void initializeOp(Configuration hconf) throws HiveException { /** * changes the processing mode to streaming - * This is done at the request of the 
hash agg mode, if the number of keys + * This is done at the request of the hash agg mode, if the number of keys * exceeds the minReductionHashAggr factor - * @throws HiveException + * @throws HiveException */ private void changeToStreamingMode() throws HiveException { processingMode = this.new ProcessingModeStreaming(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java index 5f6df78..4692e33 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java @@ -146,7 +146,7 @@ protected Object clone() { duplicateTo(clone); return clone; } - + public void duplicateTo(VectorHashKeyWrapper clone) { clone.longValues = longValues.clone(); clone.doubleValues = doubleValues.clone(); @@ -155,7 +155,7 @@ public void duplicateTo(VectorHashKeyWrapper clone) { // Decimal128 requires deep clone clone.decimalValues = new Decimal128[decimalValues.length]; for(int i = 0; i < decimalValues.length; ++i) { - clone.decimalValues[i] = new Decimal128().update(decimalValues[i]); + clone.decimalValues[i] = new Decimal128().update(decimalValues[i]); } clone.byteValues = new byte[byteValues.length][]; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java index 607f641..f90f559 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java @@ -167,7 +167,7 @@ public void assign(VectorExpressionWriter[] writers, List oids) // This is a vectorized aware evaluator ExprNodeEvaluator eval = new ExprNodeEvaluator(desc) { - int columnIndex;; + int columnIndex; int writerIndex; public ExprNodeEvaluator initVectorExpr(int columnIndex, int writerIndex) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java index aa0afe7..7e49f86 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorSMBMapJoinOperator.java @@ -175,7 +175,7 @@ public SMBJoinKeyEvaluator init() { key.set(i, keyWrapperBatch.getWritableKeyValue(kw, i, keyOutputWriters[i])); } return key; - }; + } }.init(); Map> valueExpressions = conf.getExprs(); @@ -202,7 +202,7 @@ public void assign(VectorExpressionWriter[] writers, List oids) // This is a vectorized aware evaluator ExprNodeEvaluator eval = new ExprNodeEvaluator(desc) { - int columnIndex;; + int columnIndex; int writerIndex; public ExprNodeEvaluator initVectorExpr(int columnIndex, int writerIndex) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorUtilBatchObjectPool.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorUtilBatchObjectPool.java index da8646b..983558a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorUtilBatchObjectPool.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorUtilBatchObjectPool.java @@ -20,37 +20,34 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; -/** - * - */ public class VectorUtilBatchObjectPool { private final T[] buffer; - + /** * Head of the pool. 
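The gcCanary field touched in the VectorGroupByOperator hunk above relies on a plain JDK idiom: a SoftReference is cleared by the collector only under memory pressure, so finding the referent gone is treated as a signal to flush the hash table. A minimal, Hive-independent sketch of that pattern follows; the class and method names are illustrative, not from the patch.

import java.lang.ref.SoftReference;

public class GcCanary {
  // Soft references are cleared by the JVM only when heap is tight,
  // so a null referent is a cheap "running low on memory" signal.
  private SoftReference<Object> canary = new SoftReference<Object>(new Object());
  private int timesDied = 0;

  public boolean memoryPressureDetected() {
    if (canary.get() == null) {
      timesDied++;                                        // analogous to gcCanaryFlushes
      canary = new SoftReference<Object>(new Object());   // re-arm the canary
      return true;                                        // caller should flush its buffers
    }
    return false;
  }

  public int getTimesDied() {
    return timesDied;
  }
}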
This is where where we should insert the next - * object returned to the pool + * object returned to the pool */ private int head = 0; - + /** * Count of available elements. They are behind the head, with wrap-around * The head itself is not free, is null */ private int count = 0; - - private IAllocator allocator; - - public static interface IAllocator { - public T alloc() throws HiveException; - public void free(T t); + + private IAllocator allocator; + + public interface IAllocator { + T alloc() throws HiveException; + void free(T t); } - + @SuppressWarnings("unchecked") public VectorUtilBatchObjectPool(int size, IAllocator allocator) { buffer = (T[]) new Object[size]; this.allocator = allocator; } - + public T getFromPool() throws HiveException { T ret = null; if (count == 0) { @@ -63,10 +60,10 @@ public T getFromPool() throws HiveException { buffer[tail] = null; --count; } - + return ret; } - + public void putInPool(T object) { if (count < buffer.length) { buffer[head] = object; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java index 535e4b3..a6024e3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java @@ -512,7 +512,7 @@ private ExprNodeDesc getImplicitCastExpression(GenericUDF udf, ExprNodeDesc chil } return null; } - + private int getPrecisionForType(PrimitiveTypeInfo typeInfo) { if (isFloatFamily(typeInfo.getTypeName())) { return HiveDecimal.MAX_PRECISION; @@ -569,8 +569,8 @@ private GenericUDF getGenericUDFForCast(TypeInfo castType) throws HiveException ((GenericUDFBridge) genericUdf).setUdfClassName(udfClass.getClass().getName()); } if (genericUdf instanceof SettableUDF) { - ((SettableUDF)genericUdf).setTypeInfo(castType); - } + ((SettableUDF) genericUdf).setTypeInfo(castType); + } return genericUdf; } @@ -662,7 +662,7 @@ ExprNodeDesc foldConstantsForUnaryExpression(ExprNodeDesc exprDesc) throws HiveE if (!(exprDesc instanceof ExprNodeGenericFuncDesc)) { return exprDesc; } - + if (exprDesc.getChildren() == null || (exprDesc.getChildren().size() != 1) ) { return exprDesc; } @@ -693,7 +693,7 @@ ExprNodeDesc foldConstantsForUnaryExpression(ExprNodeDesc exprDesc) throws HiveE ExprNodeEvaluator evaluator = ExprNodeEvaluatorFactory.get(exprDesc); ObjectInspector output = evaluator.initialize(childoi); Object constant = evaluator.evaluate(null); - Object java = ObjectInspectorUtils.copyToStandardJavaObject(constant, output); + Object java = ObjectInspectorUtils.copyToStandardJavaObject(constant, output); return new ExprNodeConstantDesc(exprDesc.getTypeInfo(), java); } @@ -724,7 +724,7 @@ private VectorExpression getConstantVectorExpression(Object constantValue, TypeI outCol = ocm.allocateOutputColumn(colVectorType); } if (constantValue == null) { - return new ConstantVectorExpression(outCol, type, true); + return new ConstantVectorExpression(outCol, type, true); } else if (decimalTypePattern.matcher(type).matches()) { VectorExpression ve = new ConstantVectorExpression(outCol, (Decimal128) constantValue); ve.setOutputType(typeInfo.getTypeName()); @@ -1177,7 +1177,7 @@ private VectorExpression getCastToDoubleExpression(Class udf, List oids); } @@ -1005,25 +1005,25 @@ public static void processVectorExpressions( */ public static VectorExpressionWriter[] getExpressionWriters(StructObjectInspector objInspector) throws HiveException { - + if (objInspector.isSettable()) 
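A usage sketch for the VectorUtilBatchObjectPool/IAllocator pair reformatted just above, assuming the generic type parameters that the flattened diff does not show (VectorUtilBatchObjectPool<T> with a nested IAllocator<T>); the pooled StringBuilder type is only an illustration.

import org.apache.hadoop.hive.ql.exec.vector.VectorUtilBatchObjectPool;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class PoolExample {
  public static void main(String[] args) throws HiveException {
    VectorUtilBatchObjectPool<StringBuilder> pool =
        new VectorUtilBatchObjectPool<StringBuilder>(16,
            new VectorUtilBatchObjectPool.IAllocator<StringBuilder>() {
              @Override
              public StringBuilder alloc() throws HiveException {
                return new StringBuilder();   // only invoked when the pool is empty
              }

              @Override
              public void free(StringBuilder sb) {
                sb.setLength(0);              // reset state before the object is reused
              }
            });

    StringBuilder sb = pool.getFromPool();    // allocates via alloc() on first use
    sb.append("scratch work");
    pool.putInPool(sb);                       // cached for the next getFromPool() call
  }
}

The streaming group-by mode above uses the same pool to avoid repeated allocations of VectorAggregationBufferRow instances.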
{ return getSettableExpressionWriters((SettableStructObjectInspector) objInspector); } - + List allFieldRefs = objInspector.getAllStructFieldRefs(); - + VectorExpressionWriter[] expressionWriters = new VectorExpressionWriter[allFieldRefs.size()]; - + for(int i=0; i fieldsRef = objInspector.getAllStructFieldRefs(); + List fieldsRef = objInspector.getAllStructFieldRefs(); VectorExpressionWriter[] writers = new VectorExpressionWriter[fieldsRef.size()]; for(int i=0; i values) throws IOException; } @@ -86,7 +86,7 @@ * Info. * */ - public static class Info { + class Info { } @@ -94,38 +94,37 @@ * SessionInfo. * */ - public static class SessionInfo extends Info { + class SessionInfo extends Info { public String sessionId; - }; + } /** * QueryInfo. * */ - public static class QueryInfo extends Info { + class QueryInfo extends Info { public Map hm = new HashMap(); public Map rowCountMap = new HashMap(); - }; + } /** * TaskInfo. * */ - public static class TaskInfo extends Info { + class TaskInfo extends Info { public Map hm = new HashMap(); - }; - + } /** * @return historyFileName */ - public String getHistFileName(); + String getHistFileName(); /** * Called at the start of query execution in Driver.execute(). */ - public void startQuery(String cmd, String id); + void startQuery(String cmd, String id); /** * Used to set query status and other attributes of a query @@ -134,7 +133,7 @@ * @param propName * @param propValue */ - public void setQueryProperty(String queryId, Keys propName, String propValue); + void setQueryProperty(String queryId, Keys propName, String propValue); /** * Used to set task properties. @@ -143,8 +142,7 @@ * @param propName * @param propValue */ - public void setTaskProperty(String queryId, String taskId, Keys propName, - String propValue); + void setTaskProperty(String queryId, String taskId, Keys propName, String propValue); /** * Serialize the task counters and set as a task property. @@ -153,16 +151,16 @@ public void setTaskProperty(String queryId, String taskId, Keys propName, * @param taskId * @param ctrs */ - public void setTaskCounters(String queryId, String taskId, Counters ctrs); + void setTaskCounters(String queryId, String taskId, Counters ctrs); - public void printRowCount(String queryId); + void printRowCount(String queryId); /** * Called at the end of a query * * @param queryId */ - public void endQuery(String queryId); + void endQuery(String queryId); /** * Called at the start of a task. Called by Driver.run() A query can have @@ -170,15 +168,14 @@ public void setTaskProperty(String queryId, String taskId, Keys propName, * * @param task */ - public void startTask(String queryId, Task task, - String taskName); + void startTask(String queryId, Task task, String taskName); /** * Called at the end of a task. 
* * @param task */ - public void endTask(String queryId, Task task); + void endTask(String queryId, Task task); /** * Logs progress of a task if ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS is @@ -186,7 +183,7 @@ public void startTask(String queryId, Task task, * * @param task */ - public void progressTask(String queryId, Task task); + void progressTask(String queryId, Task task); /** @@ -194,7 +191,7 @@ public void startTask(String queryId, Task task, * @param plan * @throws IOException */ - public void logPlanProgress(QueryPlan plan) throws IOException; + void logPlanProgress(QueryPlan plan) throws IOException; /** @@ -202,12 +199,12 @@ public void startTask(String queryId, Task task, * * @param map */ - public void setIdToTableMap(Map map); + void setIdToTableMap(Map map); /** * Close the log file stream */ - public void closeStream(); + void closeStream(); diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java index cdc929a..1d400fc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java @@ -51,10 +51,14 @@ private static final Object LOCK = new Object(); private static ExecutorService executor; private static TimelineClient timelineClient; - private enum EntityTypes { HIVE_QUERY_ID }; - private enum EventTypes { QUERY_SUBMITTED, QUERY_COMPLETED }; - private enum OtherInfoTypes { QUERY, STATUS, TEZ, MAPRED }; - private enum PrimaryFilterTypes { user }; + private enum EntityTypes { HIVE_QUERY_ID } + + private enum EventTypes { QUERY_SUBMITTED, QUERY_COMPLETED } + + private enum OtherInfoTypes { QUERY, STATUS, TEZ, MAPRED } + + private enum PrimaryFilterTypes { user } + private static final int WAIT_TIME = 3; public ATSHook() { diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java index 50c76db..4022550 100644 --- ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java @@ -39,7 +39,7 @@ /** * The type of the entity. */ - public static enum Type { + public enum Type { DATABASE, TABLE, PARTITION, DUMMYPARTITION, DFS_DIR, LOCAL_DIR, FUNCTION } diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java index 49c095a..97dd033 100644 --- ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java @@ -37,7 +37,7 @@ */ public class HookContext { - static public enum HookType { + public enum HookType { PRE_EXEC_HOOK, POST_EXEC_HOOK, ON_FAILURE_HOOK } diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java index 6ff2977..5a053bc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java @@ -50,7 +50,7 @@ * of a user script through a TRANSFORM, MAP or REDUCE syntax * or from the output of a PTF chain execution. 
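The HiveHistory methods trimmed earlier in this section form a simple per-query lifecycle. The schematic below only illustrates the intended call order; the wrapper method is hypothetical, and in real code Driver and SessionState own these calls.

import org.apache.hadoop.hive.ql.history.HiveHistory;

public class HistorySketch {
  static void recordQuery(HiveHistory history, String cmd, String queryId) {
    history.startQuery(cmd, queryId);   // start of query execution (Driver.execute())
    // ... per-task startTask()/setTaskProperty()/setTaskCounters()/endTask() calls run here ...
    history.printRowCount(queryId);     // emit collected row counts
    history.endQuery(queryId);          // query finished
    history.closeStream();              // typically when the session shuts down
  }
}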
*/ - public static enum DependencyType { + public enum DependencyType { SIMPLE, EXPRESSION, SCRIPT } diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java index 8ca9b1b..ac44ab5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/PreExecute.java @@ -42,8 +42,8 @@ * The user group security information. */ @Deprecated - public void run(SessionState sess, Set inputs, - Set outputs, UserGroupInformation ugi) + void run(SessionState sess, Set inputs, Set outputs, + UserGroupInformation ugi) throws Exception; } diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java index 7f1d71b..3fdb6f9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java +++ ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java @@ -39,7 +39,7 @@ private boolean isTempURI = false; - public static enum WriteType { + public enum WriteType { DDL_EXCLUSIVE, // for use in DDL statements that require an exclusive lock, // such as dropping a table or partition DDL_SHARED, // for use in DDL operations that only need a shared lock, such as creating a table @@ -49,7 +49,7 @@ UPDATE, DELETE, PATH_WRITE, // Write to a URI, no locking done for this - }; + } private WriteType writeType; diff --git ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java index 835caf1..7f2fd83 100644 --- ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java +++ ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java @@ -29,13 +29,13 @@ public static String INDEX_TABLE_CREATETIME = "hive.index.basetbl.dfs.lastModifiedTime"; - public static enum IndexType { + public enum IndexType { AGGREGATE_TABLE("aggregate", "org.apache.hadoop.hive.ql.AggregateIndexHandler"), COMPACT_SUMMARY_TABLE("compact", "org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler"), BITMAP_TABLE("bitmap", "org.apache.hadoop.hive.ql.index.bitmap.BitmapIndexHandler"); - private IndexType(String indexType, String className) { + IndexType(String indexType, String className) { indexTypeName = indexType; this.handlerClsName = className; } diff --git ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java index 683618f..252f937 100644 --- ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java @@ -265,7 +265,7 @@ public void setAcceptsFields(boolean acceptsFields) { this.acceptsFields = acceptsFields; } - public static interface FieldValidator { + public interface FieldValidator { boolean validate(ExprNodeFieldDesc exprNodeDesc); } diff --git ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapQuery.java ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapQuery.java index 00ea86d..02f18da 100644 --- ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapQuery.java +++ ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapQuery.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hive.ql.index.bitmap; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; /** * Generic interface to representations of queries on bitmap indexes */ public interface BitmapQuery { - public String getAlias(); - public String toString(); + String getAlias(); + + String toString(); } diff --git 
ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java index 71a9dd4..ed567a4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java @@ -95,7 +95,7 @@ /** * Options for controlling the record readers. */ - public static class Options { + class Options { private final Configuration conf; private Reporter reporter; @@ -126,9 +126,9 @@ public Reporter getReporter() { } } - public static interface RowReader + interface RowReader extends RecordReader { - public ObjectInspector getObjectInspector(); + ObjectInspector getObjectInspector(); } /** @@ -140,12 +140,11 @@ public Reporter getReporter() { * @return a record reader * @throws IOException */ - public RowReader getReader(InputSplit split, - Options options) throws IOException; + RowReader getReader(InputSplit split, Options options) throws IOException; - public static interface RawReader + interface RawReader extends RecordReader { - public ObjectInspector getObjectInspector(); + ObjectInspector getObjectInspector(); } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java index 6b330e1..99fc158 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java @@ -39,7 +39,7 @@ /** * Options to control how the files are written */ - public static class Options { + class Options { private final Configuration configuration; private FileSystem fs; private ObjectInspector inspector; @@ -229,8 +229,7 @@ boolean getOldStyle() { * @param options the options for the writer * @return the RecordUpdater for the output file */ - public RecordUpdater getRecordUpdater(Path path, - Options options) throws IOException; + RecordUpdater getRecordUpdater(Path path, Options options) throws IOException; /** * Create a raw writer for ACID events. @@ -240,6 +239,5 @@ public RecordUpdater getRecordUpdater(Path path, * @return a record writer * @throws IOException */ - public RecordWriter getRawRecordWriter(Path path, - Options options) throws IOException; + RecordWriter getRawRecordWriter(Path path, Options options) throws IOException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index f584926..cf6f70d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -164,7 +164,7 @@ static long parseBase(Path path) { return result; } - public static interface Directory { + public interface Directory { /** * Get the base directory. 
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/ContentSummaryInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/ContentSummaryInputFormat.java index 4eb706e..75a921e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/ContentSummaryInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/ContentSummaryInputFormat.java @@ -30,7 +30,7 @@ */ public interface ContentSummaryInputFormat { - public ContentSummary getContentSummary(Path p, JobConf job) + ContentSummary getContentSummary(Path p, JobConf job) throws IOException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java index 3d93a40..64a82c3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/FlatFileInputFormat.java @@ -70,7 +70,7 @@ * not have a way of configuring the actual Java class being * serialized/deserialized. */ - public static interface SerializationContext extends Configurable { + public interface SerializationContext extends Configurable { /** * An {@link Serialization} object for objects of type S. diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java index 5a66f87..9ad7f37 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveIgnoreKeyTextOutputFormat.java @@ -78,7 +78,7 @@ public RecordWriter getHiveRecordWriter(JobConf jc, Path outPath, final int finalRowSeparator = rowSeparator; FileSystem fs = outPath.getFileSystem(jc); final OutputStream outStream = Utilities.createCompressedStream(jc, - fs.create(outPath, progress), isCompressed); + fs.create(outPath, progress), isCompressed); return new RecordWriter() { @Override public void write(Writable r) throws IOException { diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index 5c4459b..ef36068 100755 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -325,7 +325,7 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job boolean pushDownProjection = false; //Buffers to hold filter pushdown information StringBuilder readColumnsBuffer = new StringBuilder(newjob. - get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, ""));; + get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "")); StringBuilder readColumnNamesBuffer = new StringBuilder(newjob. get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, "")); // for each dir, get the InputFormat, and do getSplits. 
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java index e56ece7..ebf4e40 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveNullValueSequenceFileOutputFormat.java @@ -54,7 +54,7 @@ public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath, FileSystem fs = finalOutPath.getFileSystem(jc); final SequenceFile.Writer outStream = Utilities.createSequenceWriter(jc, fs, finalOutPath, - HiveKey.class, NullWritable.class, isCompressed, progress); + HiveKey.class, NullWritable.class, isCompressed, progress); keyWritable = new HiveKey(); keyIsText = valueClass.equals(Text.class); diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HivePartitioner.java ql/src/java/org/apache/hadoop/hive/ql/io/HivePartitioner.java index 03ba250..e48f27a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/HivePartitioner.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HivePartitioner.java @@ -26,6 +26,6 @@ /** * Get the final bucket within a partition. */ - public int getBucket(K2 key, V2 value, int numBuckets); + int getBucket(K2 key, V2 value, int numBuckets); } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java index a451faf..0dacea5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveSequenceFileOutputFormat.java @@ -62,7 +62,7 @@ public RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath, FileSystem fs = finalOutPath.getFileSystem(jc); final SequenceFile.Writer outStream = Utilities.createSequenceWriter(jc, fs, finalOutPath, - BytesWritable.class, valueClass, isCompressed, progress); + BytesWritable.class, valueClass, isCompressed, progress); return new RecordWriter() { @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java index 8cbf32f..4fd3298 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java @@ -70,7 +70,7 @@ public static void clear() { // The class name of the generic UDF being used by the filter String genericUDFClassName = null; - public static enum Comparison { + public enum Comparison { GREATER, LESS, EQUAL, diff --git ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java index 5ec5344..4cc6b8e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java @@ -133,7 +133,7 @@ public void write(WritableComparable key, BytesRefArrayWritable value) RCFileOutputFormat.setColumnNumber(jc, cols.length); final RCFile.Writer outWriter = Utilities.createRCFileWriter(jc, - finalOutPath.getFileSystem(jc), finalOutPath, isCompressed, progress); + finalOutPath.getFileSystem(jc), finalOutPath, isCompressed, progress); return new org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter() { @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java index ea6337c..3781ad8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java @@ -64,7 +64,7 @@ private final Map cache; public RCFileSyncCache() { - cache = Collections.synchronizedMap(new WeakHashMap()); + cache = Collections.synchronizedMap(new WeakHashMap()); } public void put(FileSplit split, long endSync) { diff --git ql/src/java/org/apache/hadoop/hive/ql/io/ReworkMapredInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/ReworkMapredInputFormat.java index a76aece..fe48509 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/ReworkMapredInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/ReworkMapredInputFormat.java @@ -24,5 +24,5 @@ import org.apache.hadoop.hive.ql.plan.MapredWork; public interface ReworkMapredInputFormat { - public void rework(HiveConf job, MapredWork work) throws IOException; + void rework(HiveConf job, MapredWork work) throws IOException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/DirectDecompressionCodec.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/DirectDecompressionCodec.java index 41a77b0..d2b263f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/DirectDecompressionCodec.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/DirectDecompressionCodec.java @@ -21,6 +21,8 @@ import java.nio.ByteBuffer; public interface DirectDecompressionCodec extends CompressionCodec { - public boolean isAvailable(); - public void directDecompress(ByteBuffer in, ByteBuffer out) throws IOException; + + boolean isAvailable(); + + void directDecompress(ByteBuffer in, ByteBuffer out) throws IOException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java index 99500a4..380dd90 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java @@ -50,7 +50,7 @@ * prevent the new reader from reading ORC files generated by any released * version of Hive. */ - public static enum Version { + public enum Version { V_0_11("0.11", 0, 11), V_0_12("0.12", 0, 12); @@ -60,7 +60,7 @@ private final int major; private final int minor; - private Version(String name, int major, int minor) { + Version(String name, int major, int minor) { this.name = name; this.major = major; this.minor = minor; @@ -97,8 +97,8 @@ public int getMinor() { } } - public static enum EncodingStrategy { - SPEED, COMPRESSION; + public enum EncodingStrategy { + SPEED, COMPRESSION } // Note : these string definitions for table properties are deprecated, @@ -116,7 +116,7 @@ public int getMinor() { * If introducing a new orc-specific table property, * add it here. 
*/ - public static enum OrcTableProperties { + public enum OrcTableProperties { COMPRESSION("orc.compress"), COMPRESSION_BLOCK_SIZE("orc.compress.size"), STRIPE_SIZE("orc.stripe.size"), @@ -204,13 +204,13 @@ public static Reader createReader(Path path, return new ReaderImpl(path, options); } - public static interface WriterContext { + public interface WriterContext { Writer getWriter(); } - public static interface WriterCallback { - public void preStripeWrite(WriterContext context) throws IOException; - public void preFooterWrite(WriterContext context) throws IOException; + public interface WriterCallback { + void preStripeWrite(WriterContext context) throws IOException; + void preFooterWrite(WriterContext context) throws IOException; } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java index 7edb3c2..bcf5851 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java @@ -908,33 +908,31 @@ private boolean isStripeSatisfyPredicate(StripeStatistics stripeStatistics, } } - static List generateSplitsInfo(Configuration conf) - throws IOException { - // use threads to resolve directories into splits - Context context = new Context(conf); - for(Path dir: getInputPaths(conf)) { - FileSystem fs = dir.getFileSystem(conf); - context.schedule(new FileGenerator(context, fs, dir)); - } - context.waitForTasks(); - // deal with exceptions - if (!context.errors.isEmpty()) { - List errors = - new ArrayList(context.errors.size()); - for(Throwable th: context.errors) { - if (th instanceof IOException) { - errors.add((IOException) th); - } else { - throw new RuntimeException("serious problem", th); - } - } - throw new InvalidInputException(errors); - } + static List generateSplitsInfo(Configuration conf) throws IOException { + // use threads to resolve directories into splits + Context context = new Context(conf); + for (Path dir : getInputPaths(conf)) { + FileSystem fs = dir.getFileSystem(conf); + context.schedule(new FileGenerator(context, fs, dir)); + } + context.waitForTasks(); + // deal with exceptions + if (!context.errors.isEmpty()) { + List errors = new ArrayList(context.errors.size()); + for (Throwable th : context.errors) { + if (th instanceof IOException) { + errors.add((IOException) th); + } else { + throw new RuntimeException("serious problem", th); + } + } + throw new InvalidInputException(errors); + } if (context.cacheStripeDetails) { LOG.info("FooterCacheHitRatio: " + context.cacheHitCounter.get() + "/" - + context.numFilesCounter.get()); + + context.numFilesCounter.get()); } - return context.splits; + return context.splits; } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java index df5afd1..f94384e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java @@ -131,7 +131,7 @@ /** * Options for creating a RecordReader. 
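A minimal sketch of the WriterCallback contract in the OrcFile hunk above: an implementation that counts stripes as they are flushed and reports at footer time. How a callback gets registered with a writer is outside this hunk, so that step is omitted.

import java.io.IOException;

import org.apache.hadoop.hive.ql.io.orc.OrcFile;

public class StripeCountingCallback implements OrcFile.WriterCallback {
  private int stripes = 0;

  @Override
  public void preStripeWrite(OrcFile.WriterContext context) throws IOException {
    stripes++;                          // invoked before each stripe is written
  }

  @Override
  public void preFooterWrite(OrcFile.WriterContext context) throws IOException {
    // The writer that is about to emit its footer is reachable through the context.
    System.out.println("footer for " + context.getWriter() + " after " + stripes + " stripes");
  }
}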
*/ - public static class Options { + class Options { private boolean[] include; private long offset = 0; private long length = Long.MAX_VALUE; diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java index 268eee5..31397aa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java @@ -152,7 +152,7 @@ public int hashCode() { private long currentGeneration = 0; - private final TreeMap getBufferTree(boolean direct) { + private TreeMap getBufferTree(boolean direct) { return direct ? directBuffers : buffers; } @@ -2180,7 +2180,7 @@ private static TreeReader createTreeReader(Path path, bufferSize)); } - static enum Location { + enum Location { BEFORE, MIN, MIDDLE, MAX, AFTER } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java index 71c1c4d..a706190 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java @@ -203,7 +203,7 @@ static BigInteger readBigInteger(InputStream input) throws IOException { ONE, TWO, THREE, FOUR, FIVE, SIX, SEVEN, EIGHT, NINE, TEN, ELEVEN, TWELVE, THIRTEEN, FOURTEEN, FIFTEEN, SIXTEEN, SEVENTEEN, EIGHTEEN, NINETEEN, TWENTY, TWENTYONE, TWENTYTWO, TWENTYTHREE, TWENTYFOUR, TWENTYSIX, - TWENTYEIGHT, THIRTY, THIRTYTWO, FORTY, FORTYEIGHT, FIFTYSIX, SIXTYFOUR; + TWENTYEIGHT, THIRTY, THIRTYTWO, FORTY, FORTYEIGHT, FIFTYSIX, SIXTYFOUR } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/StreamName.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/StreamName.java index 3d44954..e653f8e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/StreamName.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/StreamName.java @@ -25,17 +25,17 @@ private final int column; private final OrcProto.Stream.Kind kind; - public static enum Area { + public enum Area { DATA, INDEX } - public StreamName(int column, OrcProto.Stream.Kind kind) { + StreamName(int column, OrcProto.Stream.Kind kind) { this.column = column; this.kind = kind; } public boolean equals(Object obj) { - if (obj != null && obj instanceof StreamName) { + if (obj instanceof StreamName) { StreamName other = (StreamName) obj; return other.column == column && other.kind == kind; } else { diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java index 67ce151..eb0e7dd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java @@ -149,7 +149,7 @@ protected TimestampWritable convert(Binary binary) { final Class _type; - private ETypeConverter(final Class type) { + ETypeConverter(final Class type) { this._type = type; } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java index 99901f0..788168d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java @@ -87,7 +87,7 @@ private static Type convertType(final String name, final TypeInfo typeInfo, fina int scale = decimalTypeInfo.scale(); int bytes = 
ParquetHiveSerDe.PRECISION_TO_BYTE_COUNT[prec - 1]; return Types.optional(PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY).length(bytes).as(OriginalType.DECIMAL). - scale(scale).precision(prec).named(name); + scale(scale).precision(prec).named(name); } else if (typeInfo.equals(TypeInfoFactory.unknownTypeInfo)) { throw new UnsupportedOperationException("Unknown type not implemented"); } else { diff --git ql/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java ql/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java index fc736a7..2ad6ecc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java @@ -29,7 +29,7 @@ * The possible operators for predicates. To get the opposites, construct * an expression with a not operator. */ - public static enum Operator { + enum Operator { EQUALS, NULL_SAFE_EQUALS, LESS_THAN, @@ -42,7 +42,7 @@ /** * The possible types for sargs. */ - public static enum Type { + enum Type { INTEGER, // all of the integer types FLOAT, // float and double STRING, // string, char, varchar @@ -53,28 +53,28 @@ /** * Get the operator for the leaf. */ - public Operator getOperator(); + Operator getOperator(); /** * Get the type of the column and literal. */ - public Type getType(); + Type getType(); /** * Get the simple column name. * @return the column name */ - public String getColumnName(); + String getColumnName(); /** * Get the literal half of the predicate leaf. * @return a Long, Double, or String */ - public Object getLiteral(); + Object getLiteral(); /** * For operators with multiple literals (IN and BETWEEN), get the literals. * @return the list of literals (Longs, Doubles, or Strings) */ - public List getLiteralList(); + List getLiteralList(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java index 208f510..044d5c7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgument.java @@ -37,7 +37,7 @@ /** * The potential result sets of logical operations. */ - public static enum TruthValue { + enum TruthValue { YES, NO, NULL, YES_NULL, NO_NULL, YES_NO, YES_NO_NULL; /** @@ -156,14 +156,14 @@ public boolean isNeeded() { * list will have the duplicates removed. * @return the list of leaf predicates */ - public List getLeaves(); + List getLeaves(); /** * Evaluate the entire predicate based on the values for the leaf predicates. * @param leaves the value of each leaf predicate * @return the value of hte entire predicate */ - public TruthValue evaluate(TruthValue[] leaves); + TruthValue evaluate(TruthValue[] leaves); /** * Serialize the SARG as a kyro object and return the base64 string. @@ -174,13 +174,13 @@ public boolean isNeeded() { * * @return the serialized SARG */ - public String toKryo(); + String toKryo(); /** * A factory for creating SearchArguments. Java doesn't allow static methods * in interfaces. *DOH* */ - public static class Factory { + class Factory { public SearchArgument create(ExprNodeGenericFuncDesc expression) { return new SearchArgumentImpl(expression); } @@ -199,32 +199,32 @@ public SearchArgument create(String kryo) { * get a ExprNodeDesc. The user must call startOr, startAnd, or startNot * before adding any leaves. */ - public interface Builder { + interface Builder { /** * Start building an or operation and push it on the stack. 
* @return this */ - public Builder startOr(); + Builder startOr(); /** * Start building an and operation and push it on the stack. * @return this */ - public Builder startAnd(); + Builder startAnd(); /** * Start building a not operation and push it on the stack. * @return this */ - public Builder startNot(); + Builder startNot(); /** * Finish the current operation and pop it off of the stack. Each start * call must have a matching end. * @return this */ - public Builder end(); + Builder end(); /** * Add a less than leaf to the current item on the stack. @@ -232,7 +232,7 @@ public SearchArgument create(String kryo) { * @param literal the literal * @return this */ - public Builder lessThan(String column, Object literal); + Builder lessThan(String column, Object literal); /** * Add a less than equals leaf to the current item on the stack. @@ -240,7 +240,7 @@ public SearchArgument create(String kryo) { * @param literal the literal * @return this */ - public Builder lessThanEquals(String column, Object literal); + Builder lessThanEquals(String column, Object literal); /** * Add an equals leaf to the current item on the stack. @@ -248,7 +248,7 @@ public SearchArgument create(String kryo) { * @param literal the literal * @return this */ - public Builder equals(String column, Object literal); + Builder equals(String column, Object literal); /** * Add a null safe equals leaf to the current item on the stack. @@ -256,7 +256,7 @@ public SearchArgument create(String kryo) { * @param literal the literal * @return this */ - public Builder nullSafeEquals(String column, Object literal); + Builder nullSafeEquals(String column, Object literal); /** * Add an in leaf to the current item on the stack. @@ -264,14 +264,14 @@ public SearchArgument create(String kryo) { * @param literal the literal * @return this */ - public Builder in(String column, Object... literal); + Builder in(String column, Object... literal); /** * Add an is null leaf to the current item on the stack. * @param column the name of the column * @return this */ - public Builder isNull(String column); + Builder isNull(String column); /** * Add a between leaf to the current item on the stack. @@ -280,18 +280,18 @@ public SearchArgument create(String kryo) { * @param upper the literal * @return this */ - public Builder between(String column, Object lower, Object upper); + Builder between(String column, Object lower, Object upper); /** * Build and return the SearchArgument that has been defined. All of the * starts must have been ended before this call. * @return the new SearchArgument */ - public SearchArgument build(); + SearchArgument build(); } /** * Use this instance to create SearchArgument instances. 
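A usage sketch of the fluent Builder above, producing roughly age < 30 AND state IN ('CA', 'OR'). SearchArgumentFactory.newBuilder() is assumed as the entry point; it is not part of this diff.

import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;

public class SargExample {
  public static void main(String[] args) {
    SearchArgument sarg = SearchArgumentFactory.newBuilder()
        .startAnd()                     // push an AND onto the stack
            .lessThan("age", 30L)       // leaf: age < 30
            .in("state", "CA", "OR")    // leaf: state IN ('CA', 'OR')
        .end()                          // each start call needs a matching end
        .build();                       // all starts must be ended before build()
    System.out.println(sarg.getLeaves());
  }
}

Per the getLeaves() javadoc above, a leaf that appeared more than once in the expression would show up only once in the returned list.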
*/ - public static final Factory FACTORY = new Factory(); + Factory FACTORY = new Factory(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java index 2c53f65..460e0b9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java @@ -172,7 +172,7 @@ public int hashCode() { } static class ExpressionTree { - static enum Operator {OR, AND, NOT, LEAF, CONSTANT} + enum Operator {OR, AND, NOT, LEAF, CONSTANT} private final Operator operator; private final List children; private final int leaf; diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockManager.java index b2eb997..72e9960 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockManager.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockManager.java @@ -27,28 +27,33 @@ */ public interface HiveLockManager { - public void setContext(HiveLockManagerCtx ctx) throws LockException; + void setContext(HiveLockManagerCtx ctx) throws LockException; /** - * @param key object to be locked - * @param mode mode of the lock (SHARED/EXCLUSIVE) - * @param keepAlive if the lock needs to be persisted after the statement + * @param key object to be locked + * @param mode mode of the lock (SHARED/EXCLUSIVE) + * @param keepAlive if the lock needs to be persisted after the statement */ - public HiveLock lock(HiveLockObject key, HiveLockMode mode, - boolean keepAlive) throws LockException; - public List lock(List objs, - boolean keepAlive) throws LockException; - public void unlock(HiveLock hiveLock) throws LockException; - public void releaseLocks(List hiveLocks); - - public List getLocks(boolean verifyTablePartitions, boolean fetchData) throws LockException; - public List getLocks(HiveLockObject key, boolean verifyTablePartitions, boolean fetchData) throws LockException; - public void close() throws LockException; - public void prepareRetry() throws LockException; + HiveLock lock(HiveLockObject key, HiveLockMode mode, boolean keepAlive) throws LockException; + + List lock(List objs, boolean keepAlive) throws LockException; + + void unlock(HiveLock hiveLock) throws LockException; + + void releaseLocks(List hiveLocks); + + List getLocks(boolean verifyTablePartitions, boolean fetchData) throws LockException; + + List getLocks(HiveLockObject key, boolean verifyTablePartitions, boolean fetchData) + throws LockException; + + void close() throws LockException; + + void prepareRetry() throws LockException; /** * refresh to enable new configurations. 
*/ - public void refresh(); + void refresh(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockMode.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockMode.java index 37af243..fef5d49 100644 --- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockMode.java +++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveLockMode.java @@ -21,6 +21,6 @@ public enum HiveLockMode { SHARED, EXCLUSIVE, - SEMI_SHARED; // SEMI_SHARED can share with SHARED but not with itself + SEMI_SHARED // SEMI_SHARED can share with SHARED but not with itself } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index a7e50ad..ed378d6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -2708,4 +2708,4 @@ public Function getFunction(String dbName, String funcName) throws HiveException } } -}; +} diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java index 1eec32c..bcfb0ac 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java @@ -51,23 +51,23 @@ /** * @return Class providing an implementation of {@link InputFormat} */ - public Class getInputFormatClass(); + Class getInputFormatClass(); /** * @return Class providing an implementation of {@link OutputFormat} */ - public Class getOutputFormatClass(); + Class getOutputFormatClass(); /** * @return Class providing an implementation of {@link SerDe} */ - public Class getSerDeClass(); + Class getSerDeClass(); /** * @return metadata hook implementation, or null if this * storage handler does not need any metadata notifications */ - public HiveMetaHook getMetaHook(); + HiveMetaHook getMetaHook(); /** * Returns the implementation specific authorization provider @@ -75,7 +75,7 @@ * @return authorization provider * @throws HiveException */ - public HiveAuthorizationProvider getAuthorizationProvider() + HiveAuthorizationProvider getAuthorizationProvider() throws HiveException; /** @@ -95,8 +95,7 @@ public HiveAuthorizationProvider getAuthorizationProvider() * @param jobProperties receives properties copied or transformed * from the table properties */ - public abstract void configureInputJobProperties(TableDesc tableDesc, - Map jobProperties); + void configureInputJobProperties(TableDesc tableDesc, Map jobProperties); /** * This method is called to allow the StorageHandlers the chance @@ -115,8 +114,7 @@ public abstract void configureInputJobProperties(TableDesc tableDesc, * @param jobProperties receives properties copied or transformed * from the table properties */ - public abstract void configureOutputJobProperties(TableDesc tableDesc, - Map jobProperties); + void configureOutputJobProperties(TableDesc tableDesc, Map jobProperties); /** * Deprecated use configureInputJobProperties/configureOutputJobProperties @@ -131,9 +129,7 @@ public abstract void configureOutputJobProperties(TableDesc tableDesc, * from the table properties */ @Deprecated - public void configureTableJobProperties( - TableDesc tableDesc, - Map jobProperties); + void configureTableJobProperties(TableDesc tableDesc, Map jobProperties); /** * Called just before submitting MapReduce job. 
@@ -141,5 +137,5 @@ public void configureTableJobProperties( * @param tableDesc descriptor for the table being accessed * @param JobConf jobConf for MapReduce job */ - public void configureJobConf(TableDesc tableDesc, JobConf jobConf); + void configureJobConf(TableDesc tableDesc, JobConf jobConf); } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStoragePredicateHandler.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStoragePredicateHandler.java index 7d7c764..bd51230 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStoragePredicateHandler.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStoragePredicateHandler.java @@ -55,15 +55,13 @@ * @return decomposed form of predicate, or null if no pushdown is * possible at all */ - public DecomposedPredicate decomposePredicate( - JobConf jobConf, - Deserializer deserializer, + DecomposedPredicate decomposePredicate(JobConf jobConf, Deserializer deserializer, ExprNodeDesc predicate); /** * Struct class for returning multiple values from decomposePredicate. */ - public static class DecomposedPredicate { + class DecomposedPredicate { /** * Portion of predicate to be evaluated by storage handler. Hive * will pass this into the storage handler's input format. diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java index 9051ba6..7fd7789 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java @@ -103,13 +103,13 @@ public static String escapeString(String str) { return (escape.toString()); } - static final byte[] escapeEscapeBytes = "\\\\".getBytes();; + static final byte[] escapeEscapeBytes = "\\\\".getBytes(); static final byte[] escapeUnescapeBytes = "\\".getBytes(); - static final byte[] newLineEscapeBytes = "\\n".getBytes();; + static final byte[] newLineEscapeBytes = "\\n".getBytes(); static final byte[] newLineUnescapeBytes = "\n".getBytes(); - static final byte[] carriageReturnEscapeBytes = "\\r".getBytes();; + static final byte[] carriageReturnEscapeBytes = "\\r".getBytes(); static final byte[] carriageReturnUnescapeBytes = "\r".getBytes(); - static final byte[] tabEscapeBytes = "\\t".getBytes();; + static final byte[] tabEscapeBytes = "\\t".getBytes(); static final byte[] tabUnescapeBytes = "\t".getBytes(); static final byte[] ctrlABytes = "\u0001".getBytes(); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/InputEstimator.java ql/src/java/org/apache/hadoop/hive/ql/metadata/InputEstimator.java index 5826869..a75cae9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/InputEstimator.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/InputEstimator.java @@ -33,7 +33,7 @@ */ Estimation estimate(JobConf job, TableScanOperator ts, long remaining) throws HiveException; - public static class Estimation { + class Estimation { private int rowCount; private long totalLength; diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index 2f13ac2..095c381 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -1018,4 +1018,4 @@ private static String normalize(String colName) throws HiveException { } return colName.toLowerCase(); } -}; +} diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java 
ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java index 2504e47..65dd1f8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java @@ -42,20 +42,21 @@ * Write an error message. * @param sqlState if {@code null}, will be ignored */ - public void error(OutputStream out, String msg, int errorCode, String sqlState) + void error(OutputStream out, String msg, int errorCode, String sqlState) throws HiveException; /** * @param sqlState if {@code null}, will be skipped in output * @param errorDetail usually string version of some Exception, if {@code null}, will be ignored */ - public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail) + void error(OutputStream out, String errorMessage, int errorCode, String sqlState, + String errorDetail) throws HiveException; /** * Show a list of tables. */ - public void showTables(DataOutputStream out, Set tables) + void showTables(DataOutputStream out, Set tables) throws HiveException; /** @@ -73,41 +74,35 @@ public void showTables(DataOutputStream out, Set tables) * @param colStats * @throws HiveException */ - public void describeTable(DataOutputStream out, String colPath, - String tableName, Table tbl, Partition part, List cols, - boolean isFormatted, boolean isExt, boolean isPretty, - boolean isOutputPadded, List colStats) + void describeTable(DataOutputStream out, String colPath, String tableName, Table tbl, + Partition part, List cols, boolean isFormatted, boolean isExt, boolean isPretty, + boolean isOutputPadded, List colStats) throws HiveException; /** * Show the table status. */ - public void showTableStatus(DataOutputStream out, - Hive db, - HiveConf conf, - List
tbls, - Map part, - Partition par) + void showTableStatus(DataOutputStream out, Hive db, HiveConf conf, List
tbls, + Map part, Partition par) throws HiveException; /** * Show the table partitions. */ - public void showTablePartitons(DataOutputStream out, - List parts) + void showTablePartitons(DataOutputStream out, List parts) throws HiveException; /** * Show the databases */ - public void showDatabases(DataOutputStream out, List databases) + void showDatabases(DataOutputStream out, List databases) throws HiveException; /** * Describe a database. */ - public void showDatabaseDescription (DataOutputStream out, String database, String comment, - String location, String ownerName, String ownerType, Map params) + void showDatabaseDescription(DataOutputStream out, String database, String comment, + String location, String ownerName, String ownerType, Map params) throws HiveException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java index c411bf5..97ad42a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BigTableSelectorForAutoSMJ.java @@ -30,7 +30,7 @@ * decide the big table based on size or position of the tables. */ public interface BigTableSelectorForAutoSMJ { - public int getBigTablePosition(ParseContext parseContext, JoinOperator joinOp, - Set joinCandidates) + int getBigTablePosition(ParseContext parseContext, JoinOperator joinOp, + Set joinCandidates) throws SemanticException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java index fae5b39..aab4ef4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java @@ -369,7 +369,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, return null; } cols = cols == null ? new ArrayList() : cols; - + cppCtx.getPrunedColLists().put((Operator) nd, cols); List neededColumnIds = new ArrayList(); @@ -474,13 +474,13 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, flags[index] = true; colLists = Utilities.mergeUniqElems(colLists, valCols.get(index).getCols()); } - + Collections.sort(colLists); pruneReduceSinkOperator(flags, op, cppCtx); cppCtx.getPrunedColLists().put(op, colLists); return null; } - + // Reduce Sink contains the columns needed - no need to aggregate from // children for (ExprNodeDesc val : valCols) { @@ -514,7 +514,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, if (cols == null) { return null; } - + Map colExprMap = op.getColumnExprMap(); // As columns go down the DAG, the LVJ will transform internal column // names from something like 'key' to '_col0'. Because of this, we need @@ -599,8 +599,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object... 
nodeOutputs) throws SemanticException { SelectOperator op = (SelectOperator) nd; ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; - - + + if (op.getChildOperators() != null) { for (Operator child : op.getChildOperators()) { // UDTF is not handled yet, so the parent SelectOp of UDTF should just assume @@ -853,11 +853,11 @@ private static void pruneOperator(NodeProcessorCtx ctx, if (inputSchema != null) { ArrayList rs = new ArrayList(); ArrayList inputCols = inputSchema.getSignature(); - for (ColumnInfo i: inputCols) { + for (ColumnInfo i : inputCols) { if (cols.contains(i.getInternalName())) { rs.add(i); } - } + } op.getSchema().setSignature(rs); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java index 46bf55d..297c3eb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java @@ -92,7 +92,7 @@ static { propagatableUdfs.add(GenericUDFOPAnd.class); - }; + } private ConstantPropagateProcFactory() { // prevent instantiation diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java index 37fa0fe..b4eb96a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java @@ -135,11 +135,11 @@ private NodeProcessor getMapSortedGroupbySkewProc(ParseContext pctx) { public enum GroupByOptimizerSortMatch { NO_MATCH, PARTIAL_MATCH, COMPLETE_MATCH - }; + } private enum ColumnOrderMatch { NO_MATCH, PREFIX_COL1_MATCH, PREFIX_COL2_MATCH, COMPLETE_MATCH - }; + } /** * SortGroupByProcessor. diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java index cc94254..34fca70 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/index/RewriteCanApplyCtx.java @@ -302,8 +302,7 @@ public Object process(Node nd, Stack stack, //Map for base table to index table mapping //TableScan operator for base table will be modified to read from index table private final Map baseToIdxTableMap = - new HashMap();; - + new HashMap(); public void addTable(String baseTableName, String indexTableName) { baseToIdxTableMap.put(baseTableName, indexTableName); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java index 48ca8f9..222ce4c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java @@ -353,14 +353,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, if (inpOp.getSchema() != null && inpOp.getSchema().getSignature() != null ) { for(ColumnInfo ci : inpOp.getSchema().getSignature()) { Dependency inp_dep = lctx.getIndex().getDependency(inpOp, ci); - // The dependency can be null as some of the input cis may not have - // been set in case of joins. 
- if (inp_dep != null) { - for(BaseColumnInfo bci : inp_dep.getBaseCols()) { - new_type = LineageCtx.getNewDependencyType(inp_dep.getType(), new_type); - tai_set.add(bci.getTabAlias()); - } - } + // The dependency can be null as some of the input cis may not have + // been set in case of joins. + if (inp_dep != null) { + for (BaseColumnInfo bci : inp_dep.getBaseCols()) { + new_type = LineageCtx.getNewDependencyType(inp_dep.getType(), new_type); + tai_set.add(bci.getTabAlias()); + } + } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java index e46e144..554d9f9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java @@ -353,7 +353,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return new NodeInfoWrapper(WalkState.CONSTANT, null, getOutExpr(fd, nodeOutputs)); } } - }; + } /** * FieldExprProcessor. diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java index 296fecb..b5eeeb8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/BucketingSortingCtx.java @@ -122,16 +122,16 @@ public void setSortedColsByDirectory(Map> sortedColsByDire * Implementations of this interface should maintain the restriction that the alias * getNames().get(i) should have index getIndexes().get(i) in the schema. */ - public static interface BucketSortCol { + public interface BucketSortCol { // Get a list of aliases for the same column - public List getNames(); + List getNames(); // Get a list of indexes for which the columns in the schema are the same - public List getIndexes(); + List getIndexes(); // Add an alternative alias for the column this instance represents, and its index in the // schema. 
- public void addAlias(String name, Integer index); + void addAlias(String name, Integer index); } /** diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index 7c7f14b..df94147 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -119,7 +119,7 @@ Set supportedAggregationUdfs = new HashSet(); - private PhysicalContext physicalContext = null;; + private PhysicalContext physicalContext = null; public Vectorizer() { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 22945e3..fb3f788 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -615,7 +615,7 @@ private static String getUnionTypeStringFromAST(ASTNode typeNode) public Partition partHandle; public int numDynParts; // number of dynamic partition columns public List partitions; // involved partitions in TableScanOperator/FileSinkOperator - public static enum SpecType {TABLE_ONLY, STATIC_PARTITION, DYNAMIC_PARTITION}; + public enum SpecType {TABLE_ONLY, STATIC_PARTITION, DYNAMIC_PARTITION} public SpecType specType; public tableSpec(Hive db, HiveConf conf, ASTNode ast) diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java index d1e761b..1b19e85 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHook.java @@ -52,9 +52,7 @@ * @return replacement AST (typically the same as the original AST unless the * entire tree had to be replaced; must not be null) */ - public ASTNode preAnalyze( - HiveSemanticAnalyzerHookContext context, - ASTNode ast) throws SemanticException; + ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) throws SemanticException; /** * Invoked after Hive performs its own semantic analysis on a @@ -66,7 +64,6 @@ public ASTNode preAnalyze( * @param rootTasks root tasks produced by semantic analysis; * the hook is free to modify this list or its contents */ - public void postAnalyze( - HiveSemanticAnalyzerHookContext context, + void postAnalyze(HiveSemanticAnalyzerHookContext context, List> rootTasks) throws SemanticException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHookContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHookContext.java index 8694d07..97dd715 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHookContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveSemanticAnalyzerHookContext.java @@ -35,14 +35,14 @@ * @return the Hive db instance; hook implementations can use this for * purposes such as getting configuration information or making metastore calls */ - public Hive getHive() throws HiveException; + Hive getHive() throws HiveException; /** * This should be called after the semantic analyzer completes. * @param sem */ - public void update(BaseSemanticAnalyzer sem); + void update(BaseSemanticAnalyzer sem); /** @@ -50,11 +50,11 @@ * preAnalyze method, they should return null. 
* @return the set of read entities */ - public Set getInputs(); + Set getInputs(); - public Set getOutputs(); + Set getOutputs(); - public String getUserName(); + String getUserName(); - public void setUserName(String userName); + void setUserName(String userName); } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/JoinType.java ql/src/java/org/apache/hadoop/hive/ql/parse/JoinType.java index 60659ef..e948072 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/JoinType.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/JoinType.java @@ -24,4 +24,4 @@ */ public enum JoinType { INNER, LEFTOUTER, RIGHTOUTER, FULLOUTER, UNIQUE, LEFTSEMI -}; +} diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java index 06d3f4b..c639b92 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/PTFInvocationSpec.java @@ -68,11 +68,11 @@ public void setAstNode(ASTNode astNode) { public abstract PTFQueryInputSpec getQueryInput(); } - public static enum PTFQueryInputType { + public enum PTFQueryInputType { TABLE, SUBQUERY, PTFCOMPONENT, - WINDOWING; + WINDOWING } /* @@ -482,10 +482,10 @@ public String toString() } } - public static enum Order + public enum Order { ASC, - DESC; + DESC } public static class OrderExpression extends PartitionExpression diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java index 52c39c0..1be2b1f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java @@ -149,12 +149,12 @@ public Object create(Token payload) { public Object dupNode(Object t) { return create(((CommonTree)t).token); - }; + } @Override public Object errorNode(TokenStream input, Token start, Token stop, RecognitionException e) { return new ASTErrorNode(input, start, stop, e); - }; + } }; public ASTNode parse(String command) throws ParseException { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java index 908db1e..480997d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java @@ -69,23 +69,23 @@ * If this QB represents a SubQuery predicate then this will point to the SubQuery object. */ private QBSubQuery subQueryPredicateDef; - - /* - * used to give a unique name to each SubQuery QB Currently there can be at - * most 2 SubQueries in a Query: 1 in the Where clause, and 1 in the Having - * clause. - */ - private int numSubQueryPredicates; - - /* - * for now a top level QB can have 1 where clause SQ predicate. - */ - private QBSubQuery whereClauseSubQueryPredicate; - + + /* + * used to give a unique name to each SubQuery QB Currently there can be at + * most 2 SubQueries in a Query: 1 in the Where clause, and 1 in the Having + * clause. + */ + private int numSubQueryPredicates; + /* * for now a top level QB can have 1 where clause SQ predicate. */ - private QBSubQuery havingClauseSubQueryPredicate; + private QBSubQuery whereClauseSubQueryPredicate; + + /* + * for now a top level QB can have 1 where clause SQ predicate. 
+ */ + private QBSubQuery havingClauseSubQueryPredicate; // results @@ -341,28 +341,28 @@ protected void setSubQueryDef(QBSubQuery subQueryPredicateDef) { protected QBSubQuery getSubQueryPredicateDef() { return subQueryPredicateDef; } - - protected int getNumSubQueryPredicates() { - return numSubQueryPredicates; - } - - protected int incrNumSubQueryPredicates() { - return ++numSubQueryPredicates; - } - - void setWhereClauseSubQueryPredicate(QBSubQuery sq) { - whereClauseSubQueryPredicate = sq; - } - - public QBSubQuery getWhereClauseSubQueryPredicate() { - return whereClauseSubQueryPredicate; - } - - void setHavingClauseSubQueryPredicate(QBSubQuery sq) { + + protected int getNumSubQueryPredicates() { + return numSubQueryPredicates; + } + + protected int incrNumSubQueryPredicates() { + return ++numSubQueryPredicates; + } + + void setWhereClauseSubQueryPredicate(QBSubQuery sq) { + whereClauseSubQueryPredicate = sq; + } + + public QBSubQuery getWhereClauseSubQueryPredicate() { + return whereClauseSubQueryPredicate; + } + + void setHavingClauseSubQueryPredicate(QBSubQuery sq) { havingClauseSubQueryPredicate = sq; } - - public QBSubQuery getHavingClauseSubQueryPredicate() { + + public QBSubQuery getHavingClauseSubQueryPredicate() { return havingClauseSubQueryPredicate; } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java index e923bca..1ad4ad3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java @@ -23,20 +23,14 @@ /** * Implementation of the query block expression. - * **/ - public class QBExpr { private static final Log LOG = LogFactory.getLog("hive.ql.parse.QBExpr"); - /** - * Opcode. - * - */ - public static enum Opcode { + public enum Opcode { NULLOP, UNION, INTERSECT, DIFF - }; + } private Opcode opcode; private QBExpr qbexpr1; diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java index 911ac8a..99b77ef 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java @@ -558,7 +558,7 @@ public tableSpec getTableSpec() { return destToLateralView; } - protected static enum ClauseType { + protected enum ClauseType { CLUSTER_BY_CLAUSE, DISTRIBUTE_BY_CLAUSE, ORDER_BY_CLAUSE, diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java index d398c88..030d708 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java @@ -38,8 +38,8 @@ import org.apache.hadoop.hive.ql.parse.SubQueryDiagnostic.QBSubQueryRewrite; public class QBSubQuery implements ISubQueryJoinInfo { - - public static enum SubQueryType { + + public enum SubQueryType { EXISTS, NOT_EXISTS, IN, @@ -93,7 +93,7 @@ public SubQueryType getType() { * - has a reference to a SubQuery table source * - has a reference to Outer(parent) Query table source */ - static enum ExprType { + enum ExprType { REFERS_NONE(false, false) { @Override public ExprType combine(ExprType other) { @@ -149,16 +149,16 @@ public boolean refersSubQuery() { } /* - * This class captures the information about a + * This class captures the information about a * conjunct in the where clause of the SubQuery. 
* For a equality predicate it capture for each side: * - the AST * - the type of Expression (basically what columns are referenced) - * - for Expressions that refer the parent it captures the + * - for Expressions that refer the parent it captures the * parent's ColumnInfo. In case of outer Aggregation expressions * we need this to introduce a new mapping in the OuterQuery * RowResolver. A join condition must use qualified column references, - * so we generate a new name for the aggr expression and use it in the + * so we generate a new name for the aggr expression and use it in the * joining condition. * For e.g. * having exists ( select x from R2 where y = min(R1.z) ) @@ -174,8 +174,8 @@ public boolean refersSubQuery() { private final ColumnInfo leftOuterColInfo; private final ColumnInfo rightOuterColInfo; - Conjunct(ASTNode leftExpr, - ASTNode rightExpr, + Conjunct(ASTNode leftExpr, + ASTNode rightExpr, ExprType leftExprType, ExprType rightExprType, ColumnInfo leftOuterColInfo, @@ -239,8 +239,8 @@ ColumnInfo getRightOuterColInfo() { Stack stack; ConjunctAnalyzer(RowResolver parentQueryRR, - boolean forHavingClause, - String parentQueryNewAlias) { + boolean forHavingClause, + String parentQueryNewAlias) { this.parentQueryRR = parentQueryRR; defaultExprProcessor = new DefaultExprProcessor(); this.forHavingClause = forHavingClause; @@ -259,14 +259,14 @@ ColumnInfo getRightOuterColInfo() { */ private ObjectPair analyzeExpr(ASTNode expr) { ColumnInfo cInfo = null; - if ( forHavingClause ) { - try { - cInfo = parentQueryRR.getExpression(expr); - if ( cInfo != null) { - return ObjectPair.create(ExprType.REFERS_PARENT, cInfo); - } - } catch(SemanticException se) { - } + if (forHavingClause) { + try { + cInfo = parentQueryRR.getExpression(expr); + if (cInfo != null) { + return ObjectPair.create(ExprType.REFERS_PARENT, cInfo); + } + } catch (SemanticException se) { + } } if ( expr.getType() == HiveParser.DOT) { ASTNode dot = firstDot(expr); @@ -308,12 +308,12 @@ Conjunct analyzeConjunct(ASTNode conjunct) throws SemanticException { ObjectPair leftInfo = analyzeExpr(left); ObjectPair rightInfo = analyzeExpr(right); - return new Conjunct(left, right, + return new Conjunct(left, right, leftInfo.getFirst(), rightInfo.getFirst(), leftInfo.getSecond(), rightInfo.getSecond()); } else { ObjectPair sqExprInfo = analyzeExpr(conjunct); - return new Conjunct(conjunct, null, + return new Conjunct(conjunct, null, sqExprInfo.getFirst(), null, sqExprInfo.getSecond(), sqExprInfo.getSecond()); } @@ -354,86 +354,86 @@ protected ASTNode firstDot(ASTNode dot) { } /* - * When transforming a Not In SubQuery we need to check for nulls in the + * When transforming a Not In SubQuery we need to check for nulls in the * Joining expressions of the SubQuery. If there are nulls then the SubQuery always - * return false. For more details see + * return false. For more details see * https://issues.apache.org/jira/secure/attachment/12614003/SubQuerySpec.pdf - * + * * Basically, SQL semantics say that: * - R1.A not in (null, 1, 2, ...) - * is always false. - * A 'not in' operator is equivalent to a '<> all'. Since a not equal check with null + * is always false. + * A 'not in' operator is equivalent to a '<> all'. Since a not equal check with null * returns false, a not in predicate against aset with a 'null' value always returns false. - * + * * So for not in SubQuery predicates: * - we join in a null count predicate. * - And the joining condition is that the 'Null Count' query has a count of 0. 
- * + * */ class NotInCheck implements ISubQueryJoinInfo { - + private static final String CNT_ALIAS = "c1"; - + /* * expressions in SubQ that are joined to the Outer Query. */ List subQryCorrExprs; - + /* * row resolver of the SubQuery. * Set by the SemanticAnalyzer after the Plan for the SubQuery is genned. * This is neede in case the SubQuery select list contains a TOK_ALLCOLREF */ RowResolver sqRR; - + NotInCheck() { subQryCorrExprs = new ArrayList(); } - + void addCorrExpr(ASTNode corrExpr) { subQryCorrExprs.add(corrExpr); } - + public ASTNode getSubQueryAST() { ASTNode ast = SubQueryUtils.buildNotInNullCheckQuery( - QBSubQuery.this.getSubQueryAST(), - QBSubQuery.this.getAlias(), - CNT_ALIAS, + QBSubQuery.this.getSubQueryAST(), + QBSubQuery.this.getAlias(), + CNT_ALIAS, subQryCorrExprs, sqRR); SubQueryUtils.setOriginDeep(ast, QBSubQuery.this.originalSQASTOrigin); return ast; } - + public String getAlias() { return QBSubQuery.this.getAlias() + "_notin_nullcheck"; } - + public JoinType getJoinType() { return JoinType.LEFTSEMI; } - + public ASTNode getJoinConditionAST() { - ASTNode ast = + ASTNode ast = SubQueryUtils.buildNotInNullJoinCond(getAlias(), CNT_ALIAS); SubQueryUtils.setOriginDeep(ast, QBSubQuery.this.originalSQASTOrigin); return ast; } - + public QBSubQuery getSubQuery() { return QBSubQuery.this; } - + public String getOuterQueryId() { return QBSubQuery.this.getOuterQueryId(); } - + void setSQRR(RowResolver sqRR) { this.sqRR = sqRR; } - + } - + private final String outerQueryId; private final int sqIdx; private final String alias; @@ -455,11 +455,11 @@ void setSQRR(RowResolver sqRR) { private int numOfCorrelationExprsAddedToSQSelect; private boolean groupbyAddedToSQ; - + private int numOuterCorrExprsForHaving; - + private NotInCheck notInCheck; - + private QBSubQueryRewrite subQueryDiagnostic; public QBSubQuery(String outerQueryId, @@ -483,11 +483,11 @@ public QBSubQuery(String outerQueryId, originalSQASTOrigin = new ASTNodeOrigin("SubQuery", alias, s, alias, originalSQAST); numOfCorrelationExprsAddedToSQSelect = 0; groupbyAddedToSQ = false; - + if ( operator.getType() == SubQueryType.NOT_IN ) { notInCheck = new NotInCheck(); } - + subQueryDiagnostic = SubQueryDiagnostic.getRewrite(this, ctx.getTokenRewriteStream(), ctx); } @@ -500,18 +500,18 @@ public ASTNode getOuterQueryExpression() { public SubQueryTypeDef getOperator() { return operator; } - + public ASTNode getOriginalSubQueryASTForRewrite() { return (operator.getType() == SubQueryType.NOT_EXISTS - || operator.getType() == SubQueryType.NOT_IN ? - (ASTNode) originalSQASTOrigin.getUsageNode().getParent() : + || operator.getType() == SubQueryType.NOT_IN ? + (ASTNode) originalSQASTOrigin.getUsageNode().getParent() : originalSQASTOrigin.getUsageNode()); } void validateAndRewriteAST(RowResolver outerQueryRR, - boolean forHavingClause, - String outerQueryAlias, - Set outerQryAliases) throws SemanticException { + boolean forHavingClause, + String outerQueryAlias, + Set outerQryAliases) throws SemanticException { ASTNode selectClause = (ASTNode) subQueryAST.getChild(1).getChild(1); @@ -519,12 +519,12 @@ void validateAndRewriteAST(RowResolver outerQueryRR, if ( selectClause.getChild(0).getType() == HiveParser.TOK_HINTLIST ) { selectExprStart = 1; } - + /* * Restriction.16.s :: Correlated Expression in Outer Query must not contain * unqualified column references. 
*/ - if ( parentQueryExpression != null && !forHavingClause ) { + if ( parentQueryExpression != null && !forHavingClause ) { ASTNode u = SubQueryUtils.hasUnQualifiedColumnReferences(parentQueryExpression); if ( u != null ) { subQueryAST.setOrigin(originalSQASTOrigin); @@ -532,7 +532,7 @@ void validateAndRewriteAST(RowResolver outerQueryRR, u, "Correlating expression cannot contain unqualified column references.")); } } - + /* * Restriction 17.s :: SubQuery cannot use the same table alias as one used in * the Outer Query. @@ -546,14 +546,14 @@ void validateAndRewriteAST(RowResolver outerQueryRR, } if ( sharedAlias != null) { ASTNode whereClause = SubQueryUtils.subQueryWhere(subQueryAST); - + if ( whereClause != null ) { ASTNode u = SubQueryUtils.hasUnQualifiedColumnReferences(whereClause); if ( u != null ) { subQueryAST.setOrigin(originalSQASTOrigin); throw new SemanticException(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg( u, "SubQuery cannot use the table alias: " + sharedAlias + "; " + - "this is also an alias in the Outer Query and SubQuery contains a unqualified column reference")); + "this is also an alias in the Outer Query and SubQuery contains a unqualified column reference")); } } } @@ -641,25 +641,25 @@ private void setJoinType() { } void buildJoinCondition(RowResolver outerQueryRR, RowResolver sqRR, - boolean forHavingClause, - String outerQueryAlias) throws SemanticException { + boolean forHavingClause, + String outerQueryAlias) throws SemanticException { ASTNode parentQueryJoinCond = null; if ( parentQueryExpression != null ) { - + ColumnInfo outerQueryCol = null; try { outerQueryCol = outerQueryRR.getExpression(parentQueryExpression); } catch(SemanticException se) { } - + parentQueryJoinCond = SubQueryUtils.buildOuterQryToSQJoinCond( getOuterQueryExpression(), alias, sqRR); - + if ( outerQueryCol != null ) { - rewriteCorrConjunctForHaving(parentQueryJoinCond, true, + rewriteCorrConjunctForHaving(parentQueryJoinCond, true, outerQueryAlias, outerQueryRR, outerQueryCol); } subQueryDiagnostic.addJoinCondition(parentQueryJoinCond, outerQueryCol != null, true); @@ -682,10 +682,10 @@ void buildJoinCondition(RowResolver outerQueryRR, RowResolver sqRR, ASTNode updateOuterQueryFilter(ASTNode outerQryFilter) { if (postJoinConditionAST == null ) { return outerQryFilter; - } - + } + subQueryDiagnostic.addPostJoinCondition(postJoinConditionAST); - + if ( outerQryFilter == null ) { return postJoinConditionAST; } @@ -738,7 +738,7 @@ String getNextCorrExprAlias() { * Additional things for Having clause: * - A correlation predicate may refer to an aggregation expression. * - This introduces 2 twists to the rewrite: - * a. When analyzing equality predicates we need to analyze each side + * a. When analyzing equality predicates we need to analyze each side * to see if it is an aggregation expression from the Outer Query. * So for e.g. this is a valid correlation predicate: * R2.x = min(R1.y) @@ -748,12 +748,12 @@ String getNextCorrExprAlias() { * to contain a qualified column references. * We handle this by generating a new name for the aggregation expression, * like R1._gby_sq_col_1 and adding this mapping to the Outer Query's - * Row Resolver. Then we construct a joining predicate using this new + * Row Resolver. Then we construct a joining predicate using this new * name; so in our e.g. 
the condition would be: R2.x = R1._gby_sq_col_1 */ private void rewrite(RowResolver parentQueryRR, - boolean forHavingClause, - String outerQueryAlias) throws SemanticException { + boolean forHavingClause, + String outerQueryAlias) throws SemanticException { ASTNode selectClause = (ASTNode) subQueryAST.getChild(1).getChild(1); ASTNode whereClause = SubQueryUtils.subQueryWhere(subQueryAST); @@ -766,7 +766,7 @@ private void rewrite(RowResolver parentQueryRR, SubQueryUtils.extractConjuncts(searchCond, conjuncts); ConjunctAnalyzer conjunctAnalyzer = new ConjunctAnalyzer(parentQueryRR, - forHavingClause, outerQueryAlias); + forHavingClause, outerQueryAlias); ASTNode sqNewSearchCond = null; for(ASTNode conjunctAST : conjuncts) { @@ -805,7 +805,7 @@ private void rewrite(RowResolver parentQueryRR, corrCondLeftIsRewritten = true; if ( forHavingClause && conjunct.getRightOuterColInfo() != null ) { corrCondRightIsRewritten = true; - rewriteCorrConjunctForHaving(conjunctAST, false, outerQueryAlias, + rewriteCorrConjunctForHaving(conjunctAST, false, outerQueryAlias, parentQueryRR, conjunct.getRightOuterColInfo()); } ASTNode joinPredciate = SubQueryUtils.alterCorrelatedPredicate( @@ -829,7 +829,7 @@ private void rewrite(RowResolver parentQueryRR, corrCondRightIsRewritten = true; if ( forHavingClause && conjunct.getLeftOuterColInfo() != null ) { corrCondLeftIsRewritten = true; - rewriteCorrConjunctForHaving(conjunctAST, true, outerQueryAlias, + rewriteCorrConjunctForHaving(conjunctAST, true, outerQueryAlias, parentQueryRR, conjunct.getLeftOuterColInfo()); } ASTNode joinPredciate = SubQueryUtils.alterCorrelatedPredicate( @@ -901,7 +901,7 @@ private ASTNode getSubQueryGroupByAST() { for(ASTNode child : newChildren ) { subQueryAST.addChild(child); } - + subQueryDiagnostic.setAddGroupByClause(); return groupBy; @@ -927,26 +927,26 @@ public ASTNode getJoinConditionAST() { public int getNumOfCorrelationExprsAddedToSQSelect() { return numOfCorrelationExprsAddedToSQSelect; } - - + + public QBSubQueryRewrite getDiagnostic() { return subQueryDiagnostic; } - + public QBSubQuery getSubQuery() { return this; } - + NotInCheck getNotInCheck() { return notInCheck; } - + private void rewriteCorrConjunctForHaving(ASTNode conjunctASTNode, boolean refersLeft, String outerQueryAlias, RowResolver outerQueryRR, ColumnInfo outerQueryCol) { - + String newColAlias = "_gby_sq_col_" + numOuterCorrExprsForHaving++; ASTNode outerExprForCorr = SubQueryUtils.createColRefAST(outerQueryAlias, newColAlias); if ( refersLeft ) { @@ -956,5 +956,5 @@ private void rewriteCorrConjunctForHaving(ASTNode conjunctASTNode, } outerQueryRR.put(outerQueryAlias, newColAlias, outerQueryCol); } - + } diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java index 33b8a21..2e58b80 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java @@ -98,7 +98,7 @@ public ASTNode getExpressionSource(ASTNode node) { public void put(String tab_alias, String col_alias, ColumnInfo colInfo) { if (!addMappingOnly(tab_alias, col_alias, colInfo)) { - rowSchema.getSignature().add(colInfo); + rowSchema.getSignature().add(colInfo); } } @@ -289,7 +289,7 @@ public void setIsExprResolver(boolean isExprResolver) { public boolean getIsExprResolver() { return isExprResolver; } - + public String[] getAlternateMappings(String internalName) { return altInvRslvMap.get(internalName); } diff --git 
ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 51838ae..34b80b6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -828,7 +828,7 @@ private void processJoin(QB qb, ASTNode join) throws SemanticException { processPTF(qb, child); PTFInvocationSpec ptfInvocationSpec = qb.getPTFInvocationSpec(child); String inputAlias = ptfInvocationSpec == null ? null : - ptfInvocationSpec.getFunction().getAlias();; + ptfInvocationSpec.getFunction().getAlias(); if ( inputAlias == null ) { throw new SemanticException(generateErrorMessage(child, "PTF invocation in a Join must have an alias")); @@ -2233,8 +2233,8 @@ private Operator genFilterPlan(ASTNode searchCond, QB qb, Operator input, String havingInputAlias = null; if ( forHavingClause ) { - havingInputAlias = "gby_sq" + sqIdx; - aliasToOpInfo.put(havingInputAlias, input); + havingInputAlias = "gby_sq" + sqIdx; + aliasToOpInfo.put(havingInputAlias, input); } subQuery.validateAndRewriteAST(inputRR, forHavingClause, havingInputAlias, aliasToOpInfo.keySet()); @@ -5864,7 +5864,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) if (!("".equals(nm[0])) && nm[1] != null) { colName = unescapeIdentifier(colInfo.getAlias()).toLowerCase(); // remove `` } - col.setName(colName);; + col.setName(colName); col.setType(colInfo.getType().getTypeName()); field_schemas.add(col); } @@ -11533,40 +11533,37 @@ private Operator genReduceSinkPlanForWindowing(WindowingSpec spec, } private void addAlternateGByKeyMappings(ASTNode gByExpr, ColumnInfo colInfo, - Operator reduceSinkOp, RowResolver gByRR) { - if ( gByExpr.getType() == HiveParser.DOT - && gByExpr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL ) { - String tab_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr - .getChild(0).getChild(0).getText()); - String col_alias = BaseSemanticAnalyzer.unescapeIdentifier( - gByExpr.getChild(1).getText()); - gByRR.put(tab_alias, col_alias, colInfo); - } else if ( gByExpr.getType() == HiveParser.TOK_TABLE_OR_COL ) { - String col_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr - .getChild(0).getText()); - String tab_alias = null; - /* - * If the input to the GBy has a tab alias for the column, then add an entry - * based on that tab_alias. - * For e.g. this query: - * select b.x, count(*) from t1 b group by x - * needs (tab_alias=b, col_alias=x) in the GBy RR. - * tab_alias=b comes from looking at the RowResolver that is the ancestor - * before any GBy/ReduceSinks added for the GBY operation. - */ - Operator parent = reduceSinkOp; - while ( parent instanceof ReduceSinkOperator || - parent instanceof GroupByOperator ) { - parent = parent.getParentOperators().get(0); - } - RowResolver parentRR = opParseCtx.get(parent).getRowResolver(); - try { - ColumnInfo pColInfo = parentRR.get(tab_alias, col_alias); - tab_alias = pColInfo == null ? 
null : pColInfo.getTabAlias(); - } catch(SemanticException se) { - } - gByRR.put(tab_alias, col_alias, colInfo); - } + Operator reduceSinkOp, RowResolver gByRR) { + if (gByExpr.getType() == HiveParser.DOT + && gByExpr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL) { + String tab_alias = + BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(0).getChild(0).getText()); + String col_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(1).getText()); + gByRR.put(tab_alias, col_alias, colInfo); + } else if (gByExpr.getType() == HiveParser.TOK_TABLE_OR_COL) { + String col_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(0).getText()); + String tab_alias = null; + /* + * If the input to the GBy has a tab alias for the column, then add an entry + * based on that tab_alias. + * For e.g. this query: + * select b.x, count(*) from t1 b group by x + * needs (tab_alias=b, col_alias=x) in the GBy RR. + * tab_alias=b comes from looking at the RowResolver that is the ancestor + * before any GBy/ReduceSinks added for the GBY operation. + */ + Operator parent = reduceSinkOp; + while (parent instanceof ReduceSinkOperator || parent instanceof GroupByOperator) { + parent = parent.getParentOperators().get(0); + } + RowResolver parentRR = opParseCtx.get(parent).getRowResolver(); + try { + ColumnInfo pColInfo = parentRR.get(tab_alias, col_alias); + tab_alias = pColInfo == null ? null : pColInfo.getTabAlias(); + } catch (SemanticException se) { + } + gByRR.put(tab_alias, col_alias, colInfo); + } } private WriteEntity.WriteType determineWriteType(LoadTableDesc ltd, boolean isNonNativeTable) { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java index 089ad78..e805943 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java @@ -299,7 +299,7 @@ private static void getTableAliasesInSubQuery(ASTNode joinNode, List ali getTableAliasesInSubQuery(right, aliases); } } - + static ASTNode hasUnQualifiedColumnReferences(ASTNode ast) { int type = ast.getType(); if ( type == HiveParser.DOT ) { @@ -308,7 +308,7 @@ static ASTNode hasUnQualifiedColumnReferences(ASTNode ast) { else if ( type == HiveParser.TOK_TABLE_OR_COL ) { return ast; } - + for(int i=0; i < ast.getChildCount(); i++ ) { ASTNode c = hasUnQualifiedColumnReferences((ASTNode) ast.getChild(i)); if ( c != null ) { @@ -317,7 +317,7 @@ else if ( type == HiveParser.TOK_TABLE_OR_COL ) { } return null; } - + static ASTNode subQueryWhere(ASTNode subQueryAST) { if ( subQueryAST.getChild(1).getChildCount() > 2 && subQueryAST.getChild(1).getChild(2).getType() == HiveParser.TOK_WHERE ) { @@ -464,15 +464,15 @@ static void setOriginDeep(ASTNode node, ASTNodeOrigin origin) { * This Subquery is joined with the Outer Query plan on the join condition 'c = 0'. * The join condition ensures that in case there are null values in the joining column * the Query returns no rows. - * + * * The AST tree for this is: - * + * * ^(TOK_QUERY * ^(TOK FROM * ^(TOK_SUBQUERY * {the input SubQuery, with correlation removed} - * subQueryAlias - * ) + * subQueryAlias + * ) * ) * ^(TOK_INSERT * ^(TOK_DESTINATION...) 
@@ -480,51 +480,51 @@ static void setOriginDeep(ASTNode node, ASTNodeOrigin origin) { * ^(TOK_SELECTEXPR {ast tree for count *} * ) * ^(TOK_WHERE - * {is null check for joining column} + * {is null check for joining column} * ) * ) * ) - */ - static ASTNode buildNotInNullCheckQuery(ASTNode subQueryAST, - String subQueryAlias, + */ + static ASTNode buildNotInNullCheckQuery(ASTNode subQueryAST, + String subQueryAlias, String cntAlias, List corrExprs, RowResolver sqRR) { - + subQueryAST = (ASTNode) ParseDriver.adaptor.dupTree(subQueryAST); - ASTNode qry = (ASTNode) + ASTNode qry = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_QUERY, "TOK_QUERY"); - + qry.addChild(buildNotInNullCheckFrom(subQueryAST, subQueryAlias)); ASTNode insertAST = buildNotInNullCheckInsert(); qry.addChild(insertAST); insertAST.addChild(buildNotInNullCheckSelect(cntAlias)); - insertAST.addChild(buildNotInNullCheckWhere(subQueryAST, + insertAST.addChild(buildNotInNullCheckWhere(subQueryAST, subQueryAlias, corrExprs, sqRR)); - + return qry; } - + /* * build: * ^(TOK FROM * ^(TOK_SUBQUERY * {the input SubQuery, with correlation removed} - * subQueryAlias - * ) + * subQueryAlias + * ) * ) */ static ASTNode buildNotInNullCheckFrom(ASTNode subQueryAST, String subQueryAlias) { ASTNode from = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_FROM, "TOK_FROM"); - ASTNode sqExpr = (ASTNode) + ASTNode sqExpr = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_SUBQUERY, "TOK_SUBQUERY"); sqExpr.addChild(subQueryAST); sqExpr.addChild(createAliasAST(subQueryAlias)); from.addChild(sqExpr); return from; } - + /* * build * ^(TOK_INSERT @@ -532,21 +532,21 @@ static ASTNode buildNotInNullCheckFrom(ASTNode subQueryAST, String subQueryAlias * ) */ static ASTNode buildNotInNullCheckInsert() { - ASTNode insert = (ASTNode) + ASTNode insert = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_INSERT, "TOK_INSERT"); - ASTNode dest = (ASTNode) + ASTNode dest = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_DESTINATION, "TOK_DESTINATION"); - ASTNode dir = (ASTNode) + ASTNode dir = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_DIR, "TOK_DIR"); - ASTNode tfile = (ASTNode) + ASTNode tfile = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_TMP_FILE, "TOK_TMP_FILE"); insert.addChild(dest); dest.addChild(dir); dir.addChild(tfile); - + return insert; } - + /* * build: * ^(TOK_SELECT @@ -554,37 +554,37 @@ static ASTNode buildNotInNullCheckInsert() { * ) */ static ASTNode buildNotInNullCheckSelect(String cntAlias) { - ASTNode select = (ASTNode) + ASTNode select = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_SELECT, "TOK_SELECT"); - ASTNode selectExpr = (ASTNode) + ASTNode selectExpr = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_SELEXPR, "TOK_SELEXPR"); - ASTNode countStar = (ASTNode) + ASTNode countStar = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_FUNCTIONSTAR, "TOK_FUNCTIONSTAR"); ASTNode alias = (createAliasAST(cntAlias)); - + countStar.addChild((ASTNode) ParseDriver.adaptor.create(HiveParser.Identifier, "count")); select.addChild(selectExpr); selectExpr.addChild(countStar); selectExpr.addChild(alias); - + return select; } - + /* * build: * ^(TOK_WHERE - * {is null check for joining column} + * {is null check for joining column} * ) */ - static ASTNode buildNotInNullCheckWhere(ASTNode subQueryAST, - String sqAlias, + static ASTNode buildNotInNullCheckWhere(ASTNode subQueryAST, + String sqAlias, List corrExprs, RowResolver sqRR) { - + ASTNode sqSelect = (ASTNode) subQueryAST.getChild(1).getChild(1); ASTNode 
selExpr = (ASTNode) sqSelect.getChild(0); String colAlias = null; - + if ( selExpr.getChildCount() == 2 ) { colAlias = selExpr.getChild(1).getText(); } else if (selExpr.getChild(0).getType() != HiveParser.TOK_ALLCOLREF) { @@ -596,42 +596,41 @@ static ASTNode buildNotInNullCheckWhere(ASTNode subQueryAST, String[] joinColName = sqRR.reverseLookup(joinColumn.getInternalName()); colAlias = joinColName[1]; } - + ASTNode searchCond = isNull(createColRefAST(sqAlias, colAlias)); - + for(ASTNode e : corrExprs ) { ASTNode p = (ASTNode) ParseDriver.adaptor.dupTree(e); - p = isNull(p); - searchCond = orAST(searchCond, p); + p = isNull(p); + searchCond = orAST(searchCond, p); } - + ASTNode where = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_WHERE, "TOK_WHERE"); where.addChild(searchCond); return where; } - + static ASTNode buildNotInNullJoinCond(String subqueryAlias, String cntAlias) { - - ASTNode eq = (ASTNode) + + ASTNode eq = (ASTNode) ParseDriver.adaptor.create(HiveParser.EQUAL, "="); - + eq.addChild(createColRefAST(subqueryAlias, cntAlias)); - eq.addChild((ASTNode) + eq.addChild((ASTNode) ParseDriver.adaptor.create(HiveParser.Number, "0")); - + return eq; } - - public static interface ISubQueryJoinInfo { - public String getAlias(); - public JoinType getJoinType(); - public ASTNode getJoinConditionAST(); - public QBSubQuery getSubQuery(); - public ASTNode getSubQueryAST(); - public String getOuterQueryId(); - }; - - + + public interface ISubQueryJoinInfo { + String getAlias(); + JoinType getJoinType(); + ASTNode getJoinConditionAST(); + QBSubQuery getSubQuery(); + ASTNode getSubQueryAST(); + String getOuterQueryId(); + } + /* * Using CommonTreeAdaptor because the Adaptor in ParseDriver doesn't carry * the token indexes when duplicating a Tree. diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java index 28afc6b..73c622b 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java @@ -479,7 +479,7 @@ protected void ensureOrderSpec() { setOrder(order); } } - }; + } /* * A WindowFrame specifies the Range on which a Window Function should @@ -534,12 +534,11 @@ public String toString() } - public static enum Direction - { + public enum Direction { PRECEDING, CURRENT, FOLLOWING - }; + } /* * A Boundary specifies how many rows back/forward a WindowFrame extends from the diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java index 70d9b7a..c22e07c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java @@ -33,40 +33,40 @@ * needs to be executed or can throw a SemanticException. 
*/ public interface HiveAuthorizationTaskFactory { - public Task createCreateRoleTask(ASTNode node, HashSet inputs, - HashSet outputs) throws SemanticException; + Task createCreateRoleTask(ASTNode node, HashSet inputs, + HashSet outputs) throws SemanticException; - public Task createDropRoleTask(ASTNode node, HashSet inputs, - HashSet outputs) throws SemanticException; + Task createDropRoleTask(ASTNode node, HashSet inputs, + HashSet outputs) throws SemanticException; - public Task createShowRoleGrantTask(ASTNode node, Path resultFile, - HashSet inputs, HashSet outputs) throws SemanticException; + Task createShowRoleGrantTask(ASTNode node, Path resultFile, + HashSet inputs, HashSet outputs) throws SemanticException; - public Task createGrantRoleTask(ASTNode node, HashSet inputs, - HashSet outputs) throws SemanticException; + Task createGrantRoleTask(ASTNode node, HashSet inputs, + HashSet outputs) throws SemanticException; - public Task createRevokeRoleTask(ASTNode node, HashSet inputs, - HashSet outputs) throws SemanticException; + Task createRevokeRoleTask(ASTNode node, HashSet inputs, + HashSet outputs) throws SemanticException; - public Task createGrantTask(ASTNode node, HashSet inputs, - HashSet outputs) throws SemanticException; + Task createGrantTask(ASTNode node, HashSet inputs, + HashSet outputs) throws SemanticException; - public Task createShowGrantTask(ASTNode node, Path resultFile, HashSet inputs, - HashSet outputs) throws SemanticException; + Task createShowGrantTask(ASTNode node, Path resultFile, + HashSet inputs, HashSet outputs) throws SemanticException; - public Task createRevokeTask(ASTNode node, HashSet inputs, - HashSet outputs) throws SemanticException; + Task createRevokeTask(ASTNode node, HashSet inputs, + HashSet outputs) throws SemanticException; - public Task createSetRoleTask(String roleName, - HashSet inputs, HashSet outputs) throws SemanticException; + Task createSetRoleTask(String roleName, HashSet inputs, + HashSet outputs) throws SemanticException; - public Task createShowCurrentRoleTask(HashSet inputs, - HashSet outputs, Path resFile) throws SemanticException; + Task createShowCurrentRoleTask(HashSet inputs, + HashSet outputs, Path resFile) throws SemanticException; - public Task createShowRolePrincipalsTask(ASTNode ast, Path resFile, - HashSet inputs, HashSet outputs) throws SemanticException; + Task createShowRolePrincipalsTask(ASTNode ast, Path resFile, + HashSet inputs, HashSet outputs) throws SemanticException; - public Task createShowRolesTask(ASTNode ast, Path resFile, - HashSet inputs, HashSet outputs) throws SemanticException; + Task createShowRolesTask(ASTNode ast, Path resFile, + HashSet inputs, HashSet outputs) throws SemanticException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterDatabaseDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterDatabaseDesc.java index e45bc26..259fa3c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterDatabaseDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterDatabaseDesc.java @@ -31,9 +31,9 @@ private static final long serialVersionUID = 1L; // Only altering the database property and owner is currently supported - public static enum ALTER_DB_TYPES { + public enum ALTER_DB_TYPES { ALTER_PROPERTY, ALTER_OWNER - }; + } ALTER_DB_TYPES alterType; String databaseName; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java index 0318e4b..f8bb20a 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterIndexDesc.java @@ -44,9 +44,9 @@ * alterIndexTypes. * */ - public static enum AlterIndexTypes { + public enum AlterIndexTypes { UPDATETIMESTAMP, - ADDPROPS}; + ADDPROPS} AlterIndexTypes op; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index 20d863b..8c3ffa3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -43,7 +43,7 @@ * alterTableTypes. * */ - public static enum AlterTableTypes { + public enum AlterTableTypes { RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, DROPPROPS, ADDSERDE, ADDSERDEPROPS, ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION, TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE, @@ -51,7 +51,7 @@ ALTERBUCKETNUM, ALTERPARTITION, COMPACT } - public static enum ProtectModeType { + public enum ProtectModeType { NO_DROP, OFFLINE, READ_ONLY, NO_DROP_CASCADE } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ArchiveWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/ArchiveWork.java index 9fb5c8b..e4adceb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ArchiveWork.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ArchiveWork.java @@ -29,10 +29,9 @@ private static final long serialVersionUID = 1L; private ArchiveActionType type; - public static enum ArchiveActionType { + public enum ArchiveActionType { ARCHIVE, UNARCHIVE - }; - + } public ArchiveActionType getType() { return type; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java index 4b2c1ad..3b4dc51 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java @@ -72,7 +72,7 @@ //Is this an expression that should perform a comparison for sorted searches private boolean isSortedExpr; - public ExprNodeGenericFuncDesc() {; + public ExprNodeGenericFuncDesc() { } /* If the function has an explicit name like func(args) then call a diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java index 4475b76..d052243 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java @@ -44,13 +44,9 @@ */ private static long serialVersionUID = 1L; - /** - * Mode. 
- * - */ - public static enum Mode { + public enum Mode { COMPLETE, PARTIAL1, PARTIAL2, PARTIALS, FINAL, HASH, MERGEPARTIAL - }; + } private Mode mode; private boolean groupKeyNotReductionKey; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java index 6a447ea..37957d1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java @@ -116,7 +116,6 @@ new Privilege[]{Privilege.ALTER_DATA}), SHOW_COMPACTIONS("SHOW COMPACTIONS", null, null), SHOW_TRANSACTIONS("SHOW TRANSACTIONS", null, null); - ; private String operationName; @@ -136,8 +135,8 @@ public String getOperationName() { return operationName; } - private HiveOperation(String operationName, - Privilege[] inputRequiredPrivileges, Privilege[] outputRequiredPrivileges) { + HiveOperation(String operationName, Privilege[] inputRequiredPrivileges, + Privilege[] outputRequiredPrivileges) { this.operationName = operationName; this.inputRequiredPrivileges = inputRequiredPrivileges; this.outputRequiredPrivileges = outputRequiredPrivileges; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java index 8da2848..0226278 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java @@ -37,7 +37,7 @@ private String destinationCreateTable; static { - PTFUtils.makeTransient(LoadFileDesc.class, "targetDir"); + PTFUtils.makeTransient(LoadFileDesc.class, "targetDir"); } public LoadFileDesc() { } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java index fffac7b..0627b9f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java @@ -38,7 +38,7 @@ private transient List srcDirs; static { - PTFUtils.makeTransient(LoadMultiFilesDesc.class, "targetDirs"); + PTFUtils.makeTransient(LoadMultiFilesDesc.class, "targetDirs"); } public LoadMultiFilesDesc() { } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java index c8c9570..3a18e47 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/OperatorDesc.java @@ -21,9 +21,9 @@ import java.io.Serializable; public interface OperatorDesc extends Serializable, Cloneable { - public Object clone() throws CloneNotSupportedException; - public Statistics getStatistics(); - public void setStatistics(Statistics statistics); - public OpTraits getOpTraits(); - public void setOpTraits(OpTraits opTraits); + Object clone() throws CloneNotSupportedException; + Statistics getStatistics(); + void setStatistics(Statistics statistics); + OpTraits getOpTraits(); + void setOpTraits(OpTraits opTraits); } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 5d56511..6e7f239 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -72,23 +72,15 @@ import org.apache.hadoop.mapred.SequenceFileOutputFormat; import org.apache.hadoop.mapred.TextInputFormat; -/** - * PlanUtils. 
- * - */ public final class PlanUtils { protected static final Log LOG = LogFactory.getLog("org.apache.hadoop.hive.ql.plan.PlanUtils"); private static long countForMapJoinDumpFilePrefix = 0; - /** - * ExpressionTypes. - * - */ - public static enum ExpressionTypes { + public enum ExpressionTypes { FIELD, JEXL - }; + } public static synchronized long getCountForMapJoinDumpFilePrefix() { return countForMapJoinDumpFilePrefix++; @@ -109,7 +101,7 @@ public static MapredWork getMapRedWork() { public static TableDesc getDefaultTableDesc(CreateTableDesc localDirectoryDesc, String cols, String colTypes ) { TableDesc ret = getDefaultTableDesc(Integer.toString(Utilities.ctrlaCode), cols, - colTypes, false);; + colTypes, false); if (localDirectoryDesc == null) { return ret; } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java index 8413fb7..7b9fe9e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/RoleDDLDesc.java @@ -70,16 +70,13 @@ public static String getShowRolePrincipalsSchema() { return roleShowRolePrincipals; } - public static enum RoleOperation { + public enum RoleOperation { DROP_ROLE("drop_role"), CREATE_ROLE("create_role"), SHOW_ROLE_GRANT("show_role_grant"), SHOW_ROLES("show_roles"), SET_ROLE("set_role"), SHOW_CURRENT_ROLE("show_current_role"), SHOW_ROLE_PRINCIPALS("show_role_principals"); private String operationName; - private RoleOperation() { - } - - private RoleOperation(String operationName) { + RoleOperation(String operationName) { this.operationName = operationName; } diff --git ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java index f7a3f1c..750f7e0 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java @@ -63,7 +63,7 @@ public ExprInfo(boolean isCandidate, String alias, ExprNodeDesc replacedNode) { } protected static final Log LOG = LogFactory.getLog(OpProcFactory.class - .getName());; + .getName()); private Operator op = null; private RowResolver toRR = null; diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java index d616952..ef03c2d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessor.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.ql.CommandNeedRetryException; public interface CommandProcessor { - public void init(); + void init(); - public CommandProcessorResponse run(String command) throws CommandNeedRetryException; + CommandProcessorResponse run(String command) throws CommandNeedRetryException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/security/HiveAuthenticationProvider.java ql/src/java/org/apache/hadoop/hive/ql/security/HiveAuthenticationProvider.java index 7befff8..a2fbbf2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/HiveAuthenticationProvider.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/HiveAuthenticationProvider.java @@ -30,17 +30,17 @@ */ public interface HiveAuthenticationProvider extends Configurable{ - public String getUserName(); + String getUserName(); - public List getGroupNames(); + List getGroupNames(); - public void destroy() throws HiveException; + void destroy() throws HiveException; /** * This function is meant to be used 
only for hive internal implementations of this interface. * SessionState is not a public interface. * @param ss SessionState that created this instance */ - public void setSessionState(SessionState ss); + void setSessionState(SessionState ss); } diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationFactory.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationFactory.java index 7e1696b..c7774e5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationFactory.java @@ -54,7 +54,7 @@ private void invokeAuth(Method method, Object[] args) throws Throwable { AuthorizationFactory.class.getClassLoader(), new Class[]{itface}, invocation); } - public static interface AuthorizationExceptionHandler { + public interface AuthorizationExceptionHandler { void exception(Exception exception) throws AuthorizationException, HiveAuthzPluginException, HiveAccessControlException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/BitSetCheckedAuthorizationProvider.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/BitSetCheckedAuthorizationProvider.java index 2f2ceeb..cbb5bea 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/BitSetCheckedAuthorizationProvider.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/BitSetCheckedAuthorizationProvider.java @@ -395,7 +395,6 @@ private boolean matchPrivs(Privilege[] inputPriv, && privileges.getRolePrivileges().size() > 0) { Collection> rolePrivsCollection = privileges .getRolePrivileges().values(); - ; List rolePrivs = getPrivilegeStringList(rolePrivsCollection); if (rolePrivs != null && rolePrivs.size() > 0) { for (String priv : rolePrivs) { diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProvider.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProvider.java index 55a2e79..1d5d6ea 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProvider.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProvider.java @@ -34,11 +34,11 @@ */ public interface HiveAuthorizationProvider extends Configurable{ - public void init(Configuration conf) throws HiveException; + void init(Configuration conf) throws HiveException; - public HiveAuthenticationProvider getAuthenticator(); + HiveAuthenticationProvider getAuthenticator(); - public void setAuthenticator(HiveAuthenticationProvider authenticator); + void setAuthenticator(HiveAuthenticationProvider authenticator); /** * Authorization user level privileges. @@ -50,8 +50,7 @@ * @throws HiveException * @throws AuthorizationException */ - public void authorize(Privilege[] readRequiredPriv, - Privilege[] writeRequiredPriv) throws HiveException, + void authorize(Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException; /** @@ -66,9 +65,8 @@ public void authorize(Privilege[] readRequiredPriv, * @throws HiveException * @throws AuthorizationException */ - public void authorize(Database db, Privilege[] readRequiredPriv, - Privilege[] writeRequiredPriv) throws HiveException, - AuthorizationException; + void authorize(Database db, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + throws HiveException, AuthorizationException; /** * Authorization privileges against a hive table object. 
@@ -82,9 +80,8 @@ public void authorize(Database db, Privilege[] readRequiredPriv, * @throws HiveException * @throws AuthorizationException */ - public void authorize(Table table, Privilege[] readRequiredPriv, - Privilege[] writeRequiredPriv) throws HiveException, - AuthorizationException; + void authorize(Table table, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + throws HiveException, AuthorizationException; /** * Authorization privileges against a hive partition object. @@ -98,9 +95,8 @@ public void authorize(Table table, Privilege[] readRequiredPriv, * @throws HiveException * @throws AuthorizationException */ - public void authorize(Partition part, Privilege[] readRequiredPriv, - Privilege[] writeRequiredPriv) throws HiveException, - AuthorizationException; + void authorize(Partition part, Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + throws HiveException, AuthorizationException; /** * Authorization privileges against a list of columns. If the partition object @@ -120,8 +116,8 @@ public void authorize(Partition part, Privilege[] readRequiredPriv, * @throws HiveException * @throws AuthorizationException */ - public void authorize(Table table, Partition part, List columns, - Privilege[] readRequiredPriv, Privilege[] writeRequiredPriv) + void authorize(Table table, Partition part, List columns, Privilege[] readRequiredPriv, + Privilege[] writeRequiredPriv) throws HiveException, AuthorizationException; } diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeScope.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeScope.java index 14a4c4e..d7e3731 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeScope.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/PrivilegeScope.java @@ -26,15 +26,15 @@ * db-level only, some are global, and some are table only. 
*/ public enum PrivilegeScope { - - USER_LEVEL_SCOPE((short) 0x01), - DB_LEVEL_SCOPE((short) 0x02), - TABLE_LEVEL_SCOPE((short) 0x04), + + USER_LEVEL_SCOPE((short) 0x01), + DB_LEVEL_SCOPE((short) 0x02), + TABLE_LEVEL_SCOPE((short) 0x04), COLUMN_LEVEL_SCOPE((short) 0x08); private short mode; - private PrivilegeScope(short mode) { + PrivilegeScope(short mode) { this.mode = mode; } @@ -45,7 +45,7 @@ public short getMode() { public void setMode(short mode) { this.mode = mode; } - + public static EnumSet ALLSCOPE = EnumSet.of( PrivilegeScope.USER_LEVEL_SCOPE, PrivilegeScope.DB_LEVEL_SCOPE, PrivilegeScope.TABLE_LEVEL_SCOPE, PrivilegeScope.COLUMN_LEVEL_SCOPE); diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java index 911a943..2a7a306 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java @@ -38,12 +38,12 @@ @Evolving public interface HiveAuthorizer { - public enum VERSION { V1 }; + enum VERSION { V1 } /** * @return version of HiveAuthorizer interface that is implemented by this instance */ - public VERSION getVersion(); + VERSION getVersion(); /** * Grant privileges for principals on the object @@ -192,7 +192,7 @@ void checkPrivileges(HiveOperationType hiveOpType, List inp * or other parameters related to hive security * @param hiveConf */ - public void applyAuthorizationConfigPolicy(HiveConf hiveConf); + void applyAuthorizationConfigPolicy(HiveConf hiveConf); } diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzContext.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzContext.java index bb71a7e..05adf59 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzContext.java @@ -31,7 +31,7 @@ public enum CLIENT_TYPE { HIVESERVER2, HIVECLI - }; + } public static class Builder { private String userIpAddress; diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java index 9e9ef71..10b696c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java @@ -81,10 +81,11 @@ private int compare(Collection o1, Collection o2) { public enum HivePrivilegeObjectType { GLOBAL, DATABASE, TABLE_OR_VIEW, PARTITION, COLUMN, LOCAL_URI, DFS_URI, COMMAND_PARAMS, FUNCTION - } ; + } + public enum HivePrivObjectActionType { OTHER, INSERT, INSERT_OVERWRITE - }; + } private final HivePrivilegeObjectType type; private final String dbname; diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java index fbc0090..b035c0c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveV1Authorizer.java @@ -304,7 +304,7 @@ private void grantOrRevokeRole(List principals, List role // show all privileges privs.addAll(hive.showPrivilegeGrant(null, name, type, 
null, null, null, null)); } else { - Database dbObj = hive.getDatabase(privObj.getDbname());; + Database dbObj = hive.getDatabase(privObj.getDbname()); if (dbObj == null) { throw new HiveException("Database " + privObj.getDbname() + " does not exists"); } diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java index 4a3d8a7..6716684 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java @@ -35,7 +35,7 @@ public enum IOType { INPUT, OUTPUT - }; + } private static class PrivRequirement { diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLPrivTypeGrant.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLPrivTypeGrant.java index 8de73d9..0ef202c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLPrivTypeGrant.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLPrivTypeGrant.java @@ -96,4 +96,4 @@ public String toString(){ return privDesc; } -}; +} diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLPrivilegeType.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLPrivilegeType.java index bcd66d8..97bb504 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLPrivilegeType.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLPrivilegeType.java @@ -40,4 +40,4 @@ public static SQLPrivilegeType getRequirePrivilege(String priv) } -}; +} diff --git ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index fcfcf42..c3f63e8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -145,7 +145,7 @@ private HiveAuthorizer authorizerV2; - public enum AuthorizationMode{V1, V2}; + public enum AuthorizationMode{V1, V2} private HiveAuthenticationProvider authenticator; @@ -754,11 +754,7 @@ static boolean unregisterJar(List jarsToUnregister) { } } - /** - * ResourceType. 
- * - */ - public static enum ResourceType { + public enum ResourceType { FILE, JAR { @@ -779,7 +775,7 @@ public void preHook(Set cur, List s) throws IllegalArgumentExcep } public void postHook(Set cur, List s) { } - }; + } public static ResourceType find_resource_type(String s) { diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/ClientStatsPublisher.java ql/src/java/org/apache/hadoop/hive/ql/stats/ClientStatsPublisher.java index 2480f03..38b0c31 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/ClientStatsPublisher.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/ClientStatsPublisher.java @@ -22,6 +22,6 @@ public interface ClientStatsPublisher { - public void run(Map counterValues, String jobID); + void run(Map counterValues, String jobID); } diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java index 026f4e0..027bd39 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/CounterStatsAggregator.java @@ -60,8 +60,8 @@ private JobConf toJobConf(Configuration hconf) { @Override public String aggregateStats(String counterGrpName, String statType) { // In case of counters, aggregation is done by JobTracker / MR AM itself - // so no need to aggregate, simply return the counter value for requested stat. - return String.valueOf(counters.getGroup(counterGrpName).getCounter(statType)); + // so no need to aggregate, simply return the counter value for requested stat. + return String.valueOf(counters.getGroup(counterGrpName).getCounter(statType)); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java index 0ae0489..2d2d560 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsAggregator.java @@ -35,7 +35,7 @@ * @param sourceTask * @return true if connection is successful, false otherwise. */ - public boolean connect(Configuration hconf, Task sourceTask); + boolean connect(Configuration hconf, Task sourceTask); /** * This method aggregates a given statistic from all tasks (partial stats). @@ -58,14 +58,14 @@ * a string noting the key to be published. Ex: "numRows". * @return a string representation of a long value, null if there are any error/exception. */ - public String aggregateStats(String keyPrefix, String statType); + String aggregateStats(String keyPrefix, String statType); /** * This method closes the connection to the temporary storage. * * @return true if close connection is successful, false otherwise. */ - public boolean closeConnection(); + boolean closeConnection(); /** * This method is called after all statistics have been aggregated. Since we support multiple @@ -81,5 +81,5 @@ * as the first parameter in aggregateStats(). * @return true if cleanup is successful, false otherwise. */ - public boolean cleanUp(String keyPrefix); + boolean cleanUp(String keyPrefix); } diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java index 845ec6a..20adce4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsPublisher.java @@ -37,14 +37,14 @@ * intermediate stats database. * @return true if initialization is successful, false otherwise. 
*/ - public boolean init(Configuration hconf); + boolean init(Configuration hconf); /** * This method connects to the intermediate statistics database. * @param hconf HiveConf that contains the connection parameters. * @return true if connection is successful, false otherwise. */ - public boolean connect(Configuration hconf); + boolean connect(Configuration hconf); /** * This method publishes a given statistic into a disk storage, possibly HBase or MySQL. @@ -61,11 +61,11 @@ * and value is a string representing the value for the given statistic * @return true if successful, false otherwise */ - public boolean publishStat(String fileID, Map stats); + boolean publishStat(String fileID, Map stats); /** * This method closes the connection to the temporary storage. */ - public boolean closeConnection(); + boolean closeConnection(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java index 624afd1..bc12c26 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java @@ -186,7 +186,6 @@ public Text evaluate(Text n, IntWritable fromBase, IntWritable toBase) { // Find the first non-zero digit or the last digits if all are zero. for (first = 0; first < value.length - 1 && value[first] == 0; first++) { - ; } byte2char(Math.abs(toBs), first); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java index 66778e0..eed8272 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java @@ -227,7 +227,7 @@ private Object extract(Object json, String path) { public void removeRange(int fromIndex, int toIndex) { super.removeRange(fromIndex, toIndex); } - }; + } @SuppressWarnings("unchecked") private Object extract_json_withindex(Object json, ArrayList indexList) { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java index d857421..57371db 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java @@ -328,7 +328,7 @@ protected HiveDecimalWritable getNextResult( private static class AverageAggregationBuffer implements AggregationBuffer { private long count; private TYPE sum; - }; + } @SuppressWarnings("unchecked") public static abstract class AbstractGenericUDAFAverageEvaluator extends GenericUDAFEvaluator { @@ -404,7 +404,7 @@ public ObjectInspector init(Mode m, ObjectInspector[] parameters) double sum; @Override public int estimate() { return JavaDataModel.PRIMITIVES2 * 2; } - }; + } @Override public void reset(AggregationBuffer aggregation) throws HiveException { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFComputeStats.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFComputeStats.java index 9702529..f7250f5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFComputeStats.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFComputeStats.java @@ -190,7 +190,7 @@ public int estimate() { JavaDataModel model = JavaDataModel.get(); return model.primitive2() * 3 + model.lengthFor(columnType); } - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { @@ -480,7 +480,7 @@ public void reset(String type) throws HiveException { countNulls = 
0; numDV = null; } - }; + } @Override public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException { @@ -605,7 +605,7 @@ protected void updateMax(Object maxValue, LongObjectInspector maxFieldOI) { max = maxFieldOI.get(maxValue); } } - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { @@ -668,7 +668,7 @@ protected void updateMax(Object maxValue, DoubleObjectInspector maxFieldOI) { max = maxFieldOI.get(maxValue); } } - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { @@ -832,7 +832,7 @@ public int estimate() { return model.primitive1() * 2 + model.primitive2() * 4 + model.lengthFor(columnType) + model.lengthFor(numDV); } - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { @@ -1130,7 +1130,7 @@ public int estimate() { JavaDataModel model = JavaDataModel.get(); return model.primitive2() * 4 + model.lengthFor(columnType); } - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { @@ -1300,7 +1300,7 @@ protected void updateMax(Object maxValue, HiveDecimalObjectInspector maxFieldOI) max = maxFieldOI.getPrimitiveJavaObject(maxValue); } } - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFContextNGrams.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFContextNGrams.java index 17e9d76..16ba954 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFContextNGrams.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFContextNGrams.java @@ -392,7 +392,7 @@ public Object terminate(AggregationBuffer agg) throws HiveException { static class NGramAggBuf extends AbstractAggregationBuffer { ArrayList context; NGramEstimator nge; - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java index 8056931..7acf759 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCorrelation.java @@ -256,7 +256,7 @@ public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveExc double covar; // n times the covariance @Override public int estimate() { return JavaDataModel.PRIMITIVES2 * 6; } - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java index f1017be..a9d67df 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCovariance.java @@ -234,7 +234,7 @@ public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveExc double covar; // n times the covariance @Override public int estimate() { return JavaDataModel.PRIMITIVES2 * 4; } - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCumeDist.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCumeDist.java index fbadb91..42db2e2 100644 --- 
ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCumeDist.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCumeDist.java @@ -34,29 +34,29 @@ @WindowFunctionDescription ( - description = @Description( - name = "cume_dist", - value = "_FUNC_(x) - The CUME_DIST function (defined as the inverse of percentile in some " + - "statistical books) computes the position of a specified value relative to a set of values. " + - "To compute the CUME_DIST of a value x in a set S of size N, you use the formula: " + - "CUME_DIST(x) = number of values in S coming before " + - " and including x in the specified order/ N" - ), - supportsWindow = false, - pivotResult = true, - rankingFunction = true, - impliesOrder = true + description = @Description( + name = "cume_dist", + value = "_FUNC_(x) - The CUME_DIST function (defined as the inverse of percentile in some " + + "statistical books) computes the position of a specified value relative to a set of values. " + + "To compute the CUME_DIST of a value x in a set S of size N, you use the formula: " + + "CUME_DIST(x) = number of values in S coming before " + + " and including x in the specified order/ N" + ), + supportsWindow = false, + pivotResult = true, + rankingFunction = true, + impliesOrder = true ) public class GenericUDAFCumeDist extends GenericUDAFRank { - static final Log LOG = LogFactory.getLog(GenericUDAFCumeDist.class.getName()); + static final Log LOG = LogFactory.getLog(GenericUDAFCumeDist.class.getName()); - @Override + @Override protected GenericUDAFAbstractRankEvaluator createEvaluator() - { - return new GenericUDAFCumeDistEvaluator(); - } + { + return new GenericUDAFCumeDistEvaluator(); + } public static class GenericUDAFCumeDistEvaluator extends GenericUDAFAbstractRankEvaluator { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFDenseRank.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFDenseRank.java index 8856fb7..26199c7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFDenseRank.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFDenseRank.java @@ -23,41 +23,38 @@ import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.WindowFunctionDescription; -@WindowFunctionDescription -( - description = @Description( - name = "dense_rank", - value = "_FUNC_(x) The difference between RANK and DENSE_RANK is that DENSE_RANK leaves no " + - "gaps in ranking sequence when there are ties. That is, if you were " + - "ranking a competition using DENSE_RANK and had three people tie for " + - "second place, you would say that all three were in second place and " + - "that the next person came in third." - ), - supportsWindow = false, - pivotResult = true, - rankingFunction = true, - impliesOrder = true +@WindowFunctionDescription( + description = @Description( + name = "dense_rank", + value = "_FUNC_(x) The difference between RANK and DENSE_RANK is that DENSE_RANK leaves no " + + "gaps in ranking sequence when there are ties. That is, if you were " + + "ranking a competition using DENSE_RANK and had three people tie for " + + "second place, you would say that all three were in second place and " + + "that the next person came in third." 
+ ), + supportsWindow = false, + pivotResult = true, + rankingFunction = true, + impliesOrder = true ) -public class GenericUDAFDenseRank extends GenericUDAFRank -{ - static final Log LOG = LogFactory.getLog(GenericUDAFDenseRank.class.getName()); +public class GenericUDAFDenseRank extends GenericUDAFRank { - @Override - protected GenericUDAFAbstractRankEvaluator createEvaluator() - { - return new GenericUDAFDenseRankEvaluator(); - } + static final Log LOG = LogFactory.getLog(GenericUDAFDenseRank.class.getName()); - public static class GenericUDAFDenseRankEvaluator extends GenericUDAFRankEvaluator - { - /* - * Called when the value in the partition has changed. Update the currentRank - */ - @Override + @Override + protected GenericUDAFAbstractRankEvaluator createEvaluator() { + return new GenericUDAFDenseRankEvaluator(); + } + + public static class GenericUDAFDenseRankEvaluator extends GenericUDAFRankEvaluator { + /* + * Called when the value in the partition has changed. Update the currentRank + */ + @Override protected void nextRank(RankBuffer rb) - { - rb.currentRank++; - } - } + { + rb.currentRank++; + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java index 4b22a5b..6f030f6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFEvaluator.java @@ -31,9 +31,9 @@ /** * A Generic User-defined aggregation function (GenericUDAF) for the use with * Hive. - * + * * New GenericUDAF classes need to inherit from this GenericUDAF class. - * + * * The GenericUDAF are superior to normal UDAFs in the following ways: 1. It can * accept arguments of complex types, and return complex types. 2. It can accept * variable length of arguments. 3. It can accept an infinite number of function @@ -43,7 +43,7 @@ @UDFType(deterministic = true) public abstract class GenericUDAFEvaluator implements Closeable { - public static @interface AggregationType { + public @interface AggregationType { boolean estimable() default false; } @@ -60,7 +60,7 @@ public static boolean isEstimable(AggregationBuffer buffer) { * Mode. * */ - public static enum Mode { + public enum Mode { /** * PARTIAL1: from original data to partial aggregation data: iterate() and * terminatePartial() will be called. @@ -81,7 +81,7 @@ public static boolean isEstimable(AggregationBuffer buffer) { * terminate() will be called. */ COMPLETE - }; + } Mode mode; @@ -102,7 +102,7 @@ public void configure(MapredContext mapredContext) { /** * Initialize the evaluator. - * + * * @param m * The mode of aggregation. * @param parameters @@ -114,7 +114,7 @@ public void configure(MapredContext mapredContext) { * mode, the ObjectInspector for the return value of * terminatePartial() call; In FINAL and COMPLETE mode, the * ObjectInspector for the return value of terminate() call. - * + * * NOTE: We need ObjectInspector[] (in addition to the TypeInfo[] in * GenericUDAFResolver) for 2 reasons: 1. ObjectInspector contains * more information than TypeInfo; and GenericUDAFEvaluator.init at @@ -131,20 +131,21 @@ public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveExc /** * The interface for a class that is used to store the aggregation result * during the process of aggregation. 
- * + * * We split this piece of data out because there can be millions of instances * of this Aggregation in hash-based aggregation process, and it's very * important to conserve memory. - * + * * In the future, we may completely hide this class inside the Evaluator and * use integer numbers to identify which aggregation we are looking at. * * @deprecated use {@link AbstractAggregationBuffer} instead */ - public static interface AggregationBuffer { - }; + @Deprecated + public interface AggregationBuffer { + } - public static abstract class AbstractAggregationBuffer implements AggregationBuffer { + public abstract static class AbstractAggregationBuffer implements AggregationBuffer { /** * Estimate the size of memory which is occupied by aggregation buffer. * Currently, hive assumes that primitives types occupies 16 byte and java object has @@ -174,7 +175,7 @@ public void close() throws IOException { /** * This function will be called by GroupByOperator when it sees a new input * row. - * + * * @param agg * The object to store the aggregation result. * @param parameters @@ -192,7 +193,7 @@ public void aggregate(AggregationBuffer agg, Object[] parameters) throws HiveExc /** * This function will be called by GroupByOperator when it sees a new input * row. - * + * * @param agg * The object to store the aggregation result. */ @@ -206,7 +207,7 @@ public Object evaluate(AggregationBuffer agg) throws HiveException { /** * Iterate through original data. - * + * * @param parameters * The objects of parameters. */ @@ -214,7 +215,7 @@ public Object evaluate(AggregationBuffer agg) throws HiveException { /** * Get partial aggregation result. - * + * * @return partial aggregation result. */ public abstract Object terminatePartial(AggregationBuffer agg) throws HiveException; @@ -222,7 +223,7 @@ public Object evaluate(AggregationBuffer agg) throws HiveException { /** * Merge with partial aggregation result. NOTE: null might be passed in case * there is no input data. - * + * * @param partial * The partial aggregation result. */ @@ -230,7 +231,7 @@ public Object evaluate(AggregationBuffer agg) throws HiveException { /** * Get final aggregation result. - * + * * @return final aggregation result. */ public abstract Object terminate(AggregationBuffer agg) throws HiveException; @@ -245,7 +246,7 @@ public Object evaluate(AggregationBuffer agg) throws HiveException { * This method is called after this Evaluator is initialized. The returned * Function must be initialized. It is passed the 'window' of aggregation for * each row. - * + * * @param wFrmDef * the Window definition in play for this evaluation. * @return null implies that this fn cannot be processed in Streaming mode. 
So diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFFirstValue.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFFirstValue.java index 2ca48b3..693f243 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFFirstValue.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFFirstValue.java @@ -41,147 +41,128 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -@WindowFunctionDescription -( - description = @Description( - name = "first_value", - value = "_FUNC_(x)" - ), - supportsWindow = true, - pivotResult = false, - impliesOrder = true +@WindowFunctionDescription( + description = @Description( + name = "first_value", + value = "_FUNC_(x)" + ), + supportsWindow = true, + pivotResult = false, + impliesOrder = true ) -public class GenericUDAFFirstValue extends AbstractGenericUDAFResolver -{ - static final Log LOG = LogFactory.getLog(GenericUDAFFirstValue.class.getName()); - - @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException - { - if (parameters.length > 2) - { - throw new UDFArgumentTypeException(2, "At most 2 arguments expected"); - } - if ( parameters.length > 1 && !parameters[1].equals(TypeInfoFactory.booleanTypeInfo) ) - { - throw new UDFArgumentTypeException(1, "second argument must be a boolean expression"); - } - return createEvaluator(); - } - - protected GenericUDAFFirstValueEvaluator createEvaluator() - { - return new GenericUDAFFirstValueEvaluator(); - } - - static class FirstValueBuffer implements AggregationBuffer - { - Object val; - boolean valSet; - boolean firstRow; - boolean skipNulls; - - FirstValueBuffer() - { - init(); - } - - void init() - { - val = null; - valSet = false; - firstRow = true; - skipNulls = false; - } - - } - - public static class GenericUDAFFirstValueEvaluator extends GenericUDAFEvaluator - { - ObjectInspector inputOI; - ObjectInspector outputOI; - - @Override - public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException - { - super.init(m, parameters); - if (m != Mode.COMPLETE) - { - throw new HiveException( - "Only COMPLETE mode supported for Rank function"); - } - inputOI = parameters[0]; - outputOI = ObjectInspectorUtils.getStandardObjectInspector(inputOI, ObjectInspectorCopyOption.WRITABLE); - return outputOI; - } - - @Override - public AggregationBuffer getNewAggregationBuffer() throws HiveException - { - return new FirstValueBuffer(); - } - - @Override - public void reset(AggregationBuffer agg) throws HiveException - { - ((FirstValueBuffer) agg).init(); - } - - @Override - public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException - { - FirstValueBuffer fb = (FirstValueBuffer) agg; - - if (fb.firstRow ) - { - fb.firstRow = false; - if ( parameters.length == 2 ) - { - fb.skipNulls = PrimitiveObjectInspectorUtils.getBoolean( - parameters[1], - PrimitiveObjectInspectorFactory.writableBooleanObjectInspector); - } - } - - if ( !fb.valSet ) - { - fb.val = ObjectInspectorUtils.copyToStandardObject(parameters[0], inputOI, ObjectInspectorCopyOption.WRITABLE); - if ( !fb.skipNulls || fb.val != null ) - { - fb.valSet = true; - } - } - } - - @Override - public Object terminatePartial(AggregationBuffer agg) throws HiveException - { - throw new HiveException("terminatePartial not supported"); - } - - @Override - public void merge(AggregationBuffer agg, Object partial) throws HiveException - { - throw new 
HiveException("merge not supported"); - } - - @Override - public Object terminate(AggregationBuffer agg) throws HiveException - { - return ((FirstValueBuffer) agg).val; - } - +public class GenericUDAFFirstValue extends AbstractGenericUDAFResolver { + + static final Log LOG = LogFactory.getLog(GenericUDAFFirstValue.class.getName()); + + @Override + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + if (parameters.length > 2) { + throw new UDFArgumentTypeException(2, "At most 2 arguments expected"); + } + if (parameters.length > 1 && !parameters[1].equals(TypeInfoFactory.booleanTypeInfo)) { + throw new UDFArgumentTypeException(1, "second argument must be a boolean expression"); + } + return createEvaluator(); + } + + protected GenericUDAFFirstValueEvaluator createEvaluator() { + return new GenericUDAFFirstValueEvaluator(); + } + + static class FirstValueBuffer implements AggregationBuffer { + + Object val; + boolean valSet; + boolean firstRow; + boolean skipNulls; + + FirstValueBuffer() { + init(); + } + + void init() { + val = null; + valSet = false; + firstRow = true; + skipNulls = false; + } + + } + + public static class GenericUDAFFirstValueEvaluator extends GenericUDAFEvaluator { + + ObjectInspector inputOI; + ObjectInspector outputOI; + + @Override + public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException { + super.init(m, parameters); + if (m != Mode.COMPLETE) { + throw new HiveException("Only COMPLETE mode supported for Rank function"); + } + inputOI = parameters[0]; + outputOI = ObjectInspectorUtils.getStandardObjectInspector(inputOI, + ObjectInspectorCopyOption.WRITABLE); + return outputOI; + } + + @Override + public AggregationBuffer getNewAggregationBuffer() throws HiveException { + return new FirstValueBuffer(); + } + + @Override + public void reset(AggregationBuffer agg) throws HiveException { + ((FirstValueBuffer) agg).init(); + } + + @Override + public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException { + FirstValueBuffer fb = (FirstValueBuffer) agg; + + if (fb.firstRow) { + fb.firstRow = false; + if (parameters.length == 2) { + fb.skipNulls = PrimitiveObjectInspectorUtils.getBoolean(parameters[1], + PrimitiveObjectInspectorFactory.writableBooleanObjectInspector); + } + } + + if (!fb.valSet) { + fb.val = ObjectInspectorUtils.copyToStandardObject(parameters[0], inputOI, + ObjectInspectorCopyOption.WRITABLE); + if (!fb.skipNulls || fb.val != null) { + fb.valSet = true; + } + } + } + + @Override + public Object terminatePartial(AggregationBuffer agg) throws HiveException { + throw new HiveException("terminatePartial not supported"); + } + + @Override + public void merge(AggregationBuffer agg, Object partial) throws HiveException { + throw new HiveException("merge not supported"); + } + + @Override + public Object terminate(AggregationBuffer agg) throws HiveException { + return ((FirstValueBuffer) agg).val; + } + @Override public GenericUDAFEvaluator getWindowingEvaluator(WindowFrameDef wFrmDef) { BoundaryDef start = wFrmDef.getStart(); BoundaryDef end = wFrmDef.getEnd(); - return new FirstValStreamingFixedWindow(this, start.getAmt(), - end.getAmt()); + return new FirstValStreamingFixedWindow(this, start.getAmt(), end.getAmt()); } - } - + } + static class ValIndexPair { + Object val; int idx; @@ -191,16 +172,15 @@ public GenericUDAFEvaluator getWindowingEvaluator(WindowFrameDef wFrmDef) { } } - static class FirstValStreamingFixedWindow extends - GenericUDAFStreamingEvaluator { + 
static class FirstValStreamingFixedWindow extends GenericUDAFStreamingEvaluator { class State extends GenericUDAFStreamingEvaluator.StreamingState { + private final Deque valueChain; - public State(int numPreceding, int numFollowing, AggregationBuffer buf) { + State(int numPreceding, int numFollowing, AggregationBuffer buf) { super(numPreceding, numFollowing, buf); - valueChain = new ArrayDeque(numPreceding + numFollowing - + 1); + valueChain = new ArrayDeque(numPreceding + numFollowing + 1); } @Override @@ -222,8 +202,8 @@ public int estimate() { */ int wdwSz = numPreceding + numFollowing + 1; - return underlying + (underlying * wdwSz) + (underlying * wdwSz) - + (3 * JavaDataModel.PRIMITIVES1); + return underlying + (underlying * wdwSz) + (underlying * wdwSz) + + (3 * JavaDataModel.PRIMITIVES1); } protected void reset() { @@ -232,8 +212,8 @@ protected void reset() { } } - public FirstValStreamingFixedWindow(GenericUDAFEvaluator wrappedEval, - int numPreceding, int numFollowing) { + FirstValStreamingFixedWindow(GenericUDAFEvaluator wrappedEval, int numPreceding, + int numFollowing) { super(wrappedEval, numPreceding, numFollowing); } @@ -253,8 +233,7 @@ protected ObjectInspector inputOI() { } @Override - public void iterate(AggregationBuffer agg, Object[] parameters) - throws HiveException { + public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException { State s = (State) agg; FirstValueBuffer fb = (FirstValueBuffer) s.wrappedBuf; @@ -266,15 +245,14 @@ public void iterate(AggregationBuffer agg, Object[] parameters) wrappedEval.iterate(fb, parameters); } - Object o = ObjectInspectorUtils.copyToStandardObject(parameters[0], - inputOI(), ObjectInspectorCopyOption.WRITABLE); + Object o = ObjectInspectorUtils.copyToStandardObject(parameters[0], inputOI(), + ObjectInspectorCopyOption.WRITABLE); /* * add row to chain. except in case of UNB preceding: - only 1 firstVal * needs to be tracked. */ - if (s.numPreceding != BoundarySpec.UNBOUNDED_AMOUNT - || s.valueChain.isEmpty()) { + if (s.numPreceding != BoundarySpec.UNBOUNDED_AMOUNT || s.valueChain.isEmpty()) { /* * add value to chain if it is not null or if skipNulls is false. */ @@ -283,12 +261,12 @@ public void iterate(AggregationBuffer agg, Object[] parameters) } } - if (s.numRows >= (s.numFollowing)) { + if (s.numRows >= s.numFollowing) { /* * if skipNulls is true and there are no rows in valueChain => all rows * in partition are null so far; so add null in o/p */ - if (fb.skipNulls && s.valueChain.size() == 0) { + if (fb.skipNulls && s.valueChain.isEmpty()) { s.results.add(null); } else { s.results.add(s.valueChain.getFirst().val); @@ -296,8 +274,8 @@ public void iterate(AggregationBuffer agg, Object[] parameters) } s.numRows++; - if (s.valueChain.size() > 0) { - int fIdx = (Integer) s.valueChain.getFirst().idx; + if (!s.valueChain.isEmpty()) { + int fIdx = s.valueChain.getFirst().idx; if (s.numPreceding != BoundarySpec.UNBOUNDED_AMOUNT && s.numRows > fIdx + s.numPreceding + s.numFollowing) { s.valueChain.removeFirst(); @@ -309,19 +287,18 @@ public void iterate(AggregationBuffer agg, Object[] parameters) public Object terminate(AggregationBuffer agg) throws HiveException { State s = (State) agg; FirstValueBuffer fb = (FirstValueBuffer) s.wrappedBuf; - ValIndexPair r = fb.skipNulls && s.valueChain.size() == 0 ? null - : s.valueChain.getFirst(); + ValIndexPair r = fb.skipNulls && s.valueChain.isEmpty() ? null : s.valueChain.getFirst(); for (int i = 0; i < s.numFollowing; i++) { s.results.add(r == null ? 
null : r.val); s.numRows++; if (r != null) { - int fIdx = (Integer) r.idx; + int fIdx = r.idx; if (s.numPreceding != BoundarySpec.UNBOUNDED_AMOUNT && s.numRows > fIdx + s.numPreceding + s.numFollowing && !s.valueChain.isEmpty()) { s.valueChain.removeFirst(); - r = !s.valueChain.isEmpty() ? s.valueChain.getFirst() : r; + r = s.valueChain.isEmpty() ? r : s.valueChain.getFirst(); } } } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFHistogramNumeric.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFHistogramNumeric.java index f2e8e03..f5864e7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFHistogramNumeric.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFHistogramNumeric.java @@ -237,7 +237,7 @@ public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveExcep public int estimate() { return JavaDataModel.get().lengthFor(histogram); } - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLastValue.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLastValue.java index 99c41b5..7abc95c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLastValue.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFLastValue.java @@ -37,131 +37,107 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -@WindowFunctionDescription(description = @Description(name = "last_value", value = "_FUNC_(x)"), supportsWindow = true, pivotResult = false, impliesOrder = true) -public class GenericUDAFLastValue extends AbstractGenericUDAFResolver -{ - static final Log LOG = LogFactory.getLog(GenericUDAFLastValue.class - .getName()); - - @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException - { - if (parameters.length > 2) - { - throw new UDFArgumentTypeException(2, "At most 2 arguments expected"); - } - if ( parameters.length > 1 && !parameters[1].equals(TypeInfoFactory.booleanTypeInfo) ) - { - throw new UDFArgumentTypeException(1, "second argument must be a boolean expression"); - } - return createEvaluator(); - } - - protected GenericUDAFLastValueEvaluator createEvaluator() - { - return new GenericUDAFLastValueEvaluator(); - } - - static class LastValueBuffer implements AggregationBuffer - { - Object val; - boolean firstRow; - boolean skipNulls; - - LastValueBuffer() - { - init(); - } - - void init() - { - val = null; - firstRow = true; - skipNulls = false; - } - - } - - public static class GenericUDAFLastValueEvaluator extends - GenericUDAFEvaluator - { - ObjectInspector inputOI; - ObjectInspector outputOI; - - @Override - public ObjectInspector init(Mode m, ObjectInspector[] parameters) - throws HiveException - { - super.init(m, parameters); - if (m != Mode.COMPLETE) - { - throw new HiveException( - "Only COMPLETE mode supported for Rank function"); - } - inputOI = parameters[0]; - outputOI = ObjectInspectorUtils.getStandardObjectInspector(inputOI, - ObjectInspectorCopyOption.WRITABLE); - return outputOI; - } - - @Override - public AggregationBuffer getNewAggregationBuffer() throws HiveException - { - return new LastValueBuffer(); - } - - @Override - public void reset(AggregationBuffer agg) throws HiveException - { - ((LastValueBuffer) agg).init(); - } - - @Override - public void iterate(AggregationBuffer agg, Object[] parameters) - throws 
HiveException - { - LastValueBuffer lb = (LastValueBuffer) agg; - if (lb.firstRow ) - { - lb.firstRow = false; - if ( parameters.length == 2 ) - { - lb.skipNulls = PrimitiveObjectInspectorUtils.getBoolean( - parameters[1], - PrimitiveObjectInspectorFactory.writableBooleanObjectInspector); - } - } - - Object o = ObjectInspectorUtils.copyToStandardObject(parameters[0], - inputOI, ObjectInspectorCopyOption.WRITABLE); +@WindowFunctionDescription(description = @Description(name = "last_value", value = "_FUNC_(x)"), + supportsWindow = true, pivotResult = false, impliesOrder = true) +public class GenericUDAFLastValue extends AbstractGenericUDAFResolver { + + static final Log LOG = LogFactory.getLog(GenericUDAFLastValue.class.getName()); + + @Override + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + if (parameters.length > 2) { + throw new UDFArgumentTypeException(2, "At most 2 arguments expected"); + } + if (parameters.length > 1 && !parameters[1].equals(TypeInfoFactory.booleanTypeInfo)) { + throw new UDFArgumentTypeException(1, "second argument must be a boolean expression"); + } + return createEvaluator(); + } + + protected GenericUDAFLastValueEvaluator createEvaluator() { + return new GenericUDAFLastValueEvaluator(); + } + + static class LastValueBuffer implements AggregationBuffer { + + Object val; + boolean firstRow; + boolean skipNulls; + + LastValueBuffer() { + init(); + } + + void init() { + val = null; + firstRow = true; + skipNulls = false; + } + + } + + public static class GenericUDAFLastValueEvaluator extends GenericUDAFEvaluator { + + ObjectInspector inputOI; + ObjectInspector outputOI; + + @Override + public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException { + super.init(m, parameters); + if (m != Mode.COMPLETE) { + throw new HiveException("Only COMPLETE mode supported for Rank function"); + } + inputOI = parameters[0]; + outputOI = ObjectInspectorUtils.getStandardObjectInspector(inputOI, + ObjectInspectorCopyOption.WRITABLE); + return outputOI; + } + + @Override + public AggregationBuffer getNewAggregationBuffer() throws HiveException { + return new LastValueBuffer(); + } + + @Override + public void reset(AggregationBuffer agg) throws HiveException { + ((LastValueBuffer) agg).init(); + } + + @Override + public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException { + LastValueBuffer lb = (LastValueBuffer) agg; + if (lb.firstRow) { + lb.firstRow = false; + if (parameters.length == 2) { + lb.skipNulls = PrimitiveObjectInspectorUtils.getBoolean(parameters[1], + PrimitiveObjectInspectorFactory.writableBooleanObjectInspector); + } + } + + Object o = ObjectInspectorUtils.copyToStandardObject(parameters[0], inputOI, + ObjectInspectorCopyOption.WRITABLE); if (!lb.skipNulls || o != null) { lb.val = o; } - } - - @Override - public Object terminatePartial(AggregationBuffer agg) - throws HiveException - { - throw new HiveException("terminatePartial not supported"); - } - - @Override - public void merge(AggregationBuffer agg, Object partial) - throws HiveException - { - throw new HiveException("merge not supported"); - } - - @Override - public Object terminate(AggregationBuffer agg) throws HiveException - { - LastValueBuffer lb = (LastValueBuffer) agg; - return lb.val; - - } + } + + @Override + public Object terminatePartial(AggregationBuffer agg) throws HiveException { + throw new HiveException("terminatePartial not supported"); + } + + @Override + public void merge(AggregationBuffer agg, 
Object partial) throws HiveException { + throw new HiveException("merge not supported"); + } + + @Override + public Object terminate(AggregationBuffer agg) throws HiveException { + LastValueBuffer lb = (LastValueBuffer) agg; + return lb.val; + + } @Override public GenericUDAFEvaluator getWindowingEvaluator(WindowFrameDef wFrmDef) { @@ -169,16 +145,16 @@ public GenericUDAFEvaluator getWindowingEvaluator(WindowFrameDef wFrmDef) { BoundaryDef end = wFrmDef.getEnd(); return new LastValStreamingFixedWindow(this, start.getAmt(), end.getAmt()); } - } + } - static class LastValStreamingFixedWindow extends - GenericUDAFStreamingEvaluator { + static class LastValStreamingFixedWindow extends GenericUDAFStreamingEvaluator { class State extends GenericUDAFStreamingEvaluator.StreamingState { + private Object lastValue; private int lastIdx; - public State(int numPreceding, int numFollowing, AggregationBuffer buf) { + State(int numPreceding, int numFollowing, AggregationBuffer buf) { super(numPreceding, numFollowing, buf); lastValue = null; lastIdx = -1; @@ -203,8 +179,8 @@ protected void reset() { } } - public LastValStreamingFixedWindow(GenericUDAFEvaluator wrappedEval, - int numPreceding, int numFollowing) { + LastValStreamingFixedWindow(GenericUDAFEvaluator wrappedEval, int numPreceding, + int numFollowing) { super(wrappedEval, numPreceding, numFollowing); } @@ -224,8 +200,7 @@ protected ObjectInspector inputOI() { } @Override - public void iterate(AggregationBuffer agg, Object[] parameters) - throws HiveException { + public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException { State s = (State) agg; LastValueBuffer lb = (LastValueBuffer) s.wrappedBuf; @@ -237,8 +212,8 @@ public void iterate(AggregationBuffer agg, Object[] parameters) wrappedEval.iterate(lb, parameters); } - Object o = ObjectInspectorUtils.copyToStandardObject(parameters[0], - inputOI(), ObjectInspectorCopyOption.WRITABLE); + Object o = ObjectInspectorUtils.copyToStandardObject(parameters[0], inputOI(), + ObjectInspectorCopyOption.WRITABLE); if (!lb.skipNulls || o != null) { s.lastValue = o; @@ -251,7 +226,7 @@ public void iterate(AggregationBuffer agg, Object[] parameters) } } - if (s.numRows >= (s.numFollowing)) { + if (s.numRows >= s.numFollowing) { s.results.add(s.lastValue); } s.numRows++; diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFNTile.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFNTile.java index 18cde76..24ec519 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFNTile.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFNTile.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.udf.generic; import java.util.ArrayList; +import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -38,144 +39,129 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.io.IntWritable; -@WindowFunctionDescription -( - description = @Description( - name = "rank", - value = "_FUNC_(x) NTILE allows easy calculation of tertiles, quartiles, deciles and other " + - "common summary statistics. This function divides an ordered partition into a specified " + - "number of groups called buckets and assigns a bucket number to each row in the partition." 
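An aside on the last_value evaluator whose diff ends just above: the optional second argument toggles skipNulls, and iterate() only overwrites the buffered value when skipNulls is false or the incoming value is non-null. The following is a minimal plain-Java sketch of that rule; the class and method names are hypothetical and this is not Hive API.

    import java.util.Arrays;
    import java.util.List;

    // Hypothetical sketch of the value-tracking rule in the last_value iterate() above:
    // keep every value when skipNulls is false, keep only non-null values otherwise.
    public class LastValueSketch {
      static Object lastValue(List<?> orderedPartition, boolean skipNulls) {
        Object val = null;
        for (Object o : orderedPartition) {
          if (!skipNulls || o != null) {   // same condition as in the evaluator
            val = o;
          }
        }
        return val;
      }

      public static void main(String[] args) {
        List<String> column = Arrays.asList("a", "b", null);
        System.out.println(lastValue(column, false));  // null, as with last_value(x)
        System.out.println(lastValue(column, true));   // b, as with last_value(x, true)
      }
    }

In other words, last_value(x, true) returns the last non-null value of x in the window rather than the literal last value.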
- ), - supportsWindow = false, - pivotResult = true +@WindowFunctionDescription( + description = @Description( + name = "rank", + value = "_FUNC_(x) NTILE allows easy calculation of tertiles, quartiles, deciles and other " + + + "common summary statistics. This function divides an ordered partition into a specified " + + + "number of groups called buckets and assigns a bucket number to each row in the partition." + ), + supportsWindow = false, + pivotResult = true ) -public class GenericUDAFNTile extends AbstractGenericUDAFResolver -{ - static final Log LOG = LogFactory.getLog(GenericUDAFNTile.class.getName()); - - @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException - { - if (parameters.length != 1) - { - throw new UDFArgumentTypeException(parameters.length - 1, "Exactly one argument is expected."); - } - ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(parameters[0]); - - boolean c = ObjectInspectorUtils.compareTypes(oi, PrimitiveObjectInspectorFactory.writableIntObjectInspector); - if (!c) - { - throw new UDFArgumentTypeException(0, "Number of tiles must be an int expression"); - } - - return new GenericUDAFNTileEvaluator(); - } - - static class NTileBuffer implements AggregationBuffer - { - Integer numBuckets; - int numRows; - - void init() - { - numBuckets = null; - numRows = 0; - } - - NTileBuffer() - { - init(); - } - } - - public static class GenericUDAFNTileEvaluator extends GenericUDAFEvaluator - { - private transient PrimitiveObjectInspector inputOI; - - @Override - public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException - { - assert (parameters.length == 1); - super.init(m, parameters); - if (m != Mode.COMPLETE) - { - throw new HiveException( - "Only COMPLETE mode supported for NTile function"); - } - inputOI = (PrimitiveObjectInspector) parameters[0]; - return ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.writableIntObjectInspector); - } - - @Override - public AggregationBuffer getNewAggregationBuffer() throws HiveException - { - return new NTileBuffer(); - } - - @Override - public void reset(AggregationBuffer agg) throws HiveException - { - ((NTileBuffer) agg).init(); - } - - @Override - public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException - { - NTileBuffer rb = (NTileBuffer) agg; - if ( rb.numBuckets == null) - { - rb.numBuckets = PrimitiveObjectInspectorUtils.getInt(parameters[0], inputOI); - } - rb.numRows++; - } - - @Override - public Object terminatePartial(AggregationBuffer agg) throws HiveException - { - throw new HiveException("terminatePartial not supported"); - } - - @Override - public void merge(AggregationBuffer agg, Object partial) throws HiveException - { - throw new HiveException("merge not supported"); - } - - @Override - public Object terminate(AggregationBuffer agg) throws HiveException - { - NTileBuffer rb = (NTileBuffer) agg; - ArrayList res = new ArrayList(rb.numRows); - - /* - * if there is a remainder from numRows/numBuckets; then distribute increase the size of the first 'rem' buckets by 1. 
- */ - - int bucketsz = rb.numRows / rb.numBuckets; - int rem = rb.numRows % rb.numBuckets; - int start = 0; - int bucket = 1; - while ( start < rb.numRows) - { - int end = start + bucketsz; - if (rem > 0) - { - end++; rem--; - } - end = Math.min(rb.numRows, end); - for(int i = start; i < end; i++) - { - res.add(new IntWritable(bucket)); - } - start = end; - bucket++; - } - - return res; - } - - } - +public class GenericUDAFNTile extends AbstractGenericUDAFResolver { + + static final Log LOG = LogFactory.getLog(GenericUDAFNTile.class.getName()); + + @Override + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + if (parameters.length != 1) { + throw new UDFArgumentTypeException(parameters.length - 1, + "Exactly one argument is expected."); + } + ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(parameters[0]); + + boolean c = ObjectInspectorUtils.compareTypes(oi, + PrimitiveObjectInspectorFactory.writableIntObjectInspector); + if (!c) { + throw new UDFArgumentTypeException(0, "Number of tiles must be an int expression"); + } + + return new GenericUDAFNTileEvaluator(); + } + + static class NTileBuffer implements AggregationBuffer { + + Integer numBuckets; + int numRows; + + void init() { + numBuckets = null; + numRows = 0; + } + + NTileBuffer() { + init(); + } + } + + public static class GenericUDAFNTileEvaluator extends GenericUDAFEvaluator { + + private PrimitiveObjectInspector inputOI; + + @Override + public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException { + assert parameters.length == 1; + super.init(m, parameters); + if (m != Mode.COMPLETE) { + throw new HiveException("Only COMPLETE mode supported for NTile function"); + } + inputOI = (PrimitiveObjectInspector) parameters[0]; + return ObjectInspectorFactory.getStandardListObjectInspector( + PrimitiveObjectInspectorFactory.writableIntObjectInspector); + } + + @Override + public AggregationBuffer getNewAggregationBuffer() throws HiveException { + return new NTileBuffer(); + } + + @Override + public void reset(AggregationBuffer agg) throws HiveException { + ((NTileBuffer) agg).init(); + } + + @Override + public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException { + NTileBuffer rb = (NTileBuffer) agg; + if (rb.numBuckets == null) { + rb.numBuckets = PrimitiveObjectInspectorUtils.getInt(parameters[0], inputOI); + } + rb.numRows++; + } + + @Override + public Object terminatePartial(AggregationBuffer agg) throws HiveException { + throw new HiveException("terminatePartial not supported"); + } + + @Override + public void merge(AggregationBuffer agg, Object partial) throws HiveException { + throw new HiveException("merge not supported"); + } + + @Override + public Object terminate(AggregationBuffer agg) throws HiveException { + NTileBuffer rb = (NTileBuffer) agg; + List res = new ArrayList(rb.numRows); + + /* + * if there is a remainder from numRows/numBuckets; then distribute increase the size of the first 'rem' buckets by 1. 
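A concrete reading of the remainder rule in the comment above, as a standalone sketch (hypothetical class name, not Hive code): with 10 rows and 3 buckets, bucketsz is 3 and rem is 1, so the first bucket holds 4 rows and the remaining buckets hold 3.

    // Standalone sketch of the bucket arithmetic used in terminate():
    // bucketsz = numRows / numBuckets, and the first (numRows % numBuckets)
    // buckets each receive one extra row.
    public class NTileSketch {
      public static void main(String[] args) {
        int numRows = 10;
        int numBuckets = 3;
        int bucketsz = numRows / numBuckets;   // 3
        int rem = numRows % numBuckets;        // 1, so the first bucket grows to 4
        int start = 0;
        int bucket = 1;
        while (start < numRows) {
          int end = start + bucketsz + (rem > 0 ? 1 : 0);
          if (rem > 0) {
            rem--;
          }
          end = Math.min(numRows, end);
          System.out.println("rows [" + start + ", " + end + ") -> bucket " + bucket);
          start = end;
          bucket++;
        }
        // Prints: [0, 4) -> 1, [4, 7) -> 2, [7, 10) -> 3
      }
    }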
+ */ + int bucketsz = rb.numRows / rb.numBuckets; + int rem = rb.numRows % rb.numBuckets; + int start = 0; + int bucket = 1; + while (start < rb.numRows) { + int end = start + bucketsz; + if (rem > 0) { + end++; + rem--; + } + end = Math.min(rb.numRows, end); + for (int i = start; i < end; i++) { + res.add(new IntWritable(bucket)); + } + start = end; + bucket++; + } + + return res; + } + + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentRank.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentRank.java index 1cca03e..b4dca9e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentRank.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentRank.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.udf.generic; import java.util.ArrayList; +import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -31,56 +32,52 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.io.IntWritable; -@WindowFunctionDescription -( - description = @Description( - name = "percent_rank", - value = "_FUNC_(x) PERCENT_RANK is similar to CUME_DIST, but it uses rank values rather " + - "than row counts in its numerator. PERCENT_RANK of a row is calculated as: " + - "(rank of row in its partition - 1) / (number of rows in the partition - 1)" - ), - supportsWindow = false, - pivotResult = true, - rankingFunction = true, - impliesOrder = true +@WindowFunctionDescription( + description = @Description( + name = "percent_rank", + value = "_FUNC_(x) PERCENT_RANK is similar to CUME_DIST, but it uses rank values rather " + + "than row counts in its numerator. PERCENT_RANK of a row is calculated as: " + + "(rank of row in its partition - 1) / (number of rows in the partition - 1)" + ), + supportsWindow = false, + pivotResult = true, + rankingFunction = true, + impliesOrder = true ) -public class GenericUDAFPercentRank extends GenericUDAFRank -{ - static final Log LOG = LogFactory.getLog(GenericUDAFPercentRank.class.getName()); +public class GenericUDAFPercentRank extends GenericUDAFRank { - @Override - protected GenericUDAFAbstractRankEvaluator createEvaluator() - { - return new GenericUDAFPercentRankEvaluator(); - } + static final Log LOG = LogFactory.getLog(GenericUDAFPercentRank.class.getName()); - public static class GenericUDAFPercentRankEvaluator extends GenericUDAFAbstractRankEvaluator - { - @Override - public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException - { - super.init(m, parameters); - return ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.writableDoubleObjectInspector); - } + @Override + protected GenericUDAFAbstractRankEvaluator createEvaluator() { + return new GenericUDAFPercentRankEvaluator(); + } - @Override - public Object terminate(AggregationBuffer agg) throws HiveException - { - ArrayList ranks = ((RankBuffer) agg).rowNums; - double sz = ranks.size(); - if ( sz > 1 ) { - sz = sz - 1; + public static class GenericUDAFPercentRankEvaluator extends GenericUDAFAbstractRankEvaluator { + + @Override + public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException { + super.init(m, parameters); + return ObjectInspectorFactory.getStandardListObjectInspector( + PrimitiveObjectInspectorFactory.writableDoubleObjectInspector); + } + + @Override + public Object terminate(AggregationBuffer agg) throws 
HiveException { + ArrayList ranks = ((RankBuffer) agg).rowNums; + double sz = ranks.size(); + if (sz > 1) { + sz -= 1; } - ArrayList pranks = new ArrayList(ranks.size()); + List pranks = new ArrayList(ranks.size()); - for(IntWritable i : ranks) - { - double pr = ((double)i.get() - 1)/sz; - pranks.add(new DoubleWritable(pr)); - } + for (IntWritable i : ranks) { + double pr = ((double) i.get() - 1) / sz; + pranks.add(new DoubleWritable(pr)); + } - return pranks; - } - } + return pranks; + } + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentileApprox.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentileApprox.java index 56e76be..356f62d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentileApprox.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFPercentileApprox.java @@ -359,7 +359,7 @@ public int estimate() { return model.lengthFor(histogram) + model.array() + JavaDataModel.PRIMITIVES2 * quantiles.length; } - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java index 3eea6b2..c5995ff 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRank.java @@ -38,170 +38,152 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.io.IntWritable; -@WindowFunctionDescription -( - description = @Description( - name = "rank", - value = "_FUNC_(x)" - ), - supportsWindow = false, - pivotResult = true, - rankingFunction = true, - impliesOrder = true +@WindowFunctionDescription( + description = @Description( + name = "rank", + value = "_FUNC_(x)" + ), + supportsWindow = false, + pivotResult = true, + rankingFunction = true, + impliesOrder = true ) -public class GenericUDAFRank extends AbstractGenericUDAFResolver -{ - static final Log LOG = LogFactory.getLog(GenericUDAFRank.class.getName()); - - @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException - { - if (parameters.length < 1) - { - throw new UDFArgumentTypeException(parameters.length - 1, "One or more arguments are expected."); - } - for(int i=0; i type or complex type containing map<>."); - } - } - return createEvaluator(); - } - - protected GenericUDAFAbstractRankEvaluator createEvaluator() - { - return new GenericUDAFRankEvaluator(); - } - - static class RankBuffer implements AggregationBuffer - { - ArrayList rowNums; - int currentRowNum; - Object[] currVal; - int currentRank; - int numParams; - boolean supportsStreaming; - - RankBuffer(int numParams, boolean supportsStreaming) - { - this.numParams = numParams; - this.supportsStreaming = supportsStreaming; - init(); - } - - void init() - { - rowNums = new ArrayList(); - currentRowNum = 0; - currentRank = 0; - currVal = new Object[numParams]; - if ( supportsStreaming ) { - /* initialize rowNums to have 1 row */ - rowNums.add(null); - } - } - - void incrRowNum() { currentRowNum++; } - - void addRank() - { - if ( supportsStreaming ) { - rowNums.set(0, new IntWritable(currentRank)); - } else { - rowNums.add(new IntWritable(currentRank)); - } - } - } - - public static abstract class GenericUDAFAbstractRankEvaluator extends GenericUDAFEvaluator - { - ObjectInspector[] inputOI; - ObjectInspector[] outputOI; - boolean isStreamingMode = false; - - 
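Returning briefly to the PERCENT_RANK formula quoted a little earlier, (rank - 1) / (number of rows - 1): a self-contained sketch (hypothetical name, not Hive code) of what terminate() produces for a four-row partition whose ranks are 1, 2, 2, 4.

    import java.util.Arrays;
    import java.util.List;

    // Hypothetical sketch mirroring the percent_rank terminate() shown above:
    // each rank r in a partition of n rows maps to (r - 1) / (n - 1).
    public class PercentRankSketch {
      public static void main(String[] args) {
        List<Integer> ranks = Arrays.asList(1, 2, 2, 4);
        double sz = ranks.size();
        if (sz > 1) {
          sz -= 1;
        }
        for (int r : ranks) {
          System.out.println(r + " -> " + ((r - 1) / sz));  // 0.0, 0.333..., 0.333..., 1.0
        }
      }
    }

The sz > 1 guard simply keeps a single-row partition from dividing by zero.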
protected boolean isStreaming() { - return isStreamingMode; - } - - @Override - public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException - { - super.init(m, parameters); - if (m != Mode.COMPLETE) - { - throw new HiveException( - "Only COMPLETE mode supported for Rank function"); - } - inputOI = parameters; - outputOI = new ObjectInspector[inputOI.length]; - for(int i=0; i < inputOI.length; i++) - { - outputOI[i] = ObjectInspectorUtils.getStandardObjectInspector(inputOI[i], ObjectInspectorCopyOption.JAVA); - } - return ObjectInspectorFactory.getStandardListObjectInspector(PrimitiveObjectInspectorFactory.writableIntObjectInspector); - } - - @Override - public AggregationBuffer getNewAggregationBuffer() throws HiveException - { - return new RankBuffer(inputOI.length, isStreamingMode); - } - - @Override - public void reset(AggregationBuffer agg) throws HiveException - { - ((RankBuffer) agg).init(); - } - - @Override - public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException - { - RankBuffer rb = (RankBuffer) agg; - int c = GenericUDAFRank.compare(rb.currVal, outputOI, parameters, inputOI); - rb.incrRowNum(); - if ( rb.currentRowNum == 1 || c != 0 ) - { - nextRank(rb); - rb.currVal = GenericUDAFRank.copyToStandardObject(parameters, inputOI, ObjectInspectorCopyOption.JAVA); - } - rb.addRank(); - } - - /* - * Called when the value in the partition has changed. Update the currentRank - */ - protected void nextRank(RankBuffer rb) - { - rb.currentRank = rb.currentRowNum; - } - - @Override - public Object terminatePartial(AggregationBuffer agg) throws HiveException - { - throw new HiveException("terminatePartial not supported"); - } - - @Override - public void merge(AggregationBuffer agg, Object partial) throws HiveException - { - throw new HiveException("merge not supported"); - } - - @Override - public Object terminate(AggregationBuffer agg) throws HiveException - { - return ((RankBuffer) agg).rowNums; - } - - } - - public static class GenericUDAFRankEvaluator extends - GenericUDAFAbstractRankEvaluator implements - ISupportStreamingModeForWindowing { +public class GenericUDAFRank extends AbstractGenericUDAFResolver { + + static final Log LOG = LogFactory.getLog(GenericUDAFRank.class.getName()); + + @Override + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + if (parameters.length < 1) { + throw new UDFArgumentTypeException(parameters.length - 1, + "One or more arguments are expected."); + } + for (int i = 0; i < parameters.length; i++) { + ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(parameters[i]); + if (!ObjectInspectorUtils.compareSupported(oi)) { + throw new UDFArgumentTypeException(i, + "Cannot support comparison of map<> type or complex type containing map<>."); + } + } + return createEvaluator(); + } + + protected GenericUDAFAbstractRankEvaluator createEvaluator() { + return new GenericUDAFRankEvaluator(); + } + + static class RankBuffer implements AggregationBuffer { + + ArrayList rowNums; + int currentRowNum; + Object[] currVal; + int currentRank; + int numParams; + boolean supportsStreaming; + + RankBuffer(int numParams, boolean supportsStreaming) { + this.numParams = numParams; + this.supportsStreaming = supportsStreaming; + init(); + } + + void init() { + rowNums = new ArrayList(); + currentRowNum = 0; + currentRank = 0; + currVal = new Object[numParams]; + if (supportsStreaming) { + /* initialize rowNums to have 1 row */ + rowNums.add(null); + } + } + + 
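For reference, the rank rule that iterate() and nextRank() implement: the row counter always advances, and the rank only jumps to the current row number when the ordering value changes, so ties share a rank and the value after a tie skips ahead. A plain-Java sketch with hypothetical names:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Objects;

    // Hypothetical standalone sketch of the RANK rule in iterate()/nextRank():
    // ties share a rank, and the rank after a tie jumps to the row number.
    public class RankSketch {
      public static void main(String[] args) {
        List<String> ordered = Arrays.asList("a", "b", "b", "c");
        Object prev = null;
        int rowNum = 0;
        int rank = 0;
        for (Object v : ordered) {
          rowNum++;
          if (rowNum == 1 || !Objects.equals(v, prev)) {
            rank = rowNum;           // analogous to nextRank(): rank follows the row number
            prev = v;
          }
          System.out.println(v + " -> rank " + rank);  // a->1, b->2, b->2, c->4
        }
      }
    }

A dense-rank-style variant could, hypothetically, override nextRank() to advance the rank by one instead of jumping to the row number.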
void incrRowNum() { currentRowNum++; } + + void addRank() { + if (supportsStreaming) { + rowNums.set(0, new IntWritable(currentRank)); + } else { + rowNums.add(new IntWritable(currentRank)); + } + } + } + + public abstract static class GenericUDAFAbstractRankEvaluator extends GenericUDAFEvaluator { + + ObjectInspector[] inputOI; + ObjectInspector[] outputOI; + boolean isStreamingMode = false; + + protected boolean isStreaming() { + return isStreamingMode; + } + + @Override + public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException { + super.init(m, parameters); + if (m != Mode.COMPLETE) { + throw new HiveException("Only COMPLETE mode supported for Rank function"); + } + inputOI = parameters; + outputOI = new ObjectInspector[inputOI.length]; + for (int i = 0; i < inputOI.length; i++) { + outputOI[i] = ObjectInspectorUtils.getStandardObjectInspector(inputOI[i], + ObjectInspectorCopyOption.JAVA); + } + return ObjectInspectorFactory.getStandardListObjectInspector( + PrimitiveObjectInspectorFactory.writableIntObjectInspector); + } + + @Override + public AggregationBuffer getNewAggregationBuffer() throws HiveException { + return new RankBuffer(inputOI.length, isStreamingMode); + } + + @Override + public void reset(AggregationBuffer agg) throws HiveException { + ((RankBuffer) agg).init(); + } + + @Override + public void iterate(AggregationBuffer agg, Object[] parameters) throws HiveException { + RankBuffer rb = (RankBuffer) agg; + int c = GenericUDAFRank.compare(rb.currVal, outputOI, parameters, inputOI); + rb.incrRowNum(); + if (rb.currentRowNum == 1 || c != 0) { + nextRank(rb); + rb.currVal = + GenericUDAFRank.copyToStandardObject(parameters, inputOI, ObjectInspectorCopyOption.JAVA); + } + rb.addRank(); + } + + /* + * Called when the value in the partition has changed. 
Update the currentRank + */ + protected void nextRank(RankBuffer rb) { + rb.currentRank = rb.currentRowNum; + } + + @Override + public Object terminatePartial(AggregationBuffer agg) throws HiveException { + throw new HiveException("terminatePartial not supported"); + } + + @Override + public void merge(AggregationBuffer agg, Object partial) throws HiveException { + throw new HiveException("merge not supported"); + } + + @Override + public Object terminate(AggregationBuffer agg) throws HiveException { + return ((RankBuffer) agg).rowNums; + } + + } + + public static class GenericUDAFRankEvaluator extends GenericUDAFAbstractRankEvaluator + implements ISupportStreamingModeForWindowing { @Override public Object getNextResult(AggregationBuffer agg) throws HiveException { @@ -215,18 +197,15 @@ public GenericUDAFEvaluator getWindowingEvaluator(WindowFrameDef wFrmDef) { } @Override - public int getRowsRemainingAfterTerminate() - throws HiveException { + public int getRowsRemainingAfterTerminate() throws HiveException { return 0; } } public static int compare(Object[] o1, ObjectInspector[] oi1, Object[] o2, - ObjectInspector[] oi2) - { + ObjectInspector[] oi2) { int c = 0; - for (int i = 0; i < oi1.length; i++) - { + for (int i = 0; i < oi1.length; i++) { c = ObjectInspectorUtils.compare(o1[i], oi1[i], o2[i], oi2[i]); if (c != 0) { return c; @@ -235,15 +214,11 @@ public static int compare(Object[] o1, ObjectInspector[] oi1, Object[] o2, return c; } - public static Object[] copyToStandardObject(Object[] o, - ObjectInspector[] oi, - ObjectInspectorCopyOption objectInspectorOption) - { + public static Object[] copyToStandardObject(Object[] o, ObjectInspector[] oi, + ObjectInspectorCopyOption objectInspectorOption) { Object[] out = new Object[o.length]; - for (int i = 0; i < oi.length; i++) - { - out[i] = ObjectInspectorUtils.copyToStandardObject(o[i], oi[i], - objectInspectorOption); + for (int i = 0; i < oi.length; i++) { + out[i] = ObjectInspectorUtils.copyToStandardObject(o[i], oi[i], objectInspectorOption); } return out; } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRowNumber.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRowNumber.java index 987da3d..0291aec 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRowNumber.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFRowNumber.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.udf.generic; import java.util.ArrayList; +import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -34,110 +35,88 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.IntWritable; -@WindowFunctionDescription -( - description = @Description( - name = "row_number", - value = "_FUNC_() - The ROW_NUMBER function assigns a unique number (sequentially, starting from 1, as defined by ORDER BY) to each row within the partition." - ), - supportsWindow = false, - pivotResult = true +@WindowFunctionDescription( + description = @Description( + name = "row_number", + value = "_FUNC_() - The ROW_NUMBER function assigns a unique number (sequentially, starting from 1, as defined by ORDER BY) to each row within the partition." 
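As a quick illustration of that description (a hypothetical plain-Java sketch, not the evaluator itself):

    import java.util.Arrays;
    import java.util.List;

    // Hypothetical sketch of ROW_NUMBER as described above: each row in the
    // ordered partition gets the next integer, starting from 1, regardless of ties.
    public class RowNumberSketch {
      public static void main(String[] args) {
        List<String> ordered = Arrays.asList("a", "b", "b", "c");
        int nextRow = 1;                               // mirrors RowNumberBuffer.nextRow
        for (String v : ordered) {
          System.out.println(v + " -> " + nextRow++);  // a->1, b->2, b->3, c->4
        }
      }
    }

Unlike rank, ties do not share a number; every row gets the next integer.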
+ ), + supportsWindow = false, + pivotResult = true ) -public class GenericUDAFRowNumber extends AbstractGenericUDAFResolver -{ - static final Log LOG = LogFactory.getLog(GenericUDAFRowNumber.class.getName()); - - @Override - public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) - throws SemanticException - { - if (parameters.length != 0) - { - throw new UDFArgumentTypeException(parameters.length - 1, - "No argument is expected."); - } - return new GenericUDAFRowNumberEvaluator(); - } - - static class RowNumberBuffer implements AggregationBuffer - { - ArrayList rowNums; - int nextRow; - - void init() - { - rowNums = new ArrayList(); - } - - RowNumberBuffer() - { - init(); - nextRow = 1; - } - - void incr() - { - rowNums.add(new IntWritable(nextRow++)); - } - } - - public static class GenericUDAFRowNumberEvaluator extends - GenericUDAFEvaluator - { - - @Override - public ObjectInspector init(Mode m, ObjectInspector[] parameters) - throws HiveException - { - super.init(m, parameters); - if (m != Mode.COMPLETE) - { - throw new HiveException("Only COMPLETE mode supported for row_number function"); - } - - return ObjectInspectorFactory.getStandardListObjectInspector( - PrimitiveObjectInspectorFactory.writableIntObjectInspector); - } - - @Override - public AggregationBuffer getNewAggregationBuffer() throws HiveException - { - return new RowNumberBuffer(); - } - - @Override - public void reset(AggregationBuffer agg) throws HiveException - { - ((RowNumberBuffer) agg).init(); - } - - @Override - public void iterate(AggregationBuffer agg, Object[] parameters) - throws HiveException - { - ((RowNumberBuffer) agg).incr(); - } - - @Override - public Object terminatePartial(AggregationBuffer agg) - throws HiveException - { - throw new HiveException("terminatePartial not supported"); - } - - @Override - public void merge(AggregationBuffer agg, Object partial) - throws HiveException - { - throw new HiveException("merge not supported"); - } - - @Override - public Object terminate(AggregationBuffer agg) throws HiveException - { - return ((RowNumberBuffer) agg).rowNums; - } - - } +public class GenericUDAFRowNumber extends AbstractGenericUDAFResolver { + + static final Log LOG = LogFactory.getLog(GenericUDAFRowNumber.class.getName()); + + @Override + public GenericUDAFEvaluator getEvaluator(TypeInfo[] parameters) throws SemanticException { + if (parameters.length != 0) { + throw new UDFArgumentTypeException(parameters.length - 1, "No argument is expected."); + } + return new GenericUDAFRowNumberEvaluator(); + } + + static class RowNumberBuffer implements AggregationBuffer { + + List rowNums; + int nextRow; + + void init() { + rowNums = new ArrayList(); + } + + RowNumberBuffer() { + init(); + nextRow = 1; + } + + void incr() { + rowNums.add(new IntWritable(nextRow++)); + } + } + + public static class GenericUDAFRowNumberEvaluator extends GenericUDAFEvaluator { + + @Override + public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveException { + super.init(m, parameters); + if (m != Mode.COMPLETE) { + throw new HiveException("Only COMPLETE mode supported for row_number function"); + } + + return ObjectInspectorFactory.getStandardListObjectInspector( + PrimitiveObjectInspectorFactory.writableIntObjectInspector); + } + + @Override + public AggregationBuffer getNewAggregationBuffer() throws HiveException { + return new RowNumberBuffer(); + } + + @Override + public void reset(AggregationBuffer agg) throws HiveException { + ((RowNumberBuffer) agg).init(); + } + + @Override + public void 
iterate(AggregationBuffer agg, Object[] parameters) throws HiveException { + ((RowNumberBuffer) agg).incr(); + } + + @Override + public Object terminatePartial(AggregationBuffer agg) throws HiveException { + throw new HiveException("terminatePartial not supported"); + } + + @Override + public void merge(AggregationBuffer agg, Object partial) throws HiveException { + throw new HiveException("merge not supported"); + } + + @Override + public Object terminate(AggregationBuffer agg) throws HiveException { + return ((RowNumberBuffer) agg).rowNums; + } + + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java index 3545390..b5516ab 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java @@ -177,7 +177,7 @@ public ObjectInspector init(Mode m, ObjectInspector[] parameters) throws HiveExc double variance; // sum[x-avg^2] (this is actually n times the variance) @Override public int estimate() { return JavaDataModel.PRIMITIVES2 * 3; } - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFnGrams.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFnGrams.java index 4a7caab..f565fa2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFnGrams.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFnGrams.java @@ -334,7 +334,7 @@ public Object terminate(AggregationBuffer agg) throws HiveException { static class NGramAggBuf extends AbstractAggregationBuffer { NGramEstimator nge; int n; - }; + } @Override public AggregationBuffer getNewAggregationBuffer() throws HiveException { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java index e3fb558..ea632a8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDF.java @@ -46,13 +46,13 @@ public abstract class GenericUDF implements Closeable { /** - * A Defered Object allows us to do lazy-evaluation and short-circuiting. + * A deferred Object allows us to do lazy-evaluation and short-circuiting. * GenericUDF use DeferedObject to pass arguments. 
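To make the lazy-evaluation point concrete, here is a hedged sketch of a two-argument, COALESCE-like GenericUDF; the class name and behavior are illustrative only and not part of Hive. Because the arguments arrive as DeferredObjects, get() is never called on the second argument when the first one is non-null.

    import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

    // Illustrative only: a simplified two-argument "first non-null" UDF showing how
    // DeferredObject enables short-circuiting.
    public class GenericUDFFirstNonNullSketch extends GenericUDF {

      private ObjectInspector returnOI;

      @Override
      public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
        if (arguments.length != 2) {
          throw new UDFArgumentException("exactly two arguments expected");
        }
        // Sketch-level assumption: both arguments share the same inspector/type.
        returnOI = arguments[0];
        return returnOI;
      }

      @Override
      public Object evaluate(DeferredObject[] arguments) throws HiveException {
        Object first = arguments[0].get();
        if (first != null) {
          return first;              // arguments[1].get() is never evaluated on this path
        }
        return arguments[1].get();
      }

      @Override
      public String getDisplayString(String[] children) {
        return "first_non_null(" + children[0] + ", " + children[1] + ")";
      }
    }

Hive's real COALESCE is more general; this sketch only exists to show where the deferred get() calls sit.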
*/ - public static interface DeferredObject { + public interface DeferredObject { void prepare(int version) throws HiveException; Object get() throws HiveException; - }; + } /** * A basic dummy implementation of DeferredObject which just stores a Java diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBasePad.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBasePad.java index d6dab78..3c0933a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBasePad.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBasePad.java @@ -24,29 +24,30 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; -import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; public abstract class GenericUDFBasePad extends GenericUDF { - private transient Converter converter1; - private transient Converter converter2; - private transient Converter converter3; - private Text result = new Text(); - private String udfName; - public GenericUDFBasePad(String _udfName) { - this.udfName = _udfName; + private Converter converter1; + private Converter converter2; + private Converter converter3; + private final Text result = new Text(); + private final String udfName; + + protected GenericUDFBasePad(String udfName) { + this.udfName = udfName; } @Override public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { if (arguments.length != 3) { - throw new UDFArgumentException(udfName + " requires three arguments. Found :" - + arguments.length); + throw new UDFArgumentException( + udfName + " requires three arguments. Found :" + arguments.length); } converter1 = checkArguments(arguments, 0); converter2 = checkArguments(arguments, 1); @@ -89,33 +90,34 @@ public String getDisplayString(String[] children) { } protected abstract void performOp(byte[] data, byte[] txt, byte[] padTxt, int len, Text str, - Text pad); + Text pad); - private Converter checkArguments(ObjectInspector[] arguments, int i) - throws UDFArgumentException { + private Converter checkArguments(ObjectInspector[] arguments, int i) throws UDFArgumentException { if (arguments[i].getCategory() != ObjectInspector.Category.PRIMITIVE) { throw new UDFArgumentTypeException(i + 1, "Only primitive type arguments are accepted but " - + arguments[i].getTypeName() + " is passed. as arguments"); + + arguments[i].getTypeName() + + " is passed. 
as arguments"); } PrimitiveCategory inputType = ((PrimitiveObjectInspector) arguments[i]).getPrimitiveCategory(); Converter converter; switch (inputType) { - case STRING: - case CHAR: - case VARCHAR: - converter = ObjectInspectorConverters.getConverter((PrimitiveObjectInspector) arguments[i], - PrimitiveObjectInspectorFactory.writableStringObjectInspector); - break; - case INT: - case SHORT: - case BYTE: - converter = ObjectInspectorConverters.getConverter((PrimitiveObjectInspector) arguments[i], - PrimitiveObjectInspectorFactory.writableIntObjectInspector); - break; - default: - throw new UDFArgumentTypeException(i + 1, udfName - + " only takes STRING/CHAR/INT/SHORT/BYTE/VARCHAR types as " + (i + 1) + "-ths argument, got " - + inputType); + case STRING: + case CHAR: + case VARCHAR: + converter = ObjectInspectorConverters.getConverter(arguments[i], + PrimitiveObjectInspectorFactory.writableStringObjectInspector); + break; + case INT: + case SHORT: + case BYTE: + converter = ObjectInspectorConverters.getConverter(arguments[i], + PrimitiveObjectInspectorFactory.writableIntObjectInspector); + break; + default: + throw new UDFArgumentTypeException(i + 1, udfName + + " only takes STRING/CHAR/INT/SHORT/BYTE/VARCHAR" + + " types as " + (i + 1) + "-ths argument, got " + + inputType); } return converter; } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseTrim.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseTrim.java index b3a9e67..dcd00ac 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseTrim.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseTrim.java @@ -28,6 +28,7 @@ import org.apache.hadoop.io.Text; public abstract class GenericUDFBaseTrim extends GenericUDF { + private transient TextConverter converter; private Text result = new Text(); private String udfName; @@ -39,24 +40,25 @@ public GenericUDFBaseTrim(String _udfName) { @Override public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { if (arguments.length != 1) { - throw new UDFArgumentException(udfName + " requires one value argument. Found :" - + arguments.length); + throw new UDFArgumentException( + udfName + " requires one value argument. Found :" + arguments.length); } PrimitiveObjectInspector argumentOI; - if(arguments[0] instanceof PrimitiveObjectInspector) { + if (arguments[0] instanceof PrimitiveObjectInspector) { argumentOI = (PrimitiveObjectInspector) arguments[0]; } else { - throw new UDFArgumentException(udfName + " takes only primitive types. found " - + arguments[0].getTypeName()); + throw new UDFArgumentException( + udfName + " takes only primitive types. found " + arguments[0].getTypeName()); } switch (argumentOI.getPrimitiveCategory()) { - case STRING: - case CHAR: - case VARCHAR: - break; - default: - throw new UDFArgumentException(udfName + " takes only STRING/CHAR/VARCHAR types. Found " - + argumentOI.getPrimitiveCategory()); + case STRING: + case CHAR: + case VARCHAR: + break; + default: + throw new UDFArgumentException(udfName + + " takes only STRING/CHAR/VARCHAR types. 
Found " + + argumentOI.getPrimitiveCategory()); } converter = new TextConverter(argumentOI); return PrimitiveObjectInspectorFactory.writableStringObjectInspector; diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBetween.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBetween.java index 9d05e12..899a467 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBetween.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBetween.java @@ -51,9 +51,10 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen @Override public Object evaluate(DeferredObject[] arguments) throws HiveException { boolean invert = (Boolean) ((PrimitiveObjectInspector) argumentOIs[0]) - .getPrimitiveJavaObject(arguments[0].get()); + .getPrimitiveJavaObject(arguments[0].get()); - BooleanWritable left = ((BooleanWritable)egt.evaluate(new DeferredObject[] {arguments[1], arguments[2]})); + BooleanWritable left = + (BooleanWritable)egt.evaluate(new DeferredObject[] {arguments[1], arguments[2]}); if (left == null) { return null; } @@ -61,7 +62,8 @@ public Object evaluate(DeferredObject[] arguments) throws HiveException { result.set(false); return result; } - BooleanWritable right = ((BooleanWritable)elt.evaluate(new DeferredObject[] {arguments[1], arguments[3]})); + BooleanWritable right = + (BooleanWritable)elt.evaluate(new DeferredObject[] {arguments[1], arguments[3]}); if (right == null) { return null; } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateDiff.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateDiff.java index 5d8bd0d..531388c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateDiff.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDateDiff.java @@ -68,7 +68,7 @@ private transient PrimitiveCategory inputType1; private transient PrimitiveCategory inputType2; private IntWritable result = new IntWritable(); - + public GenericUDFDateDiff() { formatter.setTimeZone(TimeZone.getTimeZone("UTC")); } @@ -89,8 +89,8 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen @Override public IntWritable evaluate(DeferredObject[] arguments) throws HiveException { - output = evaluate(convertToDate(inputType1, inputConverter1, arguments[0]), - convertToDate(inputType2, inputConverter2, arguments[1])); + output = evaluate(convertToDate(inputType1, inputConverter1, arguments[0]), + convertToDate(inputType2, inputConverter2, arguments[1])); return output; } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDecode.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDecode.java index aed82b3..4d8473f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDecode.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFDecode.java @@ -44,8 +44,8 @@ @Description(name = "decode", value = "_FUNC_(bin, str) - Decode the first argument using the second argument character set", extended = "Possible options for the character set are 'US_ASCII', 'ISO-8859-1',\n" + - "'UTF-8', 'UTF-16BE', 'UTF-16LE', and 'UTF-16'. If either argument\n" + - "is null, the result will also be null") + "'UTF-8', 'UTF-16BE', 'UTF-16LE', and 'UTF-16'. 
If either argument\n" + + "is null, the result will also be null") public class GenericUDFDecode extends GenericUDF { private transient CharsetDecoder decoder = null; private transient BinaryObjectInspector bytesOI = null; diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLpad.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLpad.java index 76ee94e..b1c5ffa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLpad.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLpad.java @@ -21,10 +21,6 @@ import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; -/** - * UDFLpad. - * - */ @Description(name = "lpad", value = "_FUNC_(str, len, pad) - Returns str, left-padded with pad to a length of len", extended = "If str is longer than len, the return value is shortened to " @@ -46,7 +42,7 @@ protected void performOp(byte[] data, byte[] txt, byte[] padTxt, int len, Text s // Copy the padding for (int i = 0; i < pos; i += pad.getLength()) { for (int j = 0; j < pad.getLength() && j < pos - i; j++) { - data[i + j] = padTxt[j]; + data[i + j] = padTxt[j]; } } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRpad.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRpad.java index e436f3a..2d8f280 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRpad.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRpad.java @@ -21,10 +21,6 @@ import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; -/** - * UDFRpad. - * - */ @Description(name = "rpad", value = "_FUNC_(str, len, pad) - " + "Returns str, right-padded with pad to a length of len", extended = "If str is longer than len, the return value is shortened to " @@ -48,7 +44,7 @@ protected void performOp(byte[] data, byte[] txt, byte[] padTxt, int len, Text s // Copy the padding while (pos < len) { for (int i = 0; i < pad.getLength() && i < len - pos; i++) { - data[pos + i] = padTxt[i]; + data[pos + i] = padTxt[i]; } pos += pad.getLength(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToBinary.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToBinary.java index e449e74..2aa495c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToBinary.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToBinary.java @@ -36,8 +36,8 @@ extended = "Currently only string, char, varchar or binary can be cast into binary") public class GenericUDFToBinary extends GenericUDF { - private transient PrimitiveObjectInspector argumentOI; - private transient BinaryConverter baConverter; + private PrimitiveObjectInspector argumentOI; + private BinaryConverter baConverter; @Override public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { @@ -59,7 +59,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen || (argumentOI instanceof StringObjectInspector) || (argumentOI instanceof VoidObjectInspector))){ throw new UDFArgumentException("Only string, char, varchar or binary data can be cast into binary " + - "data types."); + "data types."); } baConverter = new BinaryConverter(argumentOI, PrimitiveObjectInspectorFactory.writableBinaryObjectInspector); @@ -78,7 +78,7 @@ public Object evaluate(DeferredObject[] arguments) throws HiveException { @Override public String getDisplayString(String[] children) { - assert (children.length == 1); + assert 
children.length == 1; StringBuilder sb = new StringBuilder(); sb.append("CAST( "); sb.append(children[0]); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java index 1f70c55..b4a746d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFUtils.java @@ -149,7 +149,7 @@ public boolean update(ObjectInspector oi) throws UDFArgumentTypeException { * that. */ if (commonTypeInfo instanceof DecimalTypeInfo) { - if ((!FunctionRegistry.isExactNumericType((PrimitiveTypeInfo) oiTypeInfo)) || + if ((!FunctionRegistry.isExactNumericType((PrimitiveTypeInfo) oiTypeInfo)) || (!FunctionRegistry.isExactNumericType((PrimitiveTypeInfo) rTypeInfo))) { commonTypeInfo = TypeInfoFactory.doubleTypeInfo; } @@ -367,7 +367,7 @@ public ConversionHelper(Method m, ObjectInspector[] parameterOIs) } return convertedParameters; } - }; + } /** * Helper class for UDFs returning string/varchar/char diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFJSONTuple.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFJSONTuple.java index 0336ecd..ea5aeec 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFJSONTuple.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFJSONTuple.java @@ -47,7 +47,7 @@ */ @Description(name = "json_tuple", value = "_FUNC_(jsonStr, p1, p2, ..., pn) - like get_json_object, but it takes multiple names and return a tuple. " + - "All the input parameters and output column types are string.") + "All the input parameters and output column types are string.") public class GenericUDTFJSONTuple extends GenericUDTF { @@ -106,7 +106,7 @@ public StructObjectInspector initialize(ObjectInspector[] args) if (numCols < 1) { throw new UDFArgumentException("json_tuple() takes at least two arguments: " + - "the json string and a path expression"); + "the json string and a path expression"); } for (int i = 0; i < args.length; ++i) { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFParseUrlTuple.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFParseUrlTuple.java index dff9ba6..860acda 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFParseUrlTuple.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFParseUrlTuple.java @@ -56,7 +56,7 @@ enum PARTNAME { HOST, PATH, QUERY, REF, PROTOCOL, AUTHORITY, FILE, USERINFO, QUERY_WITH_KEY, NULLNAME - }; + } private static Log LOG = LogFactory.getLog(GenericUDTFParseUrlTuple.class.getName()); @@ -86,7 +86,7 @@ public StructObjectInspector initialize(ObjectInspector[] args) if (numCols < 1) { throw new UDFArgumentException("parse_url_tuple() takes at least two arguments: " + - "the url string and a part name"); + "the url string and a part name"); } for (int i = 0; i < args.length; ++i) { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/ISupportStreamingModeForWindowing.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/ISupportStreamingModeForWindowing.java index 327a732..84959d8 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/ISupportStreamingModeForWindowing.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/ISupportStreamingModeForWindowing.java @@ -33,14 +33,14 @@ public interface ISupportStreamingModeForWindowing { Object getNextResult(AggregationBuffer agg) throws HiveException; - + /* - * for functions that 
don't support a Window, this provides the rows remaining to be + * for functions that don't support a Window, this provides the rows remaining to be * added to output. Functions that return a Window can throw a UnsupportedException, * this method shouldn't be called. For Ranking fns return 0; lead/lag fns return the * lead/lag amt. */ int getRowsRemainingAfterTerminate() throws HiveException; - public static Object NULL_RESULT = new Object(); + Object NULL_RESULT = new Object(); } diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NumericHistogram.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NumericHistogram.java index a8c875c..ca5ecaf 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NumericHistogram.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/NumericHistogram.java @@ -51,7 +51,7 @@ public int compareTo(Object other) { } return 0; } - }; + } // Class variables private int nbins; diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java index 8215fc0..5ab58a9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/MatchPath.java @@ -77,12 +77,11 @@ * "tpath" is available. Path is a collection of rows that represents the matching Path. * */ -public class MatchPath extends TableFunctionEvaluator -{ - private transient String patternStr; - private transient SymbolsInfo symInfo; - private transient String resultExprStr; - private transient SymbolFunction syFn; +public class MatchPath extends TableFunctionEvaluator { + private String patternStr; + private SymbolsInfo symInfo; + private String resultExprStr; + private SymbolFunction syFn; private ResultExprInfo resultExprInfo; /* * the names of the Columns of the input to MatchPath. Used to setup the tpath Struct column. @@ -90,10 +89,8 @@ private HashMap inputColumnNamesMap; @Override - public void execute(PTFPartitionIterator pItr, PTFPartition outP) throws HiveException - { - while (pItr.hasNext()) - { + public void execute(PTFPartitionIterator pItr, PTFPartition outP) throws HiveException { + while (pItr.hasNext()) { Object iRow = pItr.next(); SymbolFunctionResult syFnRes = SymbolFunction.match(syFn, iRow, pItr); @@ -112,8 +109,7 @@ public void execute(PTFPartitionIterator pItr, PTFPartition outP) throws } } - static void throwErrorWithSignature(String message) throws SemanticException - { + static void throwErrorWithSignature(String message) throws SemanticException { throw new SemanticException(String.format( "MatchPath signature is: SymbolPattern, one or more SymbolName, " + "expression pairs, the result expression as a select list. 
Error %s", @@ -225,7 +221,7 @@ private void validateAndSetupSymbolInfo(MatchPath evaluator, if ( symbolArgsSz % 2 != 0) { throwErrorWithSignature("Symbol Name, Expression need to be specified in pairs: " + - "there are odd number of symbol args"); + "there are odd number of symbol args"); } evaluator.symInfo = new SymbolsInfo(symbolArgsSz/2); @@ -253,7 +249,7 @@ private void validateAndSetupSymbolInfo(MatchPath evaluator, PrimitiveObjectInspector.PrimitiveCategory.BOOLEAN ) { throwErrorWithSignature(String.format("Currently a Symbol Expression(%s) " + - "must be a boolean expression", symolExprArg.getExpressionTreeString())); + "must be a boolean expression", symolExprArg.getExpressionTreeString())); } evaluator.symInfo.add(symbolName, symolExprArg); } @@ -379,8 +375,8 @@ void add(String name, PTFExpressionDef arg) public static class ResultExprInfo { ArrayList resultExprNames; ArrayList resultExprNodes; - private transient ArrayList resultExprEvals; - private transient StructObjectInspector resultOI; + private ArrayList resultExprEvals; + private StructObjectInspector resultOI; public ArrayList getResultExprNames() { return resultExprNames; diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java index d7d2a34..c41ffbe 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java @@ -56,7 +56,7 @@ public void mismatch(DateWritable x, HiveDecimalWritable y) {} public void mismatch(TimestampWritable x, HiveDecimalWritable y) {} public void mismatch(BytesWritable x, DoubleWritable y) {} public void typeaffinity1(DateWritable x) {} - public void typeaffinity1(DoubleWritable x) {}; + public void typeaffinity1(DoubleWritable x) {} public void typeaffinity1(Text x) {} public void typeaffinity2(IntWritable x) {} public void typeaffinity2(DoubleWritable x) {} diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java index 50447e8..81c60d2 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java @@ -2028,9 +2028,9 @@ public void testAggregateCountStar ( } - public static interface Validator { + public interface Validator { void validate (String key, Object expected, Object result); - }; + } public static class ValueValidator implements Validator { @Override diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizedRowBatch.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizedRowBatch.java index 78cd5cd..132e2e0 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizedRowBatch.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizedRowBatch.java @@ -267,7 +267,7 @@ private void verifyFlatten(ColumnVector v) { try { b = "foo".getBytes("UTF-8"); } catch (Exception e) { - ; // eat it + // eat it } bv.setRef(0, b, 0, b.length); bv.flatten(true, sel, 2); diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestCuckooSet.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestCuckooSet.java index 0ebfbb7..69ef164 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestCuckooSet.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestCuckooSet.java @@ 
-195,7 +195,7 @@ public void loadRandomBytes(byte[][] values, Random gen) { try { v = s.getBytes("UTF-8"); } catch (Exception e) { - ; // won't happen + // won't happen } return v; } @@ -207,7 +207,7 @@ public void loadRandomBytes(byte[][] values, Random gen) { try { values[i] = strings[i].getBytes("UTF-8"); } catch (Exception e) { - ; // can't happen + // can't happen } } return values; diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorConditionalExpressions.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorConditionalExpressions.java index fccac66..17a35b8 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorConditionalExpressions.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorConditionalExpressions.java @@ -160,7 +160,7 @@ private void setString(BytesColumnVector v, int i, String s) { try { b = s.getBytes("UTF-8"); } catch (Exception e) { - ; // eat it + // eat it } return b; } @@ -170,7 +170,7 @@ private String getString(BytesColumnVector v, int i) { try { s = new String(v.vector[i], v.start[i], v.length[i], "UTF-8"); } catch (Exception e) { - ; // eat it + // eat it } return s; } diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorFilterExpressions.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorFilterExpressions.java index 92e5a06..207ccce 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorFilterExpressions.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorFilterExpressions.java @@ -505,7 +505,7 @@ public void testFilterDoubleNotBetween() { b = "b".getBytes("UTF-8"); c = "c".getBytes("UTF-8"); } catch (Exception e) { - ; // won't happen + // won't happen } } diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/udf/TestVectorUDFAdaptor.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/udf/TestVectorUDFAdaptor.java index a7567b7..8ad9757 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/udf/TestVectorUDFAdaptor.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/udf/TestVectorUDFAdaptor.java @@ -58,7 +58,7 @@ blue = "blue".getBytes("UTF-8"); red = "red".getBytes("UTF-8"); } catch (Exception e) { - ; // do nothing + // do nothing } } @@ -168,7 +168,6 @@ public void testMultiArgumentUDF() { result = "red:1:1.0".getBytes("UTF-8"); result2 = "blue:0:0.0".getBytes("UTF-8"); } catch (Exception e) { - ; } BytesColumnVector out = (BytesColumnVector) b.cols[3]; int cmp = StringExpr.compare(result, 0, result.length, out.vector[1], @@ -285,7 +284,6 @@ public void testGenericUDF() { red = "red".getBytes("UTF-8"); unknown = "UNKNOWN".getBytes("UTF-8"); } catch (Exception e) { - ; } BytesColumnVector out; diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeCaptureOutputOperator.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeCaptureOutputOperator.java index 43458d9..4eb5b48 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeCaptureOutputOperator.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeCaptureOutputOperator.java @@ -36,17 +36,17 @@ public class FakeCaptureOutputOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; - + public interface OutputInspector { - public void inspectRow(Object row, int tag) throws HiveException; + void inspectRow(Object row, int tag) throws HiveException; } - + private 
OutputInspector outputInspector; - + public void setOutputInspector(OutputInspector outputInspector) { this.outputInspector = outputInspector; } - + public OutputInspector getOutputInspector() { return outputInspector; } @@ -67,7 +67,7 @@ public static FakeCaptureOutputOperator addCaptureOutputChild( return out; } - + public List getCapturedRows() { return rows; } diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeVectorRowBatchFromObjectIterables.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeVectorRowBatchFromObjectIterables.java index eab051e..0936b6c 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeVectorRowBatchFromObjectIterables.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/FakeVectorRowBatchFromObjectIterables.java @@ -54,7 +54,7 @@ /** * Helper interface for assigning values to primitive vector column types. */ - private static interface ColumnVectorAssign { + private interface ColumnVectorAssign { void assign( ColumnVector columnVector, int row, diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java index 5b8ec60..25f7fdb 100644 --- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java +++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreChecker.java @@ -159,7 +159,7 @@ public void testTableCheck() throws HiveException, MetaException, // now this shouldn't find the path on the fs result = new CheckResult(); checker.checkMetastore(dbName, tableName, null, result); - assertEquals(Collections.emptyList(), result.getTablesNotInMs());; + assertEquals(Collections.emptyList(), result.getTablesNotInMs()); assertEquals(1, result.getTablesNotOnFs().size()); assertEquals(tableName, result.getTablesNotOnFs().get(0)); assertEquals(Collections.emptyList(), result.getPartitionsNotOnFs()); diff --git ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java index 7f5134e..90bf144 100644 --- ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java +++ ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java @@ -241,7 +241,7 @@ private String getLocation(String tableName, String partValue) { return location; } - private enum FileType {BASE, DELTA, LEGACY}; + private enum FileType {BASE, DELTA, LEGACY} private void addFile(HiveConf conf, Table t, Partition p, long minTxn, long maxTxn, int numRecords, FileType type, int numBuckets, diff --git ql/src/test/org/apache/hadoop/hive/ql/udaf/TestStreamingSum.java ql/src/test/org/apache/hadoop/hive/ql/udaf/TestStreamingSum.java index a331e66..19bb211 100644 --- ql/src/test/org/apache/hadoop/hive/ql/udaf/TestStreamingSum.java +++ ql/src/test/org/apache/hadoop/hive/ql/udaf/TestStreamingSum.java @@ -120,10 +120,10 @@ public void sumHiveDecimal(Iterator inVals, int inSz, } - static interface TypeHandler { - public void set(T i, TW iw); + interface TypeHandler { + void set(T i, TW iw); - public T get(TW iw); + T get(TW iw); TypeHandler DoubleHandler = new TypeHandler() { public void set(Double d, DoubleWritable iw) { diff --git serde/src/java/org/apache/hadoop/hive/serde2/ByteStream.java serde/src/java/org/apache/hadoop/hive/serde2/ByteStream.java index 390d9de..87b7352 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/ByteStream.java +++ serde/src/java/org/apache/hadoop/hive/serde2/ByteStream.java @@ -95,12 +95,12 @@ public 
void reserve(int byteCount) { } } - public static interface RandomAccessOutput { - public void writeInt(long offset, int value); - public void reserve(int byteCount); - public void write(int b); - public void write(byte b[]) throws IOException; - public void write(byte b[], int off, int len); - public int getLength(); + public interface RandomAccessOutput { + void writeInt(long offset, int value); + void reserve(int byteCount); + void write(int b); + void write(byte b[]) throws IOException; + void write(byte b[], int off, int len); + int getLength(); } } diff --git serde/src/java/org/apache/hadoop/hive/serde2/SerDeStatsStruct.java serde/src/java/org/apache/hadoop/hive/serde2/SerDeStatsStruct.java index 21cd8d1..d8b1ab4 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/SerDeStatsStruct.java +++ serde/src/java/org/apache/hadoop/hive/serde2/SerDeStatsStruct.java @@ -23,6 +23,6 @@ /** * Rerurns the serialized size of the object. */ - public long getRawDataSerializedSize(); + long getRawDataSerializedSize(); } diff --git serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeFieldRequiredness.java serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeFieldRequiredness.java index bc85e8b..76644da 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeFieldRequiredness.java +++ serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeFieldRequiredness.java @@ -29,7 +29,7 @@ */ public enum RequirednessTypes { Required, Skippable, Optional, - }; + } /** * Is this a required, skippable or optional field. Used by DynamicSerDe for diff --git serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeTypeMap.java serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeTypeMap.java index 2b5d63c..cdd5814 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeTypeMap.java +++ serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/DynamicSerDeTypeMap.java @@ -159,4 +159,4 @@ public void serialize(Object o, ObjectInspector oi, TProtocol oprot) public byte getType() { return TType.MAP; } -}; +} diff --git serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java index 0ab27ff..ce42c78 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java +++ serde/src/java/org/apache/hadoop/hive/serde2/io/TimestampWritable.java @@ -55,9 +55,9 @@ * */ public class TimestampWritable implements WritableComparable { - static final private Log LOG = LogFactory.getLog(TimestampWritable.class); + private static final Log LOG = LogFactory.getLog(TimestampWritable.class); - static final public byte[] nullBytes = {0x0, 0x0, 0x0, 0x0}; + public static final byte[] nullBytes = {0x0, 0x0, 0x0, 0x0}; private static final int DECIMAL_OR_SECOND_VINT_FLAG = 0x80000000; private static final int LOWEST_31_BITS_OF_SEC_MASK = 0x7fffffff; @@ -587,7 +587,7 @@ private static boolean hasSecondVInt(byte b) { return WritableUtils.isNegativeVInt(b); } - private final boolean hasDecimalOrSecondVInt() { + private boolean hasDecimalOrSecondVInt() { return hasDecimalOrSecondVInt(currentBytes[offset]); } diff --git serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java index f7cfb36..c32b2ea 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java +++ 
serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java @@ -254,7 +254,7 @@ public VLong() { public long value; public byte length; - }; + } /** * Reads a zero-compressed encoded long from a byte array and returns it. @@ -293,7 +293,7 @@ public VInt() { public int value; public byte length; - }; + } public static final ThreadLocal threadLocalVInt = new ThreadLocal() { @Override diff --git serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/MapEqualComparer.java serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/MapEqualComparer.java index adde408..b74564c 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/MapEqualComparer.java +++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/MapEqualComparer.java @@ -21,6 +21,5 @@ /* * Compare the two map objects for equality. */ - public int compare(Object o1, MapObjectInspector moi1, - Object o2, MapObjectInspector moi2); + int compare(Object o1, MapObjectInspector moi1, Object o2, MapObjectInspector moi2); } diff --git serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspector.java serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspector.java index d90560b..9db5af8 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspector.java +++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspector.java @@ -37,13 +37,9 @@ */ public interface ObjectInspector extends Cloneable { - /** - * Category. - * - */ - public static enum Category { + enum Category { PRIMITIVE, LIST, MAP, STRUCT, UNION - }; + } /** * Returns the name of the data type that is inspected by this diff --git serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java index 8a42577..283781e 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java +++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorConverters.java @@ -51,7 +51,7 @@ /** * A converter which will convert objects with one ObjectInspector to another. */ - public static interface Converter { + public interface Converter { Object convert(Object input); } diff --git serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorFactory.java serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorFactory.java index 9a226b3..782053d 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorFactory.java +++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorFactory.java @@ -58,7 +58,7 @@ */ public enum ObjectInspectorOptions { JAVA, THRIFT, PROTOCOL_BUFFERS - }; + } private static ConcurrentHashMap objectInspectorCache = new ConcurrentHashMap(); diff --git serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/PrimitiveObjectInspector.java serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/PrimitiveObjectInspector.java index 05aed0a..d9f1349 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/PrimitiveObjectInspector.java +++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/PrimitiveObjectInspector.java @@ -29,12 +29,12 @@ /** * The primitive types supported by Hive. 
*/ - public static enum PrimitiveCategory { + enum PrimitiveCategory { VOID, BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, STRING, DATE, TIMESTAMP, BINARY, DECIMAL, VARCHAR, CHAR, UNKNOWN - }; + } - public PrimitiveTypeInfo getTypeInfo(); + PrimitiveTypeInfo getTypeInfo(); /** * Get the primitive category of the PrimitiveObjectInspector. diff --git serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java index 5ccacf1..49f1d7c 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java +++ serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorUtils.java @@ -1116,10 +1116,10 @@ static Timestamp getTimestampFromString(String s) { /** * Provide a general grouping for each primitive data type. */ - public static enum PrimitiveGrouping { + public enum PrimitiveGrouping { NUMERIC_GROUP, STRING_GROUP, BOOLEAN_GROUP, DATE_GROUP, BINARY_GROUP, VOID_GROUP, UNKNOWN_GROUP - }; + } /** * Based on the PrimitiveCategory of a type, return the PrimitiveGrouping diff --git serde/src/java/org/apache/hadoop/hive/serde2/thrift/TCTLSeparatedProtocol.java serde/src/java/org/apache/hadoop/hive/serde2/thrift/TCTLSeparatedProtocol.java index 63f3287..89d61fc 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/thrift/TCTLSeparatedProtocol.java +++ serde/src/java/org/apache/hadoop/hive/serde2/thrift/TCTLSeparatedProtocol.java @@ -292,7 +292,7 @@ public String nextToken() throws EOFException { final String theRet = ret == null ? null : ret.toString(); return theRet; } - }; + } /** * The simple constructor which assumes ctl-a, ctl-b and '\n' separators and diff --git serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java index e5c9f18..8d36b64 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java +++ serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoFactory.java @@ -170,7 +170,7 @@ public static VarcharTypeInfo getVarcharTypeInfo(int length) { public static DecimalTypeInfo getDecimalTypeInfo(int precision, int scale) { String fullName = DecimalTypeInfo.getQualifiedName(precision, scale); return (DecimalTypeInfo) getPrimitiveTypeInfo(fullName); - }; + } public static TypeInfo getPrimitiveTypeInfoFromPrimitiveWritable( Class clazz) { diff --git serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java index 8dffe63..b3c5b08 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java +++ serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java @@ -253,7 +253,7 @@ public static boolean doPrimitiveCategoriesMatch(TypeInfo ti1, TypeInfo ti2) { public String toString() { return "" + position + ":" + text; } - }; + } private static boolean isTypeChar(char c) { return Character.isLetterOrDigit(c) || c == '_' || c == '.'; diff --git serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerializer.java serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerializer.java index f8161da..44a048a 100644 --- serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerializer.java +++ serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerializer.java @@ -226,7 +226,8 @@ public void canSerializeUnions() 
throws SerDeException, IOException { assertEquals(dec.setScale(4), dec1); } - private enum enum1 {BLUE, RED , GREEN}; + private enum enum1 {BLUE, RED , GREEN} + @Test public void canSerializeEnums() throws SerDeException, IOException { String type = "{\"type\": \"enum\", \"name\": \"enum1_values\", " + diff --git service/src/java/org/apache/hive/service/Service.java service/src/java/org/apache/hive/service/Service.java index 2111837..b95077c 100644 --- service/src/java/org/apache/hive/service/Service.java +++ service/src/java/org/apache/hive/service/Service.java @@ -29,7 +29,7 @@ /** * Service states */ - public enum STATE { + enum STATE { /** Constructed but not initialized */ NOTINITED, diff --git service/src/java/org/apache/hive/service/auth/AuthenticationProviderFactory.java service/src/java/org/apache/hive/service/auth/AuthenticationProviderFactory.java index e51d4f4..c6696e2 100644 --- service/src/java/org/apache/hive/service/auth/AuthenticationProviderFactory.java +++ service/src/java/org/apache/hive/service/auth/AuthenticationProviderFactory.java @@ -21,7 +21,7 @@ public class AuthenticationProviderFactory { - public static enum AuthMethods { + public enum AuthMethods { LDAP("LDAP"), PAM("PAM"), CUSTOM("CUSTOM"), diff --git service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java index 72b3e7e..41e97f9 100644 --- service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java +++ service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java @@ -47,7 +47,7 @@ public class HiveAuthFactory { private static final Logger LOG = LoggerFactory.getLogger(HiveAuthFactory.class); - public static enum AuthTypes { + public enum AuthTypes { NOSASL("NOSASL"), NONE("NONE"), LDAP("LDAP"), @@ -65,7 +65,7 @@ public String getAuthName() { return authType; } - }; + } private HadoopThriftAuthBridge.Server saslServer = null; private String authTypeStr; diff --git service/src/java/org/apache/hive/service/auth/KerberosSaslHelper.java service/src/java/org/apache/hive/service/auth/KerberosSaslHelper.java index 4b70558..83c4ee3 100644 --- service/src/java/org/apache/hive/service/auth/KerberosSaslHelper.java +++ service/src/java/org/apache/hive/service/auth/KerberosSaslHelper.java @@ -79,8 +79,8 @@ public static TTransport getKerberosTransport(String principal, String host, } } - public static TTransport createSubjectAssumedTransport(String principal, - TTransport underlyingTransport, Map saslProps) throws IOException { + public static TTransport createSubjectAssumedTransport(String principal, + TTransport underlyingTransport, Map saslProps) throws IOException { TTransport saslTransport = null; final String names[] = principal.split("[/@]"); try { diff --git service/src/java/org/apache/hive/service/auth/SaslQOP.java service/src/java/org/apache/hive/service/auth/SaslQOP.java index 0b2e7a2..820ef74 100644 --- service/src/java/org/apache/hive/service/auth/SaslQOP.java +++ service/src/java/org/apache/hive/service/auth/SaslQOP.java @@ -39,7 +39,7 @@ strToEnum.put(SaslQOP.toString(), SaslQOP); } - private SaslQOP(final String saslQop) { + SaslQOP(final String saslQop) { this.saslQop = saslQop; } diff --git service/src/java/org/apache/hive/service/cli/ICLIService.java service/src/java/org/apache/hive/service/cli/ICLIService.java index c569796..fb864cb 100644 --- service/src/java/org/apache/hive/service/cli/ICLIService.java +++ service/src/java/org/apache/hive/service/cli/ICLIService.java @@ -27,80 +27,77 @@ public interface 
ICLIService { - public abstract SessionHandle openSession(String username, String password, - Map configuration) + SessionHandle openSession(String username, String password, Map configuration) throws HiveSQLException; - public abstract SessionHandle openSessionWithImpersonation(String username, String password, - Map configuration, String delegationToken) + SessionHandle openSessionWithImpersonation(String username, String password, + Map configuration, String delegationToken) throws HiveSQLException; - public abstract void closeSession(SessionHandle sessionHandle) + void closeSession(SessionHandle sessionHandle) throws HiveSQLException; - public abstract GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType infoType) + GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType infoType) throws HiveSQLException; - public abstract OperationHandle executeStatement(SessionHandle sessionHandle, String statement, - Map confOverlay) + OperationHandle executeStatement(SessionHandle sessionHandle, String statement, + Map confOverlay) throws HiveSQLException; - public abstract OperationHandle executeStatementAsync(SessionHandle sessionHandle, - String statement, Map confOverlay) + OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement, + Map confOverlay) throws HiveSQLException; - public abstract OperationHandle getTypeInfo(SessionHandle sessionHandle) + OperationHandle getTypeInfo(SessionHandle sessionHandle) throws HiveSQLException; - public abstract OperationHandle getCatalogs(SessionHandle sessionHandle) + OperationHandle getCatalogs(SessionHandle sessionHandle) throws HiveSQLException; - public abstract OperationHandle getSchemas(SessionHandle sessionHandle, - String catalogName, String schemaName) + OperationHandle getSchemas(SessionHandle sessionHandle, String catalogName, String schemaName) throws HiveSQLException; - public abstract OperationHandle getTables(SessionHandle sessionHandle, - String catalogName, String schemaName, String tableName, List tableTypes) + OperationHandle getTables(SessionHandle sessionHandle, String catalogName, String schemaName, + String tableName, List tableTypes) throws HiveSQLException; - public abstract OperationHandle getTableTypes(SessionHandle sessionHandle) + OperationHandle getTableTypes(SessionHandle sessionHandle) throws HiveSQLException; - public abstract OperationHandle getColumns(SessionHandle sessionHandle, - String catalogName, String schemaName, String tableName, String columnName) + OperationHandle getColumns(SessionHandle sessionHandle, String catalogName, String schemaName, + String tableName, String columnName) throws HiveSQLException; - public abstract OperationHandle getFunctions(SessionHandle sessionHandle, - String catalogName, String schemaName, String functionName) + OperationHandle getFunctions(SessionHandle sessionHandle, String catalogName, String schemaName, + String functionName) throws HiveSQLException; - public abstract OperationStatus getOperationStatus(OperationHandle opHandle) + OperationStatus getOperationStatus(OperationHandle opHandle) throws HiveSQLException; - public abstract void cancelOperation(OperationHandle opHandle) + void cancelOperation(OperationHandle opHandle) throws HiveSQLException; - public abstract void closeOperation(OperationHandle opHandle) + void closeOperation(OperationHandle opHandle) throws HiveSQLException; - public abstract TableSchema getResultSetMetadata(OperationHandle opHandle) + TableSchema getResultSetMetadata(OperationHandle opHandle) throws 
HiveSQLException; - public abstract RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, - long maxRows) + RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows) throws HiveSQLException; - public abstract RowSet fetchResults(OperationHandle opHandle) + RowSet fetchResults(OperationHandle opHandle) throws HiveSQLException; - public abstract String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String owner, String renewer) throws HiveSQLException; + String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String owner, + String renewer) throws HiveSQLException; - public abstract void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String tokenStr) throws HiveSQLException; + void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + String tokenStr) throws HiveSQLException; - public abstract void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String tokenStr) throws HiveSQLException; + void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + String tokenStr) throws HiveSQLException; } diff --git service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java index 3a8a07f..e392c45 100644 --- service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java +++ service/src/java/org/apache/hive/service/cli/operation/TableTypeMapping.java @@ -27,18 +27,18 @@ * @param clientTypeName * @return */ - public String mapToHiveType (String clientTypeName); + String mapToHiveType(String clientTypeName); /** * Map hive's table type name to client's table type * @param clientTypeName * @return */ - public String mapToClientType (String hiveTypeName); + String mapToClientType(String hiveTypeName); /** * Get all the table types of this mapping * @return */ - public Set getTableTypeNames(); + Set getTableTypeNames(); } diff --git service/src/java/org/apache/hive/service/cli/session/HiveSession.java service/src/java/org/apache/hive/service/cli/session/HiveSession.java index 9785e95..cbedf9e 100644 --- service/src/java/org/apache/hive/service/cli/session/HiveSession.java +++ service/src/java/org/apache/hive/service/cli/session/HiveSession.java @@ -33,9 +33,9 @@ public interface HiveSession extends HiveSessionBase { - public void open(); + void open(); - public IMetaStoreClient getMetaStoreClient() throws HiveSQLException; + IMetaStoreClient getMetaStoreClient() throws HiveSQLException; /** * getInfo operation handler @@ -43,7 +43,7 @@ * @return * @throws HiveSQLException */ - public GetInfoValue getInfo(GetInfoType getInfoType) throws HiveSQLException; + GetInfoValue getInfo(GetInfoType getInfoType) throws HiveSQLException; /** * execute operation handler @@ -52,8 +52,8 @@ * @return * @throws HiveSQLException */ - public OperationHandle executeStatement(String statement, - Map confOverlay) throws HiveSQLException; + OperationHandle executeStatement(String statement, Map confOverlay) + throws HiveSQLException; /** * execute operation handler @@ -62,22 +62,22 @@ public OperationHandle executeStatement(String statement, * @return * @throws HiveSQLException */ - public OperationHandle executeStatementAsync(String statement, - Map confOverlay) throws HiveSQLException; + OperationHandle executeStatementAsync(String statement, Map confOverlay) + throws HiveSQLException; /** * 
getTypeInfo operation handler * @return * @throws HiveSQLException */ - public OperationHandle getTypeInfo() throws HiveSQLException; + OperationHandle getTypeInfo() throws HiveSQLException; /** * getCatalogs operation handler * @return * @throws HiveSQLException */ - public OperationHandle getCatalogs() throws HiveSQLException; + OperationHandle getCatalogs() throws HiveSQLException; /** * getSchemas operation handler @@ -86,7 +86,7 @@ public OperationHandle executeStatementAsync(String statement, * @return * @throws HiveSQLException */ - public OperationHandle getSchemas(String catalogName, String schemaName) + OperationHandle getSchemas(String catalogName, String schemaName) throws HiveSQLException; /** @@ -98,15 +98,15 @@ public OperationHandle getSchemas(String catalogName, String schemaName) * @return * @throws HiveSQLException */ - public OperationHandle getTables(String catalogName, String schemaName, - String tableName, List tableTypes) throws HiveSQLException; + OperationHandle getTables(String catalogName, String schemaName, String tableName, + List tableTypes) throws HiveSQLException; /** * getTableTypes operation handler * @return * @throws HiveSQLException */ - public OperationHandle getTableTypes() throws HiveSQLException ; + OperationHandle getTableTypes() throws HiveSQLException ; /** * getColumns operation handler @@ -117,8 +117,8 @@ public OperationHandle getTables(String catalogName, String schemaName, * @return * @throws HiveSQLException */ - public OperationHandle getColumns(String catalogName, String schemaName, - String tableName, String columnName) throws HiveSQLException; + OperationHandle getColumns(String catalogName, String schemaName, String tableName, + String columnName) throws HiveSQLException; /** * getFunctions operation handler @@ -128,33 +128,33 @@ public OperationHandle getColumns(String catalogName, String schemaName, * @return * @throws HiveSQLException */ - public OperationHandle getFunctions(String catalogName, String schemaName, - String functionName) throws HiveSQLException; + OperationHandle getFunctions(String catalogName, String schemaName, String functionName) + throws HiveSQLException; /** * close the session * @throws HiveSQLException */ - public void close() throws HiveSQLException; + void close() throws HiveSQLException; - public void cancelOperation(OperationHandle opHandle) throws HiveSQLException; + void cancelOperation(OperationHandle opHandle) throws HiveSQLException; - public void closeOperation(OperationHandle opHandle) throws HiveSQLException; + void closeOperation(OperationHandle opHandle) throws HiveSQLException; - public TableSchema getResultSetMetadata(OperationHandle opHandle) + TableSchema getResultSetMetadata(OperationHandle opHandle) throws HiveSQLException; - public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows) + RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows) throws HiveSQLException; - public RowSet fetchResults(OperationHandle opHandle) throws HiveSQLException; + RowSet fetchResults(OperationHandle opHandle) throws HiveSQLException; - public String getDelegationToken(HiveAuthFactory authFactory, String owner, - String renewer) throws HiveSQLException; + String getDelegationToken(HiveAuthFactory authFactory, String owner, String renewer) + throws HiveSQLException; - public void cancelDelegationToken(HiveAuthFactory authFactory, String tokenStr) + void cancelDelegationToken(HiveAuthFactory authFactory, String tokenStr) throws 
HiveSQLException; - public void renewDelegationToken(HiveAuthFactory authFactory, String tokenStr) + void renewDelegationToken(HiveAuthFactory authFactory, String tokenStr) throws HiveSQLException; } diff --git service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java index 4c3164e..3ae74fa 100644 --- service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java +++ service/src/java/org/apache/hive/service/cli/session/HiveSessionBase.java @@ -38,40 +38,40 @@ * Set the session manager for the session * @param sessionManager */ - public void setSessionManager(SessionManager sessionManager); + void setSessionManager(SessionManager sessionManager); /** * Get the session manager for the session */ - public SessionManager getSessionManager(); + SessionManager getSessionManager(); /** * Set operation manager for the session * @param operationManager */ - public void setOperationManager(OperationManager operationManager); + void setOperationManager(OperationManager operationManager); /** * Initialize the session * @param sessionConfMap */ - public void initialize(Map sessionConfMap); + void initialize(Map sessionConfMap); - public SessionHandle getSessionHandle(); + SessionHandle getSessionHandle(); - public String getUsername(); + String getUsername(); - public String getPassword(); + String getPassword(); - public HiveConf getHiveConf(); + HiveConf getHiveConf(); - public SessionState getSessionState(); + SessionState getSessionState(); - public String getUserName(); + String getUserName(); - public void setUserName(String userName); + void setUserName(String userName); - public String getIpAddress(); + String getIpAddress(); - public void setIpAddress(String ipAddress); + void setIpAddress(String ipAddress); } diff --git service/src/java/org/apache/hive/service/cli/session/HiveSessionHook.java service/src/java/org/apache/hive/service/cli/session/HiveSessionHook.java index 06388cc..7e4f2b0 100644 --- service/src/java/org/apache/hive/service/cli/session/HiveSessionHook.java +++ service/src/java/org/apache/hive/service/cli/session/HiveSessionHook.java @@ -33,5 +33,5 @@ * @param sessionHookContext context * @throws HiveSQLException */ - public void run(HiveSessionHookContext sessionHookContext) throws HiveSQLException; + void run(HiveSessionHookContext sessionHookContext) throws HiveSQLException; } diff --git service/src/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java service/src/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java index 156c814..0a10dba 100644 --- service/src/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java +++ service/src/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java @@ -30,17 +30,17 @@ * Retrieve session conf * @return */ - public HiveConf getSessionConf(); + HiveConf getSessionConf(); /** * The get the username starting the session * @return */ - public String getSessionUser(); + String getSessionUser(); /** * Retrieve handle for the session * @return */ - public String getSessionHandle(); + String getSessionHandle(); } diff --git service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java index 98d75b5..25e75a6 100644 --- service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java +++ service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java @@ -71,7 +71,7 @@ public 
void run() { threadPool.setMaxThreads(maxWorkerThreads); httpServer.setThreadPool(threadPool); - SelectChannelConnector connector = new SelectChannelConnector();; + SelectChannelConnector connector = new SelectChannelConnector(); boolean useSsl = hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL); String schemeName = useSsl ? "https" : "http"; String authType = hiveConf.getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION); diff --git shims/0.23/src/main/java/org/apache/hadoop/hive/shims/ZeroCopyShims.java shims/0.23/src/main/java/org/apache/hadoop/hive/shims/ZeroCopyShims.java index 8de08ad..364d0f6 100644 --- shims/0.23/src/main/java/org/apache/hadoop/hive/shims/ZeroCopyShims.java +++ shims/0.23/src/main/java/org/apache/hadoop/hive/shims/ZeroCopyShims.java @@ -43,12 +43,12 @@ public ByteBufferPoolAdapter(ByteBufferPoolShim pool) { } @Override - public final ByteBuffer getBuffer(boolean direct, int length) { + public ByteBuffer getBuffer(boolean direct, int length) { return this.pool.getBuffer(direct, length); } @Override - public final void putBuffer(ByteBuffer buffer) { + public void putBuffer(ByteBuffer buffer) { this.pool.putBuffer(buffer); } } @@ -56,9 +56,9 @@ public final void putBuffer(ByteBuffer buffer) { private static final class ZeroCopyAdapter implements ZeroCopyReaderShim { private final FSDataInputStream in; private final ByteBufferPoolAdapter pool; - private final static EnumSet CHECK_SUM = EnumSet + private static final EnumSet CHECK_SUM = EnumSet .noneOf(ReadOption.class); - private final static EnumSet NO_CHECK_SUM = EnumSet + private static final EnumSet NO_CHECK_SUM = EnumSet .of(ReadOption.SKIP_CHECKSUMS); public ZeroCopyAdapter(FSDataInputStream in, ByteBufferPoolShim poolshim) { @@ -70,7 +70,7 @@ public ZeroCopyAdapter(FSDataInputStream in, ByteBufferPoolShim poolshim) { } } - public final ByteBuffer readBuffer(int maxLength, boolean verifyChecksums) + public ByteBuffer readBuffer(int maxLength, boolean verifyChecksums) throws IOException { EnumSet options = NO_CHECK_SUM; if (verifyChecksums) { @@ -79,7 +79,7 @@ public final ByteBuffer readBuffer(int maxLength, boolean verifyChecksums) return this.in.read(this.pool, maxLength, options); } - public final void releaseBuffer(ByteBuffer buffer) { + public void releaseBuffer(ByteBuffer buffer) { this.in.releaseBuffer(buffer); } } @@ -106,18 +106,15 @@ public static DirectDecompressorShim getDirectDecompressor( DirectCompressionType codec) { DirectDecompressor decompressor = null; switch (codec) { - case ZLIB: { - decompressor = new ZlibDirectDecompressor(); - } - break; - case ZLIB_NOHEADER: { - decompressor = new ZlibDirectDecompressor(CompressionHeader.NO_HEADER, 0); - } - break; - case SNAPPY: { - decompressor = new SnappyDirectDecompressor(); - } - break; + case ZLIB: + decompressor = new ZlibDirectDecompressor(); + break; + case ZLIB_NOHEADER: + decompressor = new ZlibDirectDecompressor(CompressionHeader.NO_HEADER, 0); + break; + case SNAPPY: + decompressor = new SnappyDirectDecompressor(); + break; } if (decompressor != null) { return new DirectDecompressorAdapter(decompressor); diff --git shims/common-secure/src/main/java/org/apache/hadoop/hive/thrift/DelegationTokenStore.java shims/common-secure/src/main/java/org/apache/hadoop/hive/thrift/DelegationTokenStore.java index f3c2e48..23850ae 100644 --- shims/common-secure/src/main/java/org/apache/hadoop/hive/thrift/DelegationTokenStore.java +++ shims/common-secure/src/main/java/org/apache/hadoop/hive/thrift/DelegationTokenStore.java @@ -33,7 +33,7 @@ /** * Exception for 
internal token store errors that typically cannot be handled by the caller. */ - public static class TokenStoreException extends RuntimeException { + class TokenStoreException extends RuntimeException { private static final long serialVersionUID = -8693819817623074083L; public TokenStoreException(Throwable cause) { diff --git shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandler.java shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandler.java index fba32fb..fee3ac3 100644 --- shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandler.java +++ shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandler.java @@ -36,17 +36,13 @@ * @param e * @return RecordReader */ - public RecordReader handleRecordReaderCreationException(Exception e) + RecordReader handleRecordReaderCreationException(Exception e) throws IOException; /** * process exceptions thrown when calling rr's next - * - * @param e - * @param result - * @throws IOException */ - public void handleRecorReaderNextException(Exception e, - HiveIOExceptionNextHandleResult result) throws IOException; + void handleRecorReaderNextException(Exception e, HiveIOExceptionNextHandleResult result) + throws IOException; } diff --git shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java index eefd5e5..9b462ec 100644 --- shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java +++ shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java @@ -70,7 +70,7 @@ */ public interface HadoopShims { - static final Log LOG = LogFactory.getLog(HadoopShims.class); + Log LOG = LogFactory.getLog(HadoopShims.class); /** * Constructs and Returns TaskAttempt Log Url @@ -86,19 +86,19 @@ String getTaskAttemptLogUrl(JobConf conf, /** * Returns a shim to wrap MiniMrCluster */ - public MiniMrShim getMiniMrCluster(Configuration conf, int numberOfTaskTrackers, - String nameNode, int numDir) throws IOException; + MiniMrShim getMiniMrCluster(Configuration conf, int numberOfTaskTrackers, String nameNode, + int numDir) throws IOException; - public MiniMrShim getMiniTezCluster(Configuration conf, int numberOfTaskTrackers, - String nameNode, int numDir) throws IOException; + MiniMrShim getMiniTezCluster(Configuration conf, int numberOfTaskTrackers, String nameNode, + int numDir) throws IOException; /** * Shim for MiniMrCluster */ - public interface MiniMrShim { - public int getJobTrackerPort() throws UnsupportedOperationException; - public void shutdown() throws IOException; - public void setupConfiguration(Configuration conf); + interface MiniMrShim { + int getJobTrackerPort() throws UnsupportedOperationException; + void shutdown() throws IOException; + void setupConfiguration(Configuration conf); } /** @@ -113,7 +113,7 @@ MiniDFSShim getMiniDfs(Configuration conf, /** * Shim around the functions in MiniDFSCluster that Hive uses. */ - public interface MiniDFSShim { + interface MiniDFSShim { FileSystem getFileSystem() throws IOException; void shutdown() throws IOException; @@ -126,7 +126,7 @@ MiniDFSShim getMiniDfs(Configuration conf, int createHadoopArchive(Configuration conf, Path parentDir, Path destDir, String archiveName) throws Exception; - public URI getHarUri(URI original, URI base, URI originalBase) + URI getHarUri(URI original, URI base, URI originalBase) throws URISyntaxException; /** * Hive uses side effect files exclusively for it's output. 
It also manages @@ -144,11 +144,11 @@ public URI getHarUri(URI original, URI base, URI originalBase) * @return the unquoted string * */ - public String unquoteHtmlChars(String item); + String unquoteHtmlChars(String item); - public void closeAllForUGI(UserGroupInformation ugi); + void closeAllForUGI(UserGroupInformation ugi); /** * Get the UGI that the given job configuration will run as. @@ -156,7 +156,7 @@ public URI getHarUri(URI original, URI base, URI originalBase) * In secure versions of Hadoop, this simply returns the current * access control context's user, ignoring the configuration. */ - public UserGroupInformation getUGIForConf(Configuration conf) throws LoginException, IOException; + UserGroupInformation getUGIForConf(Configuration conf) throws LoginException, IOException; /** * Used by metastore server to perform requested rpc in client context. @@ -166,7 +166,7 @@ public URI getHarUri(URI original, URI base, URI originalBase) * @throws IOException * @throws InterruptedException */ - public T doAs(UserGroupInformation ugi, PrivilegedExceptionAction pvea) throws + T doAs(UserGroupInformation ugi, PrivilegedExceptionAction pvea) throws IOException, InterruptedException; /** @@ -176,7 +176,7 @@ public URI getHarUri(URI original, URI base, URI originalBase) * @return Return the name of environment variable used by hadoop to find * location of token file */ - public String getTokenFileLocEnvName(); + String getTokenFileLocEnvName(); /** @@ -186,7 +186,7 @@ public URI getHarUri(URI original, URI base, URI originalBase) * @return Path of the file with token credential * @throws IOException */ - public Path createDelegationTokenFile(final Configuration conf) throws IOException; + Path createDelegationTokenFile(final Configuration conf) throws IOException; /** @@ -195,7 +195,7 @@ public URI getHarUri(URI original, URI base, URI originalBase) * @param groupNames group names associated with remote user name * @return UGI created for the remote user. */ - public UserGroupInformation createRemoteUser(String userName, List groupNames); + UserGroupInformation createRemoteUser(String userName, List groupNames); /** * Get the short name corresponding to the subject in the passed UGI @@ -204,18 +204,18 @@ public URI getHarUri(URI original, URI base, URI originalBase) * undergoing the translation in the kerberos name rule mapping). * In unsecure versions of Hadoop, this returns the name of the subject */ - public String getShortUserName(UserGroupInformation ugi); + String getShortUserName(UserGroupInformation ugi); /** * Return true if the Shim is based on Hadoop Security APIs. */ - public boolean isSecureShimImpl(); + boolean isSecureShimImpl(); /** * Return true if the hadoop configuration has security enabled * @return */ - public boolean isSecurityEnabled(); + boolean isSecurityEnabled(); /** * Get the string form of the token given a token signature. 
@@ -232,7 +232,7 @@ public URI getHarUri(URI original, URI base, URI originalBase) * @return the string form of the token found * @throws IOException */ - public String getTokenStrForm(String tokenSignature) throws IOException; + String getTokenStrForm(String tokenSignature) throws IOException; /** * Add a delegation token to the given ugi @@ -241,7 +241,7 @@ public URI getHarUri(URI original, URI base, URI originalBase) * @param tokenService * @throws IOException */ - public void setTokenStr(UserGroupInformation ugi, String tokenStr, String tokenService) + void setTokenStr(UserGroupInformation ugi, String tokenStr, String tokenService) throws IOException; /** @@ -251,10 +251,10 @@ public void setTokenStr(UserGroupInformation ugi, String tokenStr, String tokenS * @return * @throws IOException */ - public String addServiceToToken(String tokenStr, String tokenService) + String addServiceToToken(String tokenStr, String tokenService) throws IOException; - enum JobTrackerState { INITIALIZING, RUNNING }; + enum JobTrackerState { INITIALIZING, RUNNING } /** * Convert the ClusterStatus to its Thrift equivalent: JobTrackerState. @@ -263,20 +263,20 @@ public String addServiceToToken(String tokenStr, String tokenService) * @return the matching JobTrackerState * @throws Exception if no equivalent JobTrackerState exists */ - public JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception; + JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception; - public TaskAttemptContext newTaskAttemptContext(Configuration conf, final Progressable progressable); + TaskAttemptContext newTaskAttemptContext(Configuration conf, final Progressable progressable); - public TaskAttemptID newTaskAttemptID(JobID jobId, boolean isMap, int taskId, int id); + TaskAttemptID newTaskAttemptID(JobID jobId, boolean isMap, int taskId, int id); - public JobContext newJobContext(Job job); + JobContext newJobContext(Job job); /** * Check wether MR is configured to run in local-mode * @param conf * @return */ - public boolean isLocalMode(Configuration conf); + boolean isLocalMode(Configuration conf); /** * All retrieval of jobtracker/resource manager rpc address @@ -284,7 +284,7 @@ public String addServiceToToken(String tokenStr, String tokenService) * @param conf * @return */ - public String getJobLauncherRpcAddress(Configuration conf); + String getJobLauncherRpcAddress(Configuration conf); /** * All updates to jobtracker/resource manager rpc address @@ -292,7 +292,7 @@ public String addServiceToToken(String tokenStr, String tokenService) * @param conf * @return */ - public void setJobLauncherRpcAddress(Configuration conf, String val); + void setJobLauncherRpcAddress(Configuration conf, String val); /** * All references to jobtracker/resource manager http address @@ -300,36 +300,36 @@ public String addServiceToToken(String tokenStr, String tokenService) * @param conf * @return */ - public String getJobLauncherHttpAddress(Configuration conf); + String getJobLauncherHttpAddress(Configuration conf); /** * Perform kerberos login using the given principal and keytab * @throws IOException */ - public void loginUserFromKeytab(String principal, String keytabFile) throws IOException; + void loginUserFromKeytab(String principal, String keytabFile) throws IOException; /** * Perform kerberos login using the given principal and keytab, * and return the UGI object * @throws IOException */ - public UserGroupInformation loginUserFromKeytabAndReturnUGI(String principal, - String keytabFile) throws 
IOException; + UserGroupInformation loginUserFromKeytabAndReturnUGI(String principal, String keytabFile) + throws IOException; /** * Perform kerberos re-login using the given principal and keytab, to renew * the credentials * @throws IOException */ - public void reLoginUserFromKeytab() throws IOException; + void reLoginUserFromKeytab() throws IOException; /*** * Check if the current UGI is keytab based * @return * @throws IOException */ - public boolean isLoginKeytabBased() throws IOException; + boolean isLoginKeytabBased() throws IOException; /** * Move the directory/file to trash. In case of the symlinks or mount points, the file is @@ -340,7 +340,7 @@ public UserGroupInformation loginUserFromKeytabAndReturnUGI(String principal, * @return false if the item is already in the trash or trash is disabled * @throws IOException */ - public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration conf) + boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration conf) throws IOException; /** @@ -350,7 +350,7 @@ public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration co * @param path * @return */ - public long getDefaultBlockSize(FileSystem fs, Path path); + long getDefaultBlockSize(FileSystem fs, Path path); /** * Get the default replication for a path. In case of CSMT the given path will be used to @@ -359,21 +359,21 @@ public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration co * @param path * @return */ - public short getDefaultReplication(FileSystem fs, Path path); + short getDefaultReplication(FileSystem fs, Path path); /** * Create the proxy ugi for the given userid * @param userName * @return */ - public UserGroupInformation createProxyUser(String userName) throws IOException; + UserGroupInformation createProxyUser(String userName) throws IOException; /** * Verify proxy access to given UGI for given user * @param ugi */ - public void authorizeProxyAccess(String proxyUser, UserGroupInformation realUserUgi, - String ipAddress, Configuration conf) throws IOException; + void authorizeProxyAccess(String proxyUser, UserGroupInformation realUserUgi, String ipAddress, + Configuration conf) throws IOException; /** * The method sets to set the partition file has a different signature between @@ -389,7 +389,7 @@ public void authorizeProxyAccess(String proxyUser, UserGroupInformation realUser * InputSplitShim. * */ - public interface InputSplitShim extends InputSplit { + interface InputSplitShim extends InputSplit { JobConf getJob(); @Override @@ -478,7 +478,7 @@ RecordReader getRecordReader(JobConf job, InputSplitShim split, Reporter reporte * @param stream the stream to hflush. * @throws IOException */ - public void hflush(FSDataOutputStream stream) throws IOException; + void hflush(FSDataOutputStream stream) throws IOException; /** * For a given file, return a file status @@ -488,7 +488,7 @@ RecordReader getRecordReader(JobConf job, InputSplitShim split, Reporter reporte * @return * @throws IOException */ - public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException; + HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException; /** * For a given file, set a given file status. 
@@ -498,44 +498,47 @@ RecordReader getRecordReader(JobConf job, InputSplitShim split, Reporter reporte * @param target * @throws IOException */ - public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus, - FileSystem fs, Path target) throws IOException; + void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus, FileSystem fs, + Path target) throws IOException; /** * Includes the vanilla FileStatus, and AclStatus if it applies to this version of hadoop. */ - public interface HdfsFileStatus { - public FileStatus getFileStatus(); - public void debugLog(); + interface HdfsFileStatus { + FileStatus getFileStatus(); + void debugLog(); } - public HCatHadoopShims getHCatShim(); - public interface HCatHadoopShims { + HCatHadoopShims getHCatShim(); + interface HCatHadoopShims { - enum PropertyName {CACHE_ARCHIVES, CACHE_FILES, CACHE_SYMLINK, CLASSPATH_ARCHIVES, CLASSPATH_FILES} + enum PropertyName { + CACHE_ARCHIVES, CACHE_FILES, CACHE_SYMLINK, CLASSPATH_ARCHIVES, CLASSPATH_FILES + } - public TaskID createTaskID(); + TaskID createTaskID(); - public TaskAttemptID createTaskAttemptID(); + TaskAttemptID createTaskAttemptID(); - public org.apache.hadoop.mapreduce.TaskAttemptContext createTaskAttemptContext(Configuration conf, - TaskAttemptID taskId); + org.apache.hadoop.mapreduce.TaskAttemptContext createTaskAttemptContext(Configuration conf, + TaskAttemptID taskId); - public org.apache.hadoop.mapred.TaskAttemptContext createTaskAttemptContext(JobConf conf, - org.apache.hadoop.mapred.TaskAttemptID taskId, Progressable progressable); + org.apache.hadoop.mapred.TaskAttemptContext createTaskAttemptContext(JobConf conf, + org.apache.hadoop.mapred.TaskAttemptID taskId, Progressable progressable); - public JobContext createJobContext(Configuration conf, JobID jobId); + JobContext createJobContext(Configuration conf, JobID jobId); - public org.apache.hadoop.mapred.JobContext createJobContext(JobConf conf, JobID jobId, Progressable progressable); + org.apache.hadoop.mapred.JobContext createJobContext(JobConf conf, JobID jobId, + Progressable progressable); - public void commitJob(OutputFormat outputFormat, Job job) throws IOException; + void commitJob(OutputFormat outputFormat, Job job) throws IOException; - public void abortJob(OutputFormat outputFormat, Job job) throws IOException; + void abortJob(OutputFormat outputFormat, Job job) throws IOException; /* Referring to job tracker in 0.20 and resource manager in 0.23 */ - public InetSocketAddress getResourceManagerAddress(Configuration conf); + InetSocketAddress getResourceManagerAddress(Configuration conf); - public String getPropertyName(PropertyName name); + String getPropertyName(PropertyName name); /** * Checks if file is in HDFS filesystem. @@ -544,62 +547,62 @@ public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus, * @param path * @return true if the file is in HDFS, false if the file is in other file systems. */ - public boolean isFileInHDFS(FileSystem fs, Path path) throws IOException; + boolean isFileInHDFS(FileSystem fs, Path path) throws IOException; } /** * Provides a Hadoop JobTracker shim. * @param conf not {@code null} */ - public WebHCatJTShim getWebHCatShim(Configuration conf, UserGroupInformation ugi) throws IOException; - public interface WebHCatJTShim { + WebHCatJTShim getWebHCatShim(Configuration conf, UserGroupInformation ugi) throws IOException; + interface WebHCatJTShim { /** * Grab a handle to a job that is already known to the JobTracker.
* * @return Profile of the job, or null if not found. */ - public JobProfile getJobProfile(org.apache.hadoop.mapred.JobID jobid) throws IOException; + JobProfile getJobProfile(org.apache.hadoop.mapred.JobID jobid) throws IOException; /** * Grab a handle to a job that is already known to the JobTracker. * * @return Status of the job, or null if not found. */ - public JobStatus getJobStatus(org.apache.hadoop.mapred.JobID jobid) throws IOException; + JobStatus getJobStatus(org.apache.hadoop.mapred.JobID jobid) throws IOException; /** * Kill a job. */ - public void killJob(org.apache.hadoop.mapred.JobID jobid) throws IOException; + void killJob(org.apache.hadoop.mapred.JobID jobid) throws IOException; /** * Get all the jobs submitted. */ - public JobStatus[] getAllJobs() throws IOException; + JobStatus[] getAllJobs() throws IOException; /** * Close the connection to the Job Tracker. */ - public void close(); + void close(); /** * Does exactly what org.apache.hadoop.mapreduce.Job#addCacheFile(URI) in Hadoop 2. * Assumes that both parameters are not {@code null}. */ - public void addCacheFile(URI uri, Job job); + void addCacheFile(URI uri, Job job); /** * Kills all jobs tagged with the given tag that have been started after the * given timestamp. */ - public void killJobs(String tag, long timestamp); + void killJobs(String tag, long timestamp); } /** * Create a proxy file system that can serve a given scheme/authority using some * other file system. */ - public FileSystem createProxyFileSystem(FileSystem fs, URI uri); + FileSystem createProxyFileSystem(FileSystem fs, URI uri); - public Map getHadoopConfNames(); + Map getHadoopConfNames(); /** * a hadoop.io ByteBufferPool shim. */ - public interface ByteBufferPoolShim { + interface ByteBufferPoolShim { /** * Get a new ByteBuffer from the pool. The pool can provide this from * removing a buffer from its internal cache, or by allocating a @@ -629,44 +632,46 @@ public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus, * * @return returns null if not supported */ - public ZeroCopyReaderShim getZeroCopyReader(FSDataInputStream in, ByteBufferPoolShim pool) throws IOException; + ZeroCopyReaderShim getZeroCopyReader(FSDataInputStream in, ByteBufferPoolShim pool) + throws IOException; - public interface ZeroCopyReaderShim { + interface ZeroCopyReaderShim { /** - * Get a ByteBuffer from the FSDataInputStream - this can be either a HeapByteBuffer or an MappedByteBuffer. + * Get a ByteBuffer from the FSDataInputStream - this can be either a HeapByteBuffer + * or an MappedByteBuffer. * Also move the in stream by that amount. The data read can be small than maxLength. * * @return ByteBuffer read from the stream, */ - public ByteBuffer readBuffer(int maxLength, boolean verifyChecksums) throws IOException; + ByteBuffer readBuffer(int maxLength, boolean verifyChecksums) throws IOException; /** * Release a ByteBuffer obtained from a read on the * Also move the in stream by that amount. The data read can be small than maxLength. 
      *
      */
-    public void releaseBuffer(ByteBuffer buffer);
+    void releaseBuffer(ByteBuffer buffer);
   }
-  public enum DirectCompressionType {
+  enum DirectCompressionType {
     NONE,
     ZLIB_NOHEADER,
     ZLIB,
     SNAPPY,
-  };
+  }
-  public interface DirectDecompressorShim {
-    public void decompress(ByteBuffer src, ByteBuffer dst) throws IOException;
+  interface DirectDecompressorShim {
+    void decompress(ByteBuffer src, ByteBuffer dst) throws IOException;
   }
-  public DirectDecompressorShim getDirectDecompressor(DirectCompressionType codec);
+  DirectDecompressorShim getDirectDecompressor(DirectCompressionType codec);
   /**
    * Get configuration from JobContext
    */
-  public Configuration getConfiguration(JobContext context);
+  Configuration getConfiguration(JobContext context);
-  public FileSystem getNonCachedFileSystem(URI uri, Configuration conf) throws IOException;
+  FileSystem getNonCachedFileSystem(URI uri, Configuration conf) throws IOException;
-  public void getMergedCredentials(JobConf jobConf) throws IOException;
+  void getMergedCredentials(JobConf jobConf) throws IOException;
 }
diff --git testutils/ptest2/src/main/java/org/apache/hive/ptest/api/Status.java testutils/ptest2/src/main/java/org/apache/hive/ptest/api/Status.java
index e9d0ee1..b891a7d 100644
--- testutils/ptest2/src/main/java/org/apache/hive/ptest/api/Status.java
+++ testutils/ptest2/src/main/java/org/apache/hive/ptest/api/Status.java
@@ -50,14 +50,14 @@ public String toString() {
     return "Status [name=" + name + ", message=" + message + "]";
   }
-  public static enum Name {
+  public enum Name {
     ILLEGAL_ARGUMENT(),
     QUEUE_FULL(),
     INTERNAL_ERROR(),
     PENDING(),
     IN_PROGRESS(),
     FAILED(),
-    OK();
+    OK()
   }
   public static void assertOK(Status status) {
     if(!isOK(status)) {
diff --git testutils/ptest2/src/main/java/org/apache/hive/ptest/api/response/GenericResponse.java testutils/ptest2/src/main/java/org/apache/hive/ptest/api/response/GenericResponse.java
index 257988e..13e8718 100644
--- testutils/ptest2/src/main/java/org/apache/hive/ptest/api/response/GenericResponse.java
+++ testutils/ptest2/src/main/java/org/apache/hive/ptest/api/response/GenericResponse.java
@@ -21,5 +21,5 @@ import org.apache.hive.ptest.api.Status;
 public interface GenericResponse {
-  public Status getStatus();
+  Status getStatus();
 }
diff --git testutils/ptest2/src/main/java/org/apache/hive/ptest/api/server/TestLogger.java testutils/ptest2/src/main/java/org/apache/hive/ptest/api/server/TestLogger.java
index d8c6d2e..42cbfb5 100644
--- testutils/ptest2/src/main/java/org/apache/hive/ptest/api/server/TestLogger.java
+++ testutils/ptest2/src/main/java/org/apache/hive/ptest/api/server/TestLogger.java
@@ -42,14 +42,14 @@ public TestLogger(PrintStream logFile, LEVEL level) {
     mDateFormatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");
   }
-  public static enum LEVEL {
+  public enum LEVEL {
     TRACE(1),
     DEBUG(2),
     INFO(3),
     WARN(4),
     ERROR(5);
     private int index;
-    private LEVEL(int index) {
+    LEVEL(int index) {
       this.index = index;
     }
   }
diff --git testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutorBuilder.java testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutorBuilder.java
index 3603752..0825208 100644
--- testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutorBuilder.java
+++ testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/HostExecutorBuilder.java
@@ -21,5 +21,5 @@ import org.apache.hive.ptest.execution.conf.Host;
 public interface HostExecutorBuilder {
-  public HostExecutor build(Host host);
+  HostExecutor build(Host host);
 }
diff --git testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/LocalCommand.java testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/LocalCommand.java
index ec99656..b4be84e 100644
--- testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/LocalCommand.java
+++ testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/LocalCommand.java
@@ -48,16 +48,16 @@ public int getExitCode() throws InterruptedException {
       return exitCode;
     }
   }
-  
+
   public void kill() {
     synchronized (process) {
       process.destroy();
     }
   }
-  public static interface OutputPolicy {
-    public void handleOutput(String line);
-    public void handleThrowable(Throwable throwable);
+  public interface OutputPolicy {
+    void handleOutput(String line);
+    void handleThrowable(Throwable throwable);
   }
   public static class CollectLogPolicy extends CollectPolicy {
     private final Logger logger;
@@ -119,4 +119,4 @@ public void run() {
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/TestBatch.java testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/TestBatch.java
index fc5a7c5..57c6bd5 100644
--- testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/TestBatch.java
+++ testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/TestBatch.java
@@ -20,12 +20,12 @@
 public interface TestBatch {
-  public String getTestArguments();
-
-  public String getTestClass();
+  String getTestArguments();
-  public String getName();
+  String getTestClass();
-  public boolean isParallel();
+  String getName();
+
+  boolean isParallel();
 }
diff --git testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/context/ExecutionContextProvider.java testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/context/ExecutionContextProvider.java
index e0bbe5e..967e2f9 100644
--- testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/context/ExecutionContextProvider.java
+++ testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/context/ExecutionContextProvider.java
@@ -21,18 +21,19 @@ import org.apache.hive.ptest.execution.conf.Context;
 public interface ExecutionContextProvider {
-  static final String PRIVATE_KEY = "privateKey";
+  String PRIVATE_KEY = "privateKey";
-  public ExecutionContext createExecutionContext() throws CreateHostsFailedException, ServiceNotAvailableException;
+  ExecutionContext createExecutionContext()
+      throws CreateHostsFailedException, ServiceNotAvailableException;
-  public void replaceBadHosts(ExecutionContext executionContext)throws CreateHostsFailedException ;
+  void replaceBadHosts(ExecutionContext executionContext)throws CreateHostsFailedException ;
-  public void terminate(ExecutionContext executionContext);
+  void terminate(ExecutionContext executionContext);
-  public void close();
+  void close();
-  public interface Builder {
-    public ExecutionContextProvider build(Context context, String workingDirectory) throws Exception;
+  interface Builder {
+    ExecutionContextProvider build(Context context, String workingDirectory) throws Exception;
   }
 }
diff --git testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/ssh/RSyncCommand.java testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/ssh/RSyncCommand.java
index 24e9d57..5f51ce3 100644
--- testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/ssh/RSyncCommand.java
+++ testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/ssh/RSyncCommand.java
@@ -55,8 +55,8 @@ public String toString() {
         + getHost() + ", getInstance()=" + getInstance() + "]";
   }
-  public static enum Type {
+  public enum Type {
     FROM_LOCAL(),
-    TO_LOCAL();
+    TO_LOCAL()
   }
-}
\ No newline at end of file
+}
diff --git testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/ssh/RemoteCommandResult.java testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/ssh/RemoteCommandResult.java
index 386866d..68c6186 100644
--- testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/ssh/RemoteCommandResult.java
+++ testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/ssh/RemoteCommandResult.java
@@ -19,10 +19,10 @@ package org.apache.hive.ptest.execution.ssh;
 public interface RemoteCommandResult {
-  public int getExitCode();
-  public Exception getException();
-  public String getOutput();
-  public String getUser();
-  public String getHost();
-  public int getInstance();
+  int getExitCode();
+  Exception getException();
+  String getOutput();
+  String getUser();
+  String getHost();
+  int getInstance();
 }