diff --git beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java index 7e6846d..513d13c 100644 --- beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java +++ beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java @@ -61,7 +61,7 @@ public static final int DEFAULT_MAX_COLUMN_WIDTH = 50; public static final int DEFAULT_INCREMENTAL_BUFFER_ROWS = 1000; - public static String URL_ENV_PREFIX = "BEELINE_URL_"; + public static final String URL_ENV_PREFIX = "BEELINE_URL_"; private final BeeLine beeLine; private boolean autosave = false; diff --git beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java index 181f0d2..711f6a8 100644 --- beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java +++ beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java @@ -292,7 +292,7 @@ protected HiveConf getHiveConf() { // Derby commandline parser public static class DerbyCommandParser extends AbstractCommandParser { - private static String DERBY_NESTING_TOKEN = "RUN"; + private static final String DERBY_NESTING_TOKEN = "RUN"; public DerbyCommandParser(String dbOpts, String msUsername, String msPassword, HiveConf hiveConf) { @@ -380,11 +380,11 @@ public String cleanseCommand(String dbCommand) { // Postgres specific parser public static class PostgresCommandParser extends AbstractCommandParser { - private static String POSTGRES_NESTING_TOKEN = "\\i"; + private static final String POSTGRES_NESTING_TOKEN = "\\i"; @VisibleForTesting - public static String POSTGRES_STANDARD_STRINGS_OPT = "SET standard_conforming_strings"; + public static final String POSTGRES_STANDARD_STRINGS_OPT = "SET standard_conforming_strings"; @VisibleForTesting - public static String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81"; + public static final String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81"; public PostgresCommandParser(String dbOpts, String msUsername, String msPassword, HiveConf hiveConf) { @@ -427,7 +427,7 @@ public boolean isNonExecCommand(String dbCommand) { //Oracle specific parser public static class OracleCommandParser extends AbstractCommandParser { - private static String ORACLE_NESTING_TOKEN = "@"; + private static final String ORACLE_NESTING_TOKEN = "@"; public OracleCommandParser(String dbOpts, String msUsername, String msPassword, HiveConf hiveConf) { @@ -451,7 +451,7 @@ public boolean isNestedScript(String dbCommand) { //MSSQL specific parser public static class MSSQLCommandParser extends AbstractCommandParser { - private static String MSSQL_NESTING_TOKEN = ":r"; + private static final String MSSQL_NESTING_TOKEN = ":r"; public MSSQLCommandParser(String dbOpts, String msUsername, String msPassword, HiveConf hiveConf) { diff --git cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java index f1806a0..24550fa 100644 --- cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java +++ cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java @@ -54,7 +54,7 @@ // In verbose mode, print an update per RECORD_PRINT_INTERVAL records private static final int RECORD_PRINT_INTERVAL = (1024*1024); - protected static boolean test=false; + protected boolean test = false; public RCFileCat() { super(); @@ -63,12 +63,12 @@ public RCFileCat() { onUnmappableCharacter(CodingErrorAction.REPLACE); } - private static CharsetDecoder decoder; + private CharsetDecoder decoder; Configuration conf = null; - private static 
String TAB ="\t"; - private static String NEWLINE ="\r\n"; + private static final String TAB ="\t"; + private static final String NEWLINE ="\r\n"; @Override public int run(String[] args) throws Exception { @@ -243,7 +243,7 @@ public void setConf(Configuration conf) { this.conf = conf; } - private static String Usage = "RCFileCat [--start=start_offet] [--length=len] [--verbose] " + + private static final String Usage = "RCFileCat [--start=start_offet] [--length=len] [--verbose] " + "[--column-sizes | --column-sizes-pretty] [--file-sizes] fileName"; public static void main(String[] args) { @@ -262,7 +262,7 @@ public static void main(String[] args) { } } - private static void setupBufferedOutput() { + private void setupBufferedOutput() { OutputStream pdataOut; if (test) { pdataOut = System.out; @@ -275,6 +275,7 @@ private static void setupBufferedOutput() { new PrintStream(bos, false); System.setOut(ps); } + private static void printUsage(String errorMsg) { System.err.println(Usage); if(errorMsg != null) { diff --git cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java index 11ceb31..4cb4a19 100644 --- cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java +++ cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java @@ -25,8 +25,6 @@ import java.io.File; import java.io.IOException; import java.io.PrintStream; -import java.net.URI; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -77,7 +75,7 @@ public void testRCFileCat() throws Exception { writer.close(); RCFileCat fileCat = new RCFileCat(); - RCFileCat.test=true; + fileCat.test=true; fileCat.setConf(new Configuration()); // set fake input and output streams diff --git common/src/java/org/apache/hadoop/hive/common/LogUtils.java common/src/java/org/apache/hadoop/hive/common/LogUtils.java index c2a0d9a..01b2e7c 100644 --- common/src/java/org/apache/hadoop/hive/common/LogUtils.java +++ common/src/java/org/apache/hadoop/hive/common/LogUtils.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.core.LoggerContext; import org.apache.logging.log4j.core.config.Configurator; import org.apache.logging.log4j.core.impl.Log4jContextFactory; import org.slf4j.Logger; @@ -45,8 +44,8 @@ /** * Constants for log masking */ - private static String KEY_TO_MASK_WITH = "password"; - private static String MASKED_VALUE = "###_MASKED_###"; + private static final String KEY_TO_MASK_WITH = "password"; + private static final String MASKED_VALUE = "###_MASKED_###"; @SuppressWarnings("serial") public static class LogInitializationException extends Exception { diff --git common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java index 926b4a6..a9e17c2 100644 --- common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java +++ common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java @@ -49,7 +49,7 @@ public class StatsSetupConst { - protected final static Logger LOG = LoggerFactory.getLogger(StatsSetupConst.class.getName()); + protected static final Logger LOG = LoggerFactory.getLogger(StatsSetupConst.class.getName()); public enum StatDB { fs { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java 
metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java index 9c30ee7..225e82a 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java @@ -19,7 +19,6 @@ import java.io.BufferedReader; import java.io.File; -import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileReader; import java.io.IOException; @@ -34,14 +33,13 @@ public class MetaStoreSchemaInfo { - private static String SQL_FILE_EXTENSION=".sql"; - private static String UPGRADE_FILE_PREFIX="upgrade-"; - private static String INIT_FILE_PREFIX="hive-schema-"; - private static String VERSION_UPGRADE_LIST = "upgrade.order"; - private static String PRE_UPGRADE_PREFIX = "pre-"; + private static final String SQL_FILE_EXTENSION=".sql"; + private static final String UPGRADE_FILE_PREFIX="upgrade-"; + private static final String INIT_FILE_PREFIX="hive-schema-"; + private static final String VERSION_UPGRADE_LIST = "upgrade.order"; + private static final String PRE_UPGRADE_PREFIX = "pre-"; private final String dbType; private final String hiveSchemaVersions[]; - private final HiveConf hiveConf; private final String hiveHome; // Some version upgrades often don't change schema. So they are equivalent to @@ -58,7 +56,6 @@ public MetaStoreSchemaInfo(String hiveHome, HiveConf hiveConf, String dbType) throws HiveMetaException { this.hiveHome = hiveHome; this.dbType = dbType; - this.hiveConf = hiveConf; // load upgrade order for the given dbType List upgradeOrderList = new ArrayList(); String upgradeListFile = getMetaStoreScriptDir() + File.separator + diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java index 6381a21..f7fad94 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java @@ -28,8 +28,6 @@ import java.util.List; import java.util.Map; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.MetaStoreUtils; @@ -40,7 +38,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.shims.HadoopShims; /** * ArchiveUtils. @@ -48,9 +45,7 @@ */ @SuppressWarnings("nls") public final class ArchiveUtils { - private static final Logger LOG = LoggerFactory.getLogger(ArchiveUtils.class.getName()); - - public static String ARCHIVING_LEVEL = "archiving_level"; + public static final String ARCHIVING_LEVEL = "archiving_level"; /** * PartSpecInfo keeps fields and values extracted from partial partition info diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java index 4ac25c2..b5f9424 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java @@ -764,7 +764,7 @@ public static TypeInfo getCommonClassForUnionAll(TypeInfo a, TypeInfo b) { * * @return null if no common class could be found. 
*/ - public static TypeInfo getCommonClassForComparison(TypeInfo a, TypeInfo b) { + public static synchronized TypeInfo getCommonClassForComparison(TypeInfo a, TypeInfo b) { // If same return one of them if (a.equals(b)) { return a; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 6693134..79955e9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -143,7 +143,6 @@ import org.apache.hadoop.mapred.SequenceFileInputFormat; import org.apache.hadoop.mapred.SequenceFileOutputFormat; import org.apache.hadoop.util.Progressable; -import org.apache.hadoop.util.Shell; import org.apache.hive.common.util.ACLConfigurationParser; import org.apache.hive.common.util.ReflectionUtil; import org.slf4j.Logger; @@ -213,11 +212,11 @@ * The object in the reducer are composed of these top level fields. */ - public static String HADOOP_LOCAL_FS = "file:///"; + public static final String HADOOP_LOCAL_FS = "file:///"; public static final String HADOOP_LOCAL_FS_SCHEME = "file"; - public static String MAP_PLAN_NAME = "map.xml"; - public static String REDUCE_PLAN_NAME = "reduce.xml"; - public static String MERGE_PLAN_NAME = "merge.xml"; + public static final String MAP_PLAN_NAME = "map.xml"; + public static final String REDUCE_PLAN_NAME = "reduce.xml"; + public static final String MERGE_PLAN_NAME = "merge.xml"; public static final String INPUT_NAME = "iocontext.input.name"; public static final String HAS_MAP_WORK = "has.map.work"; public static final String HAS_REDUCE_WORK = "has.reduce.work"; @@ -226,11 +225,11 @@ public static final String HIVE_ADDED_JARS = "hive.added.jars"; public static final String VECTOR_MODE = "VECTOR_MODE"; public static final String USE_VECTORIZED_INPUT_FILE_FORMAT = "USE_VECTORIZED_INPUT_FILE_FORMAT"; - public static String MAPNAME = "Map "; - public static String REDUCENAME = "Reducer "; + public static final String MAPNAME = "Map "; + public static final String REDUCENAME = "Reducer "; @Deprecated - protected static String DEPRECATED_MAPRED_DFSCLIENT_PARALLELISM_MAX = "mapred.dfsclient.parallelism.max"; + protected static final String DEPRECATED_MAPRED_DFSCLIENT_PARALLELISM_MAX = "mapred.dfsclient.parallelism.max"; /** * ReduceField: @@ -690,8 +689,8 @@ protected void initialize(Class type, Object oldInstance, Object newInstance, En // Note: When DDL supports specifying what string to represent null, // we should specify "NULL" to represent null in the temp table, and then // we can make the following translation deprecated. 
- public static String nullStringStorage = "\\N"; - public static String nullStringOutput = "NULL"; + public static final String nullStringStorage = "\\N"; + public static final String nullStringOutput = "NULL"; public static Random randGen = new Random(); @@ -2538,7 +2537,7 @@ public static void setColumnTypeList(JobConf jobConf, Operator op, boolean exclu setColumnTypeList(jobConf, rowSchema, excludeVCs); } - public static String suffix = ".hashtable"; + public static final String suffix = ".hashtable"; public static Path generatePath(Path basePath, String dumpFilePrefix, Byte tag, String bigBucketFileName) { diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java index 5b0c2bf..2e27fd5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java @@ -1359,7 +1359,7 @@ private String getNewInstanceArgumentString(Object [] args) { return "arguments: " + Arrays.toString(args) + ", argument classes: " + argClasses.toString(); } - private static int STACK_LENGTH_LIMIT = 15; + private static final int STACK_LENGTH_LIMIT = 15; public static String getStackTraceAsSingleLine(Throwable e) { StringBuilder sb = new StringBuilder(); diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java index 6383e8a..266365e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/CuckooSetBytes.java @@ -39,8 +39,8 @@ private int salt = 0; private Random gen = new Random(676983475); private int rehashCount = 0; - private static long INT_MASK = 0x00000000ffffffffL; - private static long BYTE_MASK = 0x00000000000000ffL; + private static final long INT_MASK = 0x00000000ffffffffL; + private static final long BYTE_MASK = 0x00000000000000ffL; /** * Allocate a new set to hold expectedSize values. Re-allocation to expand diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java index 9030e5f..b6db3bc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTable.java @@ -40,10 +40,10 @@ protected int metricExpands; // 2^30 (we cannot use Integer.MAX_VALUE which is 2^31-1). 
- public static int HIGHEST_INT_POWER_OF_2 = 1073741824; + public static final int HIGHEST_INT_POWER_OF_2 = 1073741824; - public static int ONE_QUARTER_LIMIT = HIGHEST_INT_POWER_OF_2 / 4; - public static int ONE_SIXTH_LIMIT = HIGHEST_INT_POWER_OF_2 / 6; + public static final int ONE_QUARTER_LIMIT = HIGHEST_INT_POWER_OF_2 / 4; + public static final int ONE_SIXTH_LIMIT = HIGHEST_INT_POWER_OF_2 / 6; public void throwExpandError(int limit, String dataTypeName) { throw new RuntimeException( diff --git ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java index 6582cdd..c23d202 100644 --- ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java @@ -46,13 +46,12 @@ * Each session uses a new object, which creates a new file. */ public class HiveHistoryImpl implements HiveHistory{ + private static final Logger LOG = LoggerFactory.getLogger("hive.ql.exec.HiveHistoryImpl"); PrintWriter histStream; // History File stream String histFileName; // History file name - private static final Logger LOG = LoggerFactory.getLogger("hive.ql.exec.HiveHistoryImpl"); - private static final Random randGen = new Random(); private LogHelper console; @@ -305,7 +304,7 @@ public void progressTask(String queryId, Task task) { /** * write out counters. */ - static ThreadLocal> ctrMapFactory = + static final ThreadLocal> ctrMapFactory = new ThreadLocal>() { @Override protected Map initialValue() { diff --git ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java index a1408e9..2c3ba7f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java +++ ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndex.java @@ -26,10 +26,8 @@ * Holds index related constants */ public class HiveIndex { - public static final Logger l4j = LoggerFactory.getLogger("HiveIndex"); - - public static String INDEX_TABLE_CREATETIME = "hive.index.basetbl.dfs.lastModifiedTime"; + public static final String INDEX_TABLE_CREATETIME = "hive.index.basetbl.dfs.lastModifiedTime"; public static enum IndexType { AGGREGATE_TABLE("aggregate", AggregateIndexHandler.class.getName()), diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java index 7727114..cc69c7e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java @@ -87,7 +87,7 @@ public static class FileChecker { // we don't have many file formats that implement InputFormatChecker. We won't be holding // multiple instances of such classes - private static int MAX_CACHE_SIZE = 16; + private static final int MAX_CACHE_SIZE = 16; // immutable maps Map, Class> inputFormatCheckerMap; diff --git ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index 4995bdf..010b88c 100755 --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -83,14 +83,13 @@ */ public class HiveInputFormat implements InputFormat, JobConfigurable { - private static final String CLASS_NAME = HiveInputFormat.class.getName(); private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME); /** * A cache of InputFormat instances. 
*/ - private static Map> inputFormats + private static final Map> inputFormats = new ConcurrentHashMap>(); private JobConf job; diff --git ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java index d391164..f41edc4 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java @@ -839,7 +839,7 @@ public static Metadata createMetadata(Text... values) { // the max size of memory for buffering records before writes them out private int columnsBufferSize = 4 * 1024 * 1024; // 4M // the conf string for COLUMNS_BUFFER_SIZE - public static String COLUMNS_BUFFER_SIZE_CONF_STR = "hive.io.rcfile.record.buffer.size"; + public static final String COLUMNS_BUFFER_SIZE_CONF_STR = "hive.io.rcfile.record.buffer.size"; // how many records already buffered private int bufferedRecords = 0; diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java index 369584b..389bce5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java @@ -158,7 +158,7 @@ } private static final Logger LOG = LoggerFactory.getLogger(OrcInputFormat.class); - private static boolean isDebugEnabled = LOG.isDebugEnabled(); + private static final boolean isDebugEnabled = LOG.isDebugEnabled(); static final HadoopShims SHIMS = ShimLoader.getHadoopShims(); private static final long DEFAULT_MIN_SPLIT_SIZE = 16 * 1024 * 1024; @@ -1914,10 +1914,6 @@ public float getProgress() throws IOException { } } - // The schema type description does not include the ACID fields (i.e. it is the - // non-ACID original schema). - private static boolean SCHEMA_TYPES_IS_ORIGINAL = true; - @Override public RowReader getReader(InputSplit inputSplit, Options options) diff --git ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java index 90b1dff..77bce97 100644 --- ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java @@ -31,7 +31,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.StatsSetupConst.StatDB; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.Context; @@ -58,7 +57,6 @@ import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.mapred.Counters; import org.apache.hadoop.mapred.FileInputFormat; -import org.apache.hadoop.mapred.InputFormat; import org.apache.hadoop.mapred.JobClient; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RunningJob; @@ -77,8 +75,6 @@ @SuppressWarnings( { "deprecation"}) public class PartialScanTask extends Task implements Serializable, HadoopJobExecHook { - - private static final long serialVersionUID = 1L; protected transient JobConf job; @@ -274,7 +270,7 @@ public String getName() { return "RCFile Statistics Partial Scan"; } - public static String INPUT_SEPERATOR = ":"; + public static final String INPUT_SEPERATOR = ":"; public static void main(String[] args) { String inputPathStr = null; diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java 
index 044d64c..2435bf1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java @@ -60,7 +60,7 @@ */ GROUPINGID("GROUPING__ID", TypeInfoFactory.intTypeInfo); - public static ImmutableSet VIRTUAL_COLUMN_NAMES = + public static final ImmutableSet VIRTUAL_COLUMN_NAMES = ImmutableSet.of(FILENAME.getName(), BLOCKOFFSET.getName(), ROWOFFSET.getName(), RAWDATASIZE.getName(), GROUPINGID.getName(), ROWID.getName()); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 0e67ea6..88bf829 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -130,11 +130,7 @@ * map-reduce tasks. */ public final class GenMapRedUtils { - private static Logger LOG; - - static { - LOG = LoggerFactory.getLogger("org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils"); - } + private static final Logger LOG = LoggerFactory.getLogger("org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils"); public static boolean needsTagging(ReduceWork rWork) { return rWork != null && (rWork.getReducer().getClass() == JoinOperator.class || diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java index 4d3e74e..88b8119 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java @@ -37,10 +37,10 @@ public final class ListBucketingPrunerUtils { /* Default list bucketing directory name. internal use only not for client. */ - public static String HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME = + public static final String HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME = "HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME"; /* Default list bucketing directory key. internal use only not for client. 
*/ - public static String HIVE_LIST_BUCKETING_DEFAULT_KEY = "HIVE_DEFAULT_LIST_BUCKETING_KEY"; + public static final String HIVE_LIST_BUCKETING_DEFAULT_KEY = "HIVE_DEFAULT_LIST_BUCKETING_KEY"; /** * Decide if pruner skips the skewed directory diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java index 93202c3..93b8a5d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java @@ -383,11 +383,11 @@ public static boolean skewJoinEnabled(HiveConf conf, JoinOperator joinOp) { return true; } - private static String skewJoinPrefix = "hive_skew_join"; - private static String UNDERLINE = "_"; - private static String BIGKEYS = "bigkeys"; - private static String SMALLKEYS = "smallkeys"; - private static String RESULTS = "results"; + private static final String skewJoinPrefix = "hive_skew_join"; + private static final String UNDERLINE = "_"; + private static final String BIGKEYS = "bigkeys"; + private static final String SMALLKEYS = "smallkeys"; + private static final String RESULTS = "results"; static Path getBigKeysDir(Path baseDir, Byte srcTbl) { return StringInternUtils.internUriStringsInPath( diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index 50eda15..2cc5e24 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -37,7 +37,6 @@ import org.apache.commons.lang.ArrayUtils; import org.apache.calcite.util.Pair; -import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang3.tuple.ImmutablePair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,8 +44,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.*; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; -import org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask; -import org.apache.hadoop.hive.ql.exec.persistence.MapJoinBytesTableContainer; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey; import org.apache.hadoop.hive.ql.exec.spark.SparkTask; import org.apache.hadoop.hive.ql.exec.tez.TezTask; @@ -70,10 +67,8 @@ import org.apache.hadoop.hive.ql.exec.vector.ColumnVector.Type; import org.apache.hadoop.hive.ql.exec.vector.VectorColumnOutputMapping; import org.apache.hadoop.hive.ql.exec.vector.VectorColumnSourceMapping; -import org.apache.hadoop.hive.ql.exec.vector.VectorFilterOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOuterFilteredOperator; -import org.apache.hadoop.hive.ql.exec.vector.VectorSMBMapJoinOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext; import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext.HiveVectorAdaptorUsageMode; import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext.InConstantType; @@ -81,7 +76,6 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.IdentityExpression; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression; -import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; import 
org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx; import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; @@ -107,16 +101,13 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; -import org.apache.hadoop.hive.ql.plan.FetchWork; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc; import org.apache.hadoop.hive.ql.plan.GroupByDesc; -import org.apache.hadoop.hive.ql.plan.HashTableSinkDesc; import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.ql.plan.LimitDesc; import org.apache.hadoop.hive.ql.plan.MapJoinDesc; import org.apache.hadoop.hive.ql.plan.MapWork; -import org.apache.hadoop.hive.ql.plan.MapredLocalWork; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.SelectDesc; @@ -130,7 +121,6 @@ import org.apache.hadoop.hive.ql.plan.VectorSparkPartitionPruningSinkDesc; import org.apache.hadoop.hive.ql.plan.VectorLimitDesc; import org.apache.hadoop.hive.ql.plan.VectorMapJoinInfo; -import org.apache.hadoop.hive.ql.plan.VectorPartitionConversion; import org.apache.hadoop.hive.ql.plan.VectorSMBJoinDesc; import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; @@ -149,7 +139,6 @@ import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableKind; import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.OperatorVariation; import org.apache.hadoop.hive.ql.plan.VectorPartitionDesc.VectorDeserializeType; -import org.apache.hadoop.hive.ql.plan.VectorMapJoinInfo; import org.apache.hadoop.hive.ql.plan.VectorReduceSinkDesc; import org.apache.hadoop.hive.ql.plan.VectorReduceSinkInfo; import org.apache.hadoop.hive.ql.plan.VectorPartitionDesc; @@ -198,14 +187,11 @@ import org.apache.hadoop.hive.ql.udf.UDFYear; import org.apache.hadoop.hive.ql.udf.generic.*; import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.NullStructSerDe; import org.apache.hadoop.hive.serde2.SerDeUtils; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe; -import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.serde2.SerDeUtils; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; @@ -219,8 +205,6 @@ import org.apache.hadoop.mapred.SequenceFileInputFormat; import org.apache.hadoop.mapred.TextInputFormat; import org.apache.hive.common.util.AnnotationUtils; -import org.apache.hive.common.util.HiveStringUtils; -import org.apache.hive.common.util.ReflectionUtil; import org.apache.hadoop.util.ReflectionUtils; import com.google.common.base.Preconditions; @@ -264,16 +248,12 @@ supportedDataTypesPattern = Pattern.compile(patternBuilder.toString()); } - private List> vectorizableTasks = - new ArrayList>(); private Set> supportedGenericUDFs = new HashSet>(); private Set supportedAggregationUdfs = new HashSet(); private HiveConf hiveConf; - private boolean isSpark; - private boolean useVectorizedInputFileFormat; private boolean useVectorDeserialize; private boolean useRowDeserialize; @@ -456,8 
+436,6 @@ public Vectorizer() { Set> nonVectorizedOps; - TableScanOperator tableScanOperator; - VectorTaskColumnInfo() { partitionColumnCount = 0; } @@ -500,7 +478,6 @@ public void setNonVectorizedOps(Set> nonVectori this.nonVectorizedOps = nonVectorizedOps; } public void setTableScanOperator(TableScanOperator tableScanOperator) { - this.tableScanOperator = tableScanOperator; } public Set> getNonVectorizedOps() { @@ -540,10 +517,7 @@ public void transferToBaseWork(BaseWork baseWork) { class VectorizationDispatcher implements Dispatcher { - private final PhysicalContext physicalContext; - public VectorizationDispatcher(PhysicalContext physicalContext) { - this.physicalContext = physicalContext; } @Override @@ -1519,14 +1493,12 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, class MapWorkVectorizationNodeProcessor extends VectorizationNodeProcessor { - private final MapWork mWork; private final VectorTaskColumnInfo vectorTaskColumnInfo; private final boolean isTezOrSpark; public MapWorkVectorizationNodeProcessor(MapWork mWork, boolean isTezOrSpark, VectorTaskColumnInfo vectorTaskColumnInfo) { super(vectorTaskColumnInfo, vectorTaskColumnInfo.getNonVectorizedOps()); - this.mWork = mWork; this.vectorTaskColumnInfo = vectorTaskColumnInfo; this.isTezOrSpark = isTezOrSpark; } @@ -1684,8 +1656,6 @@ public PhysicalContext resolve(PhysicalContext physicalContext) throws SemanticE return physicalContext; } - isSpark = (HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")); - useVectorizedInputFileFormat = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_USE_VECTORIZED_INPUT_FILE_FORMAT); @@ -2579,8 +2549,6 @@ private boolean canSpecializeMapJoin(Operator op, MapJoi List keyDesc = desc.getKeys().get(posBigTable); VectorExpression[] allBigTableKeyExpressions = vContext.getVectorExpressions(keyDesc); final int allBigTableKeyExpressionsLength = allBigTableKeyExpressions.length; - boolean isEmptyKey = (allBigTableKeyExpressionsLength == 0); - boolean supportsKeyTypes = true; // Assume. 
HashSet notSupportedKeyTypes = new HashSet(); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/VectorizerReason.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/VectorizerReason.java index e0a6198..30fdd30 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/VectorizerReason.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/VectorizerReason.java @@ -27,7 +27,7 @@ */ public class VectorizerReason { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; public static enum VectorizerNodeIssue { NONE, diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 36009bf..0b07b78 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -119,10 +119,10 @@ // whether any ACID table is involved in a query protected boolean acidInQuery; - public static int HIVE_COLUMN_ORDER_ASC = 1; - public static int HIVE_COLUMN_ORDER_DESC = 0; - public static int HIVE_COLUMN_NULLS_FIRST = 0; - public static int HIVE_COLUMN_NULLS_LAST = 1; + public static final int HIVE_COLUMN_ORDER_ASC = 1; + public static final int HIVE_COLUMN_ORDER_DESC = 0; + public static final int HIVE_COLUMN_NULLS_FIRST = 0; + public static final int HIVE_COLUMN_NULLS_LAST = 1; /** * ReadEntities that are passed to the hooks. diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index f175663..fc13292 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -1808,7 +1808,7 @@ private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) static class QualifiedNameUtil { // delimiter to check DOT delimited qualified names - static String delimiter = "\\."; + static final String delimiter = "\\."; /** * Get the fully qualified name in the ast. e.g. 
the ast of the form ^(DOT diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java index 01b5559..9b03d05 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java @@ -586,7 +586,7 @@ public String toString() */ public static class BoundarySpec implements Comparable { - public static int UNBOUNDED_AMOUNT = Integer.MAX_VALUE; + public static final int UNBOUNDED_AMOUNT = Integer.MAX_VALUE; Direction direction; int amt; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractVectorDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractVectorDesc.java index e85a418..48b7f92 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractVectorDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractVectorDesc.java @@ -18,11 +18,9 @@ package org.apache.hadoop.hive.ql.plan; -import org.apache.hadoop.hive.ql.exec.Operator; - public class AbstractVectorDesc implements VectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; private Class vectorOpClass; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java index 0b49294..38a9ef2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java @@ -50,7 +50,7 @@ * MERGEPARTIAL: FINAL for non-distinct aggregations, COMPLETE for distinct * aggregations. */ - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; /** * Mode. diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java index ca69697..940630c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java @@ -22,7 +22,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; @@ -32,13 +31,10 @@ import java.util.Set; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.exec.vector.VectorColumnOutputMapping; -import org.apache.hadoop.hive.ql.exec.vector.VectorColumnSourceMapping; import org.apache.hadoop.hive.ql.plan.Explain.Level; import org.apache.hadoop.hive.ql.plan.Explain.Vectorization; import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableImplementationType; import org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.OperatorVariation; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; /** * Map Join operator Descriptor implementation. @@ -392,7 +388,7 @@ public void setDynamicPartitionHashJoin(boolean isDistributedHashJoin) { } // Use LinkedHashSet to give predictable display order. 
- private static Set vectorizableMapJoinNativeEngines = + private static final Set vectorizableMapJoinNativeEngines = new LinkedHashSet(Arrays.asList("tez", "spark")); public class MapJoinOperatorExplainVectorization extends OperatorExplainVectorization { diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java index 9ae30ab..11e9c20 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java @@ -482,7 +482,7 @@ public void setHasOrderBy(boolean hasOrderBy) { } // Use LinkedHashSet to give predictable display order. - private static Set vectorizableReduceSinkNativeEngines = + private static final Set vectorizableReduceSinkNativeEngines = new LinkedHashSet(Arrays.asList("tez", "spark")); public class ReduceSinkOperatorExplainVectorization extends OperatorExplainVectorization { diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorAppMasterEventDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorAppMasterEventDesc.java index 2e11321..5aaf2ea 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorAppMasterEventDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorAppMasterEventDesc.java @@ -28,7 +28,7 @@ */ public class VectorAppMasterEventDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; public VectorAppMasterEventDesc() { } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFileSinkDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFileSinkDesc.java index 325ac91..5a00e5d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFileSinkDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFileSinkDesc.java @@ -28,7 +28,7 @@ */ public class VectorFileSinkDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; public VectorFileSinkDesc() { } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFilterDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFilterDesc.java index 6feed84..b457f44 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFilterDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorFilterDesc.java @@ -30,7 +30,7 @@ */ public class VectorFilterDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; private VectorExpression predicateExpression; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java index f8554e2..f9a8725 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorGroupByDesc.java @@ -31,7 +31,7 @@ */ public class VectorGroupByDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; /** * GLOBAL No key. 
All rows --> 1 full aggregation on end of input diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorLimitDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorLimitDesc.java index c9bc45a..4093800 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorLimitDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorLimitDesc.java @@ -28,7 +28,7 @@ */ public class VectorLimitDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; public VectorLimitDesc() { } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java index 3aa65d3..60400de 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinDesc.java @@ -35,7 +35,7 @@ */ public class VectorMapJoinDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; public static enum HashTableImplementationType { NONE, diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinInfo.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinInfo.java index 9429785..7432efa 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinInfo.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorMapJoinInfo.java @@ -36,7 +36,7 @@ */ public class VectorMapJoinInfo { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; private int[] bigTableKeyColumnMap; private String[] bigTableKeyColumnNames; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorPartitionDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorPartitionDesc.java index 4078c7d..bb8f5e1 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorPartitionDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorPartitionDesc.java @@ -34,7 +34,7 @@ */ public class VectorPartitionDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; // Data Type Conversion Needed? 
// diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java index 2eb44b8..445dcca 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkDesc.java @@ -28,7 +28,7 @@ */ public class VectorReduceSinkDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; public static enum ReduceSinkKeyType { NONE, diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkInfo.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkInfo.java index 8c35415..da6e606 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkInfo.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorReduceSinkInfo.java @@ -33,7 +33,7 @@ */ public class VectorReduceSinkInfo { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; private int[] reduceSinkKeyColumnMap; private TypeInfo[] reduceSinkKeyTypeInfos; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSMBJoinDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSMBJoinDesc.java index 031f11e..41919b6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSMBJoinDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSMBJoinDesc.java @@ -28,7 +28,7 @@ */ public class VectorSMBJoinDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; public VectorSMBJoinDesc() { } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSelectDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSelectDesc.java index c2c9450..1f6ef18 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSelectDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSelectDesc.java @@ -30,7 +30,7 @@ */ public class VectorSelectDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; private VectorExpression[] selectExpressions; private int[] projectedOutputColumns; diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkHashTableSinkDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkHashTableSinkDesc.java index 7fb59db..e43b0f6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkHashTableSinkDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkHashTableSinkDesc.java @@ -28,7 +28,7 @@ */ public class VectorSparkHashTableSinkDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; public VectorSparkHashTableSinkDesc() { } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkPartitionPruningSinkDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkPartitionPruningSinkDesc.java index c0bc7e4..6c65f83 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkPartitionPruningSinkDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorSparkPartitionPruningSinkDesc.java @@ -28,7 +28,7 @@ */ public class VectorSparkPartitionPruningSinkDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; public VectorSparkPartitionPruningSinkDesc() { } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTableScanDesc.java 
ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTableScanDesc.java index 6e5ebe4..84729a5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTableScanDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/VectorTableScanDesc.java @@ -28,7 +28,7 @@ */ public class VectorTableScanDesc extends AbstractVectorDesc { - private static long serialVersionUID = 1L; + private static final long serialVersionUID = 1L; private int[] projectedOutputColumns; diff --git ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java index 145808b..bd1b4f5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java +++ ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java @@ -36,7 +36,7 @@ DELETE(), COMPILE(); - public static boolean ONLY_FOR_TESTING = true; + public static final boolean ONLY_FOR_TESTING = true; private boolean usedOnlyForTesting; HiveCommand() { diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java index 2c42fae..da799e9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java @@ -93,12 +93,12 @@ protected boolean removeEldestEntry(Map.Entry eldest) { } - static Map extractObjectCache = new HashCache(); - static Map pathExprCache = new HashCache(); - static Map> indexListCache = + Map extractObjectCache = new HashCache(); + Map pathExprCache = new HashCache(); + Map> indexListCache = new HashCache>(); - static Map mKeyGroup1Cache = new HashCache(); - static Map mKeyMatchesCache = new HashCache(); + Map mKeyGroup1Cache = new HashCache(); + Map mKeyMatchesCache = new HashCache(); Text result = new Text(); diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInternalInterval.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInternalInterval.java index fcf291a..0b29c6a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInternalInterval.java +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInternalInterval.java @@ -51,7 +51,7 @@ public class GenericUDFInternalInterval extends GenericUDF { - private static Map processorMap; + private Map processorMap; private transient IntervalProcessor processor; private transient PrimitiveObjectInspector inputOI; @@ -286,7 +286,7 @@ protected HiveIntervalYearMonth getIntervalYearMonth(String arg) { } } - private static Map getProcessorMap() { + private Map getProcessorMap() { if (processorMap != null) { return processorMap; diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CheckFastRowHashMap.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CheckFastRowHashMap.java index 638ccc5..72fceb9 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CheckFastRowHashMap.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CheckFastRowHashMap.java @@ -90,16 +90,11 @@ public static void verifyHashMapRows(List rows, int[] actualToValueMap } } - private static String debugDetailedReadPositionString; - - private static String debugDetailedHashMapResultPositionString; - - private static String debugExceptionMessage; - private static StackTraceElement[] debugStackTrace; - public static void verifyHashMapRowsMore(List rows, int[] actualToValueMap, VectorMapJoinHashMapResult hashMapResult, TypeInfo[] typeInfos, int clipIndex, boolean useExactBytes) throws IOException { + String 
debugExceptionMessage = null; + StackTraceElement[] debugStackTrace = null; final int count = rows.size(); final int columnCount = typeInfos.length; @@ -134,7 +129,6 @@ public static void verifyHashMapRowsMore(List rows, int[] actualToValu boolean thrown = false; Exception saveException = null; - boolean notExpected = false; int index = 0; try { for (index = 0; index < columnCount; index++) { @@ -144,9 +138,9 @@ public static void verifyHashMapRowsMore(List rows, int[] actualToValu } catch (Exception e) { thrown = true; saveException = e; - debugDetailedReadPositionString = lazyBinaryDeserializeRead.getDetailedReadPositionString(); + lazyBinaryDeserializeRead.getDetailedReadPositionString(); - debugDetailedHashMapResultPositionString = hashMapResult.getDetailedHashMapResultPositionString(); + hashMapResult.getDetailedHashMapResultPositionString(); debugExceptionMessage = saveException.getMessage(); debugStackTrace = saveException.getStackTrace(); @@ -159,7 +153,6 @@ public static void verifyHashMapRowsMore(List rows, int[] actualToValu if (saveException instanceof EOFException) { // This is the one we are expecting. } else if (saveException instanceof ArrayIndexOutOfBoundsException) { - notExpected = true; } else { TestCase.fail("Expecting an EOFException to be thrown for the clipped case..."); } @@ -385,7 +378,7 @@ public void verify(VectorMapJoinFastHashTable map, } } - static int STACK_LENGTH_LIMIT = 20; + static final int STACK_LENGTH_LIMIT = 20; public static String getStackTraceAsSingleLine(StackTraceElement[] stackTrace) { StringBuilder sb = new StringBuilder(); sb.append("Stack trace: "); diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java index 90e8f33..3f61f8c 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/CommonFastHashTable.java @@ -31,11 +31,11 @@ protected static final int LARGE_CAPACITY = 8388608; protected static Random random; - protected static int MAX_KEY_LENGTH = 100; + protected static final int MAX_KEY_LENGTH = 100; - protected static int MAX_VALUE_LENGTH = 1000; + protected static final int MAX_VALUE_LENGTH = 1000; - public static int generateLargeCount() { + public static synchronized int generateLargeCount() { int count = 0; if (random.nextInt(100) != 0) { switch (random.nextInt(5)) { diff --git serde/src/java/org/apache/hadoop/hive/serde2/SerDeUtils.java serde/src/java/org/apache/hadoop/hive/serde2/SerDeUtils.java index 6802a05..5ecfbca 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/SerDeUtils.java +++ serde/src/java/org/apache/hadoop/hive/serde2/SerDeUtils.java @@ -65,7 +65,7 @@ public static final char COMMA = ','; // we should use '\0' for COLUMN_NAME_DELIMITER if column name contains COMMA // but we should also take care of the backward compatibility - public static char COLUMN_COMMENTS_DELIMITER = '\0'; + public static final char COLUMN_COMMENTS_DELIMITER = '\0'; public static final String LBRACKET = "["; public static final String RBRACKET = "]"; public static final String LBRACE = "{"; diff --git serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java index 88c3da9..ecfe15f 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java +++ 
serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java @@ -75,7 +75,7 @@ * Flag to print the re-encoding warning message only once. Avoid excessive logging for each * record encoding. */ - private static boolean warnedOnce = false; + private boolean warnedOnce = false; /** * When encountering a record with an older schema than the one we're trying * to read, it is necessary to re-encode with a reader against the newer schema. diff --git serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleDeserializeRead.java serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleDeserializeRead.java index 3b35baf..606b246 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleDeserializeRead.java +++ serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleDeserializeRead.java @@ -666,9 +666,7 @@ public void logExceptionMessage(byte[] bytes, int bytesStart, int bytesLength, S //------------------------------------------------------------------------------------------------ - private static byte[] maxLongBytes = ((Long) Long.MAX_VALUE).toString().getBytes(); - private static int maxLongDigitsCount = maxLongBytes.length; - private static byte[] minLongNoSignBytes = ((Long) Long.MIN_VALUE).toString().substring(1).getBytes(); + private static final byte[] maxLongBytes = ((Long) Long.MAX_VALUE).toString().getBytes(); public static int byteArrayCompareRanges(byte[] arg1, int start1, byte[] arg2, int start2, int len) { for (int i = 0; i < len; i++) { diff --git serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/StringToDouble.java serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/StringToDouble.java index f50b4fd..c784b69 100644 --- serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/StringToDouble.java +++ serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/StringToDouble.java @@ -15,12 +15,12 @@ import java.nio.charset.StandardCharsets; public class StringToDouble { - static int maxExponent = 511; /* Largest possible base 10 exponent. Any + static final int maxExponent = 511; /* Largest possible base 10 exponent. Any * exponent larger than this will already * produce underflow or overflow, so there's * no need to worry about additional digits. */ - static double powersOf10[] = { /* Table giving binary powers of 10. Entry */ + static final double powersOf10[] = { /* Table giving binary powers of 10. Entry */ 10., /* is 10^2^i. Used to convert decimal */ 100., /* exponents into floating-point numbers. 
     1.0e4,
diff --git serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java
index f4ac56f..8237b64 100644
--- serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java
+++ serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java
@@ -402,7 +402,7 @@ public static int writeVLongToByteArray(byte[] bytes, int offset, long l) {
     return 1 + len;
   }
 
-  public static int VLONG_BYTES_LEN = 9;
+  public static final int VLONG_BYTES_LEN = 9;
 
   private static ThreadLocal vLongBytesThreadLocal = new ThreadLocal() {
     @Override
diff --git serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
index 14349fa..54964e4 100644
--- serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
+++ serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
@@ -869,7 +869,7 @@ public static int getCharacterLengthForType(PrimitiveTypeInfo typeInfo) {
     }
   }
 
-  public static void registerNumericType(PrimitiveCategory primitiveCategory, int level) {
+  public static synchronized void registerNumericType(PrimitiveCategory primitiveCategory, int level) {
     numericTypeList.add(primitiveCategory);
     numericTypes.put(primitiveCategory, level);
   }
diff --git shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
index 7270426..277738f 100644
--- shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
+++ shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
@@ -49,12 +49,12 @@ import com.google.common.collect.Iterables;
 
 public class HdfsUtils {
+  private static final Logger LOG = LoggerFactory.getLogger("shims.HdfsUtils");
   // TODO: this relies on HDFS not changing the format; we assume if we could get inode ID, this
   // is still going to work. Otherwise, file IDs can be turned off. Later, we should use
   // as public utility method in HDFS to obtain the inode-based path.
-  private static String HDFS_ID_PATH_PREFIX = "/.reserved/.inodes/";
-  static Logger LOG = LoggerFactory.getLogger("shims.HdfsUtils");
+  private static final String HDFS_ID_PATH_PREFIX = "/.reserved/.inodes/";
 
   public static Path getFileIdPath(
       FileSystem fileSystem, Path path, long fileId) {
diff --git shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandlerChain.java shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandlerChain.java
index a58f1f2..d937ddf 100644
--- shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandlerChain.java
+++ shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandlerChain.java
@@ -35,7 +35,7 @@
  */
 public class HiveIOExceptionHandlerChain {
 
-  public static String HIVE_IO_EXCEPTION_HANDLE_CHAIN = "hive.io.exception.handlers";
+  public static final String HIVE_IO_EXCEPTION_HANDLE_CHAIN = "hive.io.exception.handlers";
 
   @SuppressWarnings("unchecked")
   public static HiveIOExceptionHandlerChain getHiveIOExceptionHandlerChain(
diff --git shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandlerUtil.java shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandlerUtil.java
index d972edb..6af3c8c 100644
--- shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandlerUtil.java
+++ shims/common/src/main/java/org/apache/hadoop/hive/io/HiveIOExceptionHandlerUtil.java
@@ -24,10 +24,10 @@
 
 public class HiveIOExceptionHandlerUtil {
 
-  private static ThreadLocal handlerChainInstance =
+  private static final ThreadLocal handlerChainInstance =
       new ThreadLocal();
 
-  private static HiveIOExceptionHandlerChain get(JobConf job) {
+  private static synchronized HiveIOExceptionHandlerChain get(JobConf job) {
     HiveIOExceptionHandlerChain cache = HiveIOExceptionHandlerUtil.handlerChainInstance
         .get();
     if (cache == null) {
diff --git shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java
index 44f24b2..f15e7ff 100644
--- shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java
+++ shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java
@@ -32,7 +32,7 @@
  */
 public abstract class ShimLoader {
   private static final Logger LOG = LoggerFactory.getLogger(ShimLoader.class);
-  public static String HADOOP23VERSIONNAME = "0.23";
+  public static final String HADOOP23VERSIONNAME = "0.23";
 
   private static volatile HadoopShims hadoopShims;
   private static JettyShims jettyShims;
diff --git storage-api/src/java/org/apache/hadoop/hive/common/type/FastHiveDecimalImpl.java storage-api/src/java/org/apache/hadoop/hive/common/type/FastHiveDecimalImpl.java
index 7a565dd..f733c1e 100644
--- storage-api/src/java/org/apache/hadoop/hive/common/type/FastHiveDecimalImpl.java
+++ storage-api/src/java/org/apache/hadoop/hive/common/type/FastHiveDecimalImpl.java
@@ -145,7 +145,6 @@
    * Int: 8 decimal digits. An even number and 1/2 of MAX_LONGWORD_DECIMAL.
   */
  private static final int INTWORD_DECIMAL_DIGITS = 8;
-  private static final int MAX_INTWORD_DECIMAL = (int) powerOfTenTable[INTWORD_DECIMAL_DIGITS] - 1;
   private static final int MULTIPLER_INTWORD_DECIMAL = (int) powerOfTenTable[INTWORD_DECIMAL_DIGITS];
 
   /**
@@ -164,9 +163,6 @@
   private static final long MAX_HIGHWORD_DECIMAL = powerOfTenTable[HIGHWORD_DECIMAL_DIGITS] - 1;
 
-  private static long HIGHWORD_DIVIDE_FACTOR = powerOfTenTable[LONGWORD_DECIMAL_DIGITS - HIGHWORD_DECIMAL_DIGITS];
-  private static long HIGHWORD_MULTIPLY_FACTOR = powerOfTenTable[HIGHWORD_DECIMAL_DIGITS];
-
   // 38 * 2 or 76 full decimal maximum - (64 + 8) digits in 4 lower longs (4 digits here).
   private static final long FULL_MAX_HIGHWORD_DECIMAL =
       powerOfTenTable[MAX_DECIMAL_DIGITS * 2 - (FOUR_X_LONGWORD_DECIMAL_DIGITS + INTWORD_DECIMAL_DIGITS)] - 1;
@@ -189,11 +185,6 @@
       BigInteger.ONE.add(BIG_INTEGER_MAX_LONGWORD_DECIMAL);
   private static final BigInteger BIG_INTEGER_LONGWORD_MULTIPLIER_2X =
       BIG_INTEGER_LONGWORD_MULTIPLIER.multiply(BIG_INTEGER_LONGWORD_MULTIPLIER);
-  private static final BigInteger BIG_INTEGER_LONGWORD_MULTIPLIER_3X =
-      BIG_INTEGER_LONGWORD_MULTIPLIER_2X.multiply(BIG_INTEGER_LONGWORD_MULTIPLIER);
-  private static final BigInteger BIG_INTEGER_LONGWORD_MULTIPLIER_4X =
-      BIG_INTEGER_LONGWORD_MULTIPLIER_3X.multiply(BIG_INTEGER_LONGWORD_MULTIPLIER);
-
   private static final BigInteger BIG_INTEGER_MAX_HIGHWORD_DECIMAL =
       BigInteger.valueOf(MAX_HIGHWORD_DECIMAL);
   private static final BigInteger BIG_INTEGER_HIGHWORD_MULTIPLIER =
@@ -203,21 +194,21 @@
   // conversion.
 
   // There is only one blank in UTF-8.
-  private final static byte BYTE_BLANK = (byte) ' ';
+  private static final byte BYTE_BLANK = (byte) ' ';
 
-  private final static byte BYTE_DIGIT_ZERO = (byte) '0';
-  private final static byte BYTE_DIGIT_NINE = (byte) '9';
+  private static final byte BYTE_DIGIT_ZERO = (byte) '0';
+  private static final byte BYTE_DIGIT_NINE = (byte) '9';
 
   // Decimal point.
-  private final static byte BYTE_DOT = (byte) '.';
+  private static final byte BYTE_DOT = (byte) '.';
 
   // Sign.
-  private final static byte BYTE_MINUS = (byte) '-';
-  private final static byte BYTE_PLUS = (byte) '+';
+  private static final byte BYTE_MINUS = (byte) '-';
+  private static final byte BYTE_PLUS = (byte) '+';
 
   // Exponent E or e.
-  private final static byte BYTE_EXPONENT_LOWER = (byte) 'e';
-  private final static byte BYTE_EXPONENT_UPPER = (byte) 'E';
+  private static final byte BYTE_EXPONENT_LOWER = (byte) 'e';
+  private static final byte BYTE_EXPONENT_UPPER = (byte) 'E';
 
   //************************************************************************************************
   // Initialize (fastSetFrom*).
@@ -1758,7 +1749,7 @@ private static boolean doDecimalToBinaryConversion(
    *    4,611,686,018,427,387,904 or
    *    461,1686018427387904 (16 digit comma'd)
    */
-  private static FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_62 =
+  private static final FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_62 =
       new FastHiveDecimal(1, 1686018427387904L, 461L, 0, 19, 0);
 
   /*
@@ -1769,7 +1760,7 @@ private static boolean doDecimalToBinaryConversion(
    *    9,223,372,036,854,775,808 or
    *    922,3372036854775808 (16 digit comma'd)
    */
-  private static FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_63 =
+  private static final FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_63 =
       new FastHiveDecimal(1, 3372036854775808L, 922L, 0, 19, 0);
 
   /*
@@ -1784,7 +1775,7 @@ private static boolean doDecimalToBinaryConversion(
    *    42,535,295,865,117,307,932,921,825,928,971,026,432 or
    *    425352,9586511730793292,1825928971026432 (16 digit comma'd)
    */
-  private static FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_125 =
+  private static final FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_125 =
       new FastHiveDecimal(1, 1825928971026432L, 9586511730793292L, 425352L, 38, 0);
 
   /*
@@ -1797,7 +1788,7 @@ private static boolean doDecimalToBinaryConversion(
    *
    *    3*16 (48) + 15 --> 63 down shift.
    */
-  private static FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_63_INVERSE =
+  private static final FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_63_INVERSE =
       new FastHiveDecimal(1, 6994171142578125L, 5044340074528008L, 1084202172485L, 45, 0);
 
   /*
@@ -2141,7 +2132,7 @@ public static boolean fastSerializationUtilsWrite(OutputStream outputStream,
    *     72,057,594,037,927,936 or
    *     7,2057594037927936 (16 digit comma'd)
    */
-  private static FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_56 =
+  private static final FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_56 =
       new FastHiveDecimal(1, 2057594037927936L, 7L, 0, 17, 0);
 
   /*
@@ -2154,7 +2145,7 @@ public static boolean fastSerializationUtilsWrite(OutputStream outputStream,
    *     5,192,296,858,534,827,628,530,496,329,220,096 or
    *     51,9229685853482762,8530496329220096 (16 digit comma'd)
    */
-  private static FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_112 =
+  private static final FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_112 =
       new FastHiveDecimal(1, 8530496329220096L, 9229685853482762L, 51L, 34, 0);
 
   // Multiply by 1/2^56 or 1.387778780781445675529539585113525390625e-17 to divide by 2^56.
@@ -2164,7 +2155,7 @@ public static boolean fastSerializationUtilsWrite(OutputStream outputStream,
   //
   //     3*16 (48) + 8 --> 56 down shift.
   //
-  private static FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_56_INVERSE =
+  private static final FastHiveDecimal FAST_HIVE_DECIMAL_TWO_POWER_56_INVERSE =
       new FastHiveDecimal(1, 9585113525390625L, 8078144567552953L, 13877787L, 40, 0);
 
   /*
@@ -2175,16 +2166,16 @@ public static boolean fastSerializationUtilsWrite(OutputStream outputStream,
   private static final int BIG_INTEGER_BYTES_QUOTIENT_INTEGER_WORD_NUM = 3;
   private static final int BIG_INTEGER_BYTES_QUOTIENT_INTEGER_DIGIT_NUM = 8;
 
-  private static int INITIAL_SHIFT = 48;  // 56 bits minus 1 byte.
+  private static final int INITIAL_SHIFT = 48;  // 56 bits minus 1 byte.
 
   // Long masks and values.
-  private static long LONG_56_BIT_MASK = 0xFFFFFFFFFFFFFFL;
-  private static long LONG_TWO_TO_56_POWER = LONG_56_BIT_MASK + 1L;
-  private static long LONG_BYTE_MASK = 0xFFL;
-  private static long LONG_BYTE_HIGH_BIT_MASK = 0x80L;
+  private static final long LONG_56_BIT_MASK = 0xFFFFFFFFFFFFFFL;
+  private static final long LONG_TWO_TO_56_POWER = LONG_56_BIT_MASK + 1L;
+  private static final long LONG_BYTE_MASK = 0xFFL;
+  private static final long LONG_BYTE_HIGH_BIT_MASK = 0x80L;
 
   // Byte values.
-  private static byte BYTE_ALL_BITS = (byte) 0xFF;
+  private static final byte BYTE_ALL_BITS = (byte) 0xFF;
 
   /**
    * Convert bytes in the format used by BigInteger's toByteArray format (and accepted by its
@@ -2838,32 +2829,32 @@ public static int fastBigIntegerBytesScaled(
 
   // Decimal to Integer conversion.
 
   private static final int MAX_BYTE_DIGITS = 3;
-  private static FastHiveDecimal FASTHIVEDECIMAL_MIN_BYTE_VALUE_MINUS_ONE =
+  private static final FastHiveDecimal FASTHIVEDECIMAL_MIN_BYTE_VALUE_MINUS_ONE =
       new FastHiveDecimal((long) Byte.MIN_VALUE - 1L);
-  private static FastHiveDecimal FASTHIVEDECIMAL_MAX_BYTE_VALUE_PLUS_ONE =
+  private static final FastHiveDecimal FASTHIVEDECIMAL_MAX_BYTE_VALUE_PLUS_ONE =
       new FastHiveDecimal((long) Byte.MAX_VALUE + 1L);
 
   private static final int MAX_SHORT_DIGITS = 5;
-  private static FastHiveDecimal FASTHIVEDECIMAL_MIN_SHORT_VALUE_MINUS_ONE =
+  private static final FastHiveDecimal FASTHIVEDECIMAL_MIN_SHORT_VALUE_MINUS_ONE =
      new FastHiveDecimal((long) Short.MIN_VALUE - 1L);
-  private static FastHiveDecimal FASTHIVEDECIMAL_MAX_SHORT_VALUE_PLUS_ONE =
+  private static final FastHiveDecimal FASTHIVEDECIMAL_MAX_SHORT_VALUE_PLUS_ONE =
      new FastHiveDecimal((long) Short.MAX_VALUE + 1L);
 
   private static final int MAX_INT_DIGITS = 10;
-  private static FastHiveDecimal FASTHIVEDECIMAL_MIN_INT_VALUE_MINUS_ONE =
+  private static final FastHiveDecimal FASTHIVEDECIMAL_MIN_INT_VALUE_MINUS_ONE =
      new FastHiveDecimal((long) Integer.MIN_VALUE - 1L);
-  private static FastHiveDecimal FASTHIVEDECIMAL_MAX_INT_VALUE_PLUS_ONE =
+  private static final FastHiveDecimal FASTHIVEDECIMAL_MAX_INT_VALUE_PLUS_ONE =
      new FastHiveDecimal((long) Integer.MAX_VALUE + 1L);
 
-  private static FastHiveDecimal FASTHIVEDECIMAL_MIN_LONG_VALUE =
+  private static final FastHiveDecimal FASTHIVEDECIMAL_MIN_LONG_VALUE =
      new FastHiveDecimal(Long.MIN_VALUE);
-  private static FastHiveDecimal FASTHIVEDECIMAL_MAX_LONG_VALUE =
+  private static final FastHiveDecimal FASTHIVEDECIMAL_MAX_LONG_VALUE =
      new FastHiveDecimal(Long.MAX_VALUE);
 
   private static final int MAX_LONG_DIGITS = FASTHIVEDECIMAL_MAX_LONG_VALUE.fastIntegerDigitCount;
-  private static FastHiveDecimal FASTHIVEDECIMAL_MIN_LONG_VALUE_MINUS_ONE =
+  private static final FastHiveDecimal FASTHIVEDECIMAL_MIN_LONG_VALUE_MINUS_ONE =
      new FastHiveDecimal("-9223372036854775809");
-  private static FastHiveDecimal FASTHIVEDECIMAL_MAX_LONG_VALUE_PLUS_ONE =
+  private static final FastHiveDecimal FASTHIVEDECIMAL_MAX_LONG_VALUE_PLUS_ONE =
      new FastHiveDecimal("9223372036854775808");
 
   private static final BigInteger BIG_INTEGER_UNSIGNED_BYTE_MAX_VALUE =
       BIG_INTEGER_TWO.pow(Byte.SIZE).subtract(BigInteger.ONE);
@@ -9355,7 +9346,7 @@ public static void fastRaiseInvalidException(
 
   //************************************************************************************************
   // Decimal Debugging.
-  static int STACK_LENGTH_LIMIT = 20;
+  static final int STACK_LENGTH_LIMIT = 20;
 
   public static String getStackTraceAsSingleLine(StackTraceElement[] stackTrace) {
     StringBuilder sb = new StringBuilder();
     sb.append("Stack trace: ");
diff --git storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java
index 8d950a2..eeb3359 100644
--- storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java
+++ storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java
@@ -102,7 +102,7 @@ public static Date getRandDate(Random r) {
   public static final long MILLISECONDS_PER_SECOND = TimeUnit.SECONDS.toMillis(1);
   public static final long NANOSECONDS_PER_MILLISSECOND = TimeUnit.MILLISECONDS.toNanos(1);
 
-  private static ThreadLocal DATE_FORMAT =
+  private static final ThreadLocal DATE_FORMAT =
       new ThreadLocal() {
         @Override
         protected DateFormat initialValue() {
@@ -111,10 +111,10 @@ protected DateFormat initialValue() {
       };
 
   // We've switched to Joda/Java Calendar which has a more limited time range....
-  public static int MIN_YEAR = 1900;
-  public static int MAX_YEAR = 3000;
-  private static long MIN_FOUR_DIGIT_YEAR_MILLIS = parseToMillis("1900-01-01 00:00:00");
-  private static long MAX_FOUR_DIGIT_YEAR_MILLIS = parseToMillis("3000-01-01 00:00:00");
+  public static final int MIN_YEAR = 1900;
+  public static final int MAX_YEAR = 3000;
+  private static final long MIN_FOUR_DIGIT_YEAR_MILLIS = parseToMillis("1900-01-01 00:00:00");
+  private static final long MAX_FOUR_DIGIT_YEAR_MILLIS = parseToMillis("3000-01-01 00:00:00");
 
   private static long parseToMillis(String s) {
     try {
diff --git testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
index 41ade5f..140c198 100644
--- testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
+++ testutils/src/java/org/apache/hive/testutils/jdbc/HiveBurnInClient.java
@@ -26,10 +26,10 @@
 
 public class HiveBurnInClient {
 
-  private static String driverName = "org.apache.hive.jdbc.HiveDriver";
+  private static final String driverName = "org.apache.hive.jdbc.HiveDriver";
 
   //default 80k (runs slightly over 1 day long)
-  private final static int NUM_QUERY_ITERATIONS = 80000;
+  private static final int NUM_QUERY_ITERATIONS = 80000;
 
   /**
    * Creates 2 tables to query from