diff --git metastore/pom.xml metastore/pom.xml index 5430580749..04c6f47879 100644 --- metastore/pom.xml +++ metastore/pom.xml @@ -268,20 +268,6 @@ - org.antlr - antlr3-maven-plugin - - - - antlr - - - - - ${basedir}/src/java - - - org.apache.maven.plugins maven-jar-plugin diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index d7c33c314c..12faf8202a 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -190,17 +190,6 @@ @VisibleForTesting static long TEST_TIMEOUT_VALUE = -1; - /** A fixed date format to be used for hive partition column values. */ - public static final ThreadLocal PARTITION_DATE_FORMAT = - new ThreadLocal() { - @Override - protected DateFormat initialValue() { - DateFormat val = new SimpleDateFormat("yyyy-MM-dd"); - val.setLenient(false); // Without this, 2020-20-20 becomes 2021-08-20. - return val; - }; - }; - public static final String ADMIN = "admin"; public static final String PUBLIC = "public"; /** MM write states. */ diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 96c887142f..5354e704d8 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -126,7 +126,6 @@ // configuration parameter documentation // HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES in HiveConf as well. public static final char[] specialCharactersInTableNames = new char[] { '/' }; - final static Charset ENCODING = StandardCharsets.UTF_8; public static Table createColumnsetSchema(String name, List columns, List partCols, Configuration conf) throws MetaException { @@ -1183,15 +1182,6 @@ public static Properties getSchema( return addCols(getSchemaWithoutCols(sd, tblsd, parameters, databaseName, tableName, partitionKeys), tblsd.getCols()); } - public static List getColumnNamesForTable(Table table) { - List colNames = new ArrayList(); - Iterator colsIterator = table.getSd().getColsIterator(); - while (colsIterator.hasNext()) { - colNames.add(colsIterator.next().getName()); - } - return colNames; - } - public static String getColumnNameDelimiter(List fieldSchemas) { // we first take a look if any fieldSchemas contain COMMA for (int i = 0; i < fieldSchemas.size(); i++) { @@ -1792,15 +1782,6 @@ public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld) /** * convert Exception to MetaException, which sets the cause to such exception - * @param e cause of the exception - * @return the MetaException with the specified exception as the cause - */ - public static MetaException newMetaException(Exception e) { - return newMetaException(e != null ? 
e.getMessage() : null, e); - } - - /** - * convert Exception to MetaException, which sets the cause to such exception * @param errorMessage the error message for this MetaException * @param e cause of the exception * @return the MetaException with the specified exception as the cause @@ -1821,174 +1802,12 @@ public static MetaException newMetaException(String errorMessage, Exception e) { return cols; } - - // given a list of partStats, this function will give you an aggr stats - public static List aggrPartitionStats(List partStats, - String dbName, String tableName, List partNames, List colNames, - boolean useDensityFunctionForNDVEstimation, double ndvTuner) - throws MetaException { - // 1. group by the stats by colNames - // map the colName to List - Map> map = new HashMap<>(); - for (ColumnStatistics css : partStats) { - List objs = css.getStatsObj(); - for (ColumnStatisticsObj obj : objs) { - List singleObj = new ArrayList<>(); - singleObj.add(obj); - ColumnStatistics singleCS = new ColumnStatistics(css.getStatsDesc(), singleObj); - if (!map.containsKey(obj.getColName())) { - map.put(obj.getColName(), new ArrayList()); - } - map.get(obj.getColName()).add(singleCS); - } - } - return aggrPartitionStats(map,dbName,tableName,partNames,colNames,useDensityFunctionForNDVEstimation, ndvTuner); - } - - public static List aggrPartitionStats( - Map> map, String dbName, String tableName, - final List partNames, List colNames, - final boolean useDensityFunctionForNDVEstimation,final double ndvTuner) throws MetaException { - List colStats = new ArrayList<>(); - // 2. Aggregate stats for each column in a separate thread - if (map.size()< 1) { - //stats are absent in RDBMS - LOG.debug("No stats data found for: dbName=" +dbName +" tblName=" + tableName + - " partNames= " + partNames + " colNames=" + colNames ); - return colStats; - } - final ExecutorService pool = Executors.newFixedThreadPool(Math.min(map.size(), 16), - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("aggr-col-stats-%d").build()); - final List> futures = Lists.newLinkedList(); - - long start = System.currentTimeMillis(); - for (final Entry> entry : map.entrySet()) { - futures.add(pool.submit(new Callable() { - @Override - public ColumnStatisticsObj call() throws Exception { - List css = entry.getValue(); - ColumnStatsAggregator aggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(css - .iterator().next().getStatsObj().iterator().next().getStatsData().getSetField(), - useDensityFunctionForNDVEstimation, ndvTuner); - ColumnStatisticsObj statsObj = aggregator.aggregate(entry.getKey(), partNames, css); - return statsObj; - }})); - } - pool.shutdown(); - for (Future future : futures) { - try { - colStats.add(future.get()); - } catch (InterruptedException | ExecutionException e) { - pool.shutdownNow(); - LOG.debug(e.toString()); - throw new MetaException(e.toString()); - } - } - LOG.debug("Time for aggr col stats in seconds: {} Threads used: {}", - ((System.currentTimeMillis() - (double)start))/1000, Math.min(map.size(), 16)); - return colStats; - } - - - /** - * Produce a hash for the storage descriptor - * @param sd storage descriptor to hash - * @param md message descriptor to use to generate the hash - * @return the hash as a byte array - */ - public static byte[] hashStorageDescriptor(StorageDescriptor sd, MessageDigest md) { - // Note all maps and lists have to be absolutely sorted. Otherwise we'll produce different - // results for hashes based on the OS or JVM being used. 
- md.reset(); - for (FieldSchema fs : sd.getCols()) { - md.update(fs.getName().getBytes(ENCODING)); - md.update(fs.getType().getBytes(ENCODING)); - if (fs.getComment() != null) { - md.update(fs.getComment().getBytes(ENCODING)); - } - } - if (sd.getInputFormat() != null) { - md.update(sd.getInputFormat().getBytes(ENCODING)); - } - if (sd.getOutputFormat() != null) { - md.update(sd.getOutputFormat().getBytes(ENCODING)); - } - md.update(sd.isCompressed() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); - md.update(Integer.toString(sd.getNumBuckets()).getBytes(ENCODING)); - if (sd.getSerdeInfo() != null) { - SerDeInfo serde = sd.getSerdeInfo(); - if (serde.getName() != null) { - md.update(serde.getName().getBytes(ENCODING)); - } - if (serde.getSerializationLib() != null) { - md.update(serde.getSerializationLib().getBytes(ENCODING)); - } - if (serde.getParameters() != null) { - SortedMap params = new TreeMap<>(serde.getParameters()); - for (Entry param : params.entrySet()) { - md.update(param.getKey().getBytes(ENCODING)); - md.update(param.getValue().getBytes(ENCODING)); - } - } - } - if (sd.getBucketCols() != null) { - List bucketCols = new ArrayList<>(sd.getBucketCols()); - for (String bucket : bucketCols) { - md.update(bucket.getBytes(ENCODING)); - } - } - if (sd.getSortCols() != null) { - SortedSet orders = new TreeSet<>(sd.getSortCols()); - for (Order order : orders) { - md.update(order.getCol().getBytes(ENCODING)); - md.update(Integer.toString(order.getOrder()).getBytes(ENCODING)); - } - } - if (sd.getSkewedInfo() != null) { - SkewedInfo skewed = sd.getSkewedInfo(); - if (skewed.getSkewedColNames() != null) { - SortedSet colnames = new TreeSet<>(skewed.getSkewedColNames()); - for (String colname : colnames) { - md.update(colname.getBytes(ENCODING)); - } - } - if (skewed.getSkewedColValues() != null) { - SortedSet sortedOuterList = new TreeSet<>(); - for (List innerList : skewed.getSkewedColValues()) { - SortedSet sortedInnerList = new TreeSet<>(innerList); - sortedOuterList.add(StringUtils.join(sortedInnerList, ".")); - } - for (String colval : sortedOuterList) { - md.update(colval.getBytes(ENCODING)); - } - } - if (skewed.getSkewedColValueLocationMaps() != null) { - SortedMap sortedMap = new TreeMap<>(); - for (Entry, String> smap : skewed.getSkewedColValueLocationMaps().entrySet()) { - SortedSet sortedKey = new TreeSet<>(smap.getKey()); - sortedMap.put(StringUtils.join(sortedKey, "."), smap.getValue()); - } - for (Entry e : sortedMap.entrySet()) { - md.update(e.getKey().getBytes(ENCODING)); - md.update(e.getValue().getBytes(ENCODING)); - } - } - md.update(sd.isStoredAsSubDirectories() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); - } - - return md.digest(); - } - - public static double decimalToDouble(Decimal decimal) { - return new BigDecimal(new BigInteger(decimal.getUnscaled()), decimal.getScale()).doubleValue(); - } - /** * Verify if the user is allowed to make DB notification related calls. * Only the superusers defined in the Hadoop proxy user settings have the permission. 
* * @param user the short user name - * @param config that contains the proxy user settings + * @param conf that contains the proxy user settings * @return if the user has the permission */ public static boolean checkUserHasHostProxyPrivileges(String user, Configuration conf, String ipAddress) { diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java new file mode 100644 index 0000000000..fa4e02ac79 --- /dev/null +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore2.java @@ -0,0 +1,229 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.messaging.EventMessage; +import org.apache.hadoop.hive.metastore.model.MNotificationLog; +import org.apache.hadoop.hive.metastore.model.MNotificationNextId; +import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.hive.metastore.TestOldSchema.dropAllStoreObjects; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +// Tests from TestObjectStore that can't be moved yet due to references to EventMessage. Once +// EventMessage has been moved this should be recombined with TestObjectStore. 
+public class TestObjectStore2 {
+  private ObjectStore objectStore = null;
+
+  public static class MockPartitionExpressionProxy implements PartitionExpressionProxy {
+    @Override
+    public String convertExprToFilter(byte[] expr) throws MetaException {
+      return null;
+    }
+
+    @Override
+    public boolean filterPartitionsByExpr(List<FieldSchema> partColumns,
+        byte[] expr, String defaultPartitionName, List<String> partitionNames)
+        throws MetaException {
+      return false;
+    }
+
+    @Override
+    public FileMetadataExprType getMetadataType(String inputFormat) {
+      return null;
+    }
+
+    @Override
+    public SearchArgument createSarg(byte[] expr) {
+      return null;
+    }
+
+    @Override
+    public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
+      return null;
+    }
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
+        MockPartitionExpressionProxy.class.getName());
+
+    objectStore = new ObjectStore();
+    objectStore.setConf(conf);
+    dropAllStoreObjects(objectStore);
+  }
+
+  /**
+   * Test notification operations
+   */
+  // TODO MS-SPLIT uncomment once we move EventMessage over
+  @Test
+  public void testNotificationOps() throws InterruptedException {
+    final int NO_EVENT_ID = 0;
+    final int FIRST_EVENT_ID = 1;
+    final int SECOND_EVENT_ID = 2;
+
+    NotificationEvent event =
+        new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), "");
+    NotificationEventResponse eventResponse;
+    CurrentNotificationEventId eventId;
+
+    // Verify that there are no notifications available yet
+    eventId = objectStore.getCurrentNotificationEventId();
+    assertEquals(NO_EVENT_ID, eventId.getEventId());
+
+    // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID
+    objectStore.addNotificationEvent(event);
+    assertEquals(FIRST_EVENT_ID, event.getEventId());
+    objectStore.addNotificationEvent(event);
+    assertEquals(SECOND_EVENT_ID, event.getEventId());
+
+    // Verify that objectStore fetches the latest notification event ID
+    eventId = objectStore.getCurrentNotificationEventId();
+    assertEquals(SECOND_EVENT_ID, eventId.getEventId());
+
+    // Verify that getNextNotification() returns all events
+    eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+    assertEquals(2, eventResponse.getEventsSize());
+    assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+    assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId());
+
+    // Verify that getNextNotification(last) returns events after a specified event
+    eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID));
+    assertEquals(1, eventResponse.getEventsSize());
+    assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+
+    // Verify that getNextNotification(last) returns zero events if there are no more notifications available
+    eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID));
+    assertEquals(0, eventResponse.getEventsSize());
+
+    // Verify that cleanNotificationEvents() cleans up all old notifications
+    Thread.sleep(1);
+    objectStore.cleanNotificationEvents(1);
+    eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+    assertEquals(0, eventResponse.getEventsSize());
+  }
+
+  @Ignore(
+      "This test is here to allow testing with other databases like mysql / postgres etc\n"
+          + " with user changes to the code. This cannot be run on Apache Derby because of\n"
+          + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html"
+  )
+  @Test
+  public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException {
+
+    final int NUM_THREADS = 10;
+    CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS,
+        () -> LoggerFactory.getLogger("test")
+            .debug(NUM_THREADS + " threads going to add notification"));
+
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
+        MockPartitionExpressionProxy.class.getName());
+    /*
+     Below are the properties that need to be set based on what database this test is going to be
+     run against.
+     */
+
+//    conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver");
+//    conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
+//        "jdbc:mysql://localhost:3306/metastore_db");
+//    conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "");
+//    conf.setVar(HiveConf.ConfVars.METASTOREPWD, "");
+
+    /*
+     We have to add this one manually: for tests the db is initialized via MetaStoreDirectSql and
+     we don't run the schema creation sql that includes an insert for notification_sequence, which
+     can be locked. The entry in notification_sequence happens via notification_event insertion.
+     */
+    objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute();
+    objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute();
+
+    objectStore.addNotificationEvent(
+        new NotificationEvent(0, 0,
+            EventMessage.EventType.CREATE_DATABASE.toString(),
+            "CREATE DATABASE DB initial"));
+
+    ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS);
+    for (int i = 0; i < NUM_THREADS; i++) {
+      final int n = i;
+
+      executorService.execute(
+          () -> {
+            ObjectStore store = new ObjectStore();
+            store.setConf(conf);
+
+            String eventType = EventMessage.EventType.CREATE_DATABASE.toString();
+            NotificationEvent dbEvent =
+                new NotificationEvent(0, 0, eventType,
+                    "CREATE DATABASE DB" + n);
+            System.out.println("ADDING NOTIFICATION");
+
+            try {
+              cyclicBarrier.await();
+            } catch (InterruptedException | BrokenBarrierException e) {
+              throw new RuntimeException(e);
+            }
+            store.addNotificationEvent(dbEvent);
+            System.out.println("FINISH NOTIFICATION");
+          });
+    }
+    executorService.shutdown();
+    assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS));
+
+    // We have to set this up again as the underlying PMF keeps getting reinitialized with the
+    // original reference closed.
+    ObjectStore store = new ObjectStore();
+    store.setConf(conf);
+
+    NotificationEventResponse eventResponse = store.getNextNotification(
+        new NotificationEventRequest());
+    assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize());
+    long previousId = 0;
+    for (NotificationEvent event : eventResponse.getEvents()) {
+      assertTrue("previous:" + previousId + " current:" + event.getEventId(),
+          previousId < event.getEventId());
+      assertTrue(previousId + 1 == event.getEventId());
+      previousId = event.getEventId();
+    }
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index 41fbb0cf8f..50a55d8f7d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -42,7 +42,6 @@
 import org.apache.hadoop.fs.Path;
 import
org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; @@ -50,6 +49,7 @@ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -1753,7 +1753,7 @@ private static String normalizeDateCol( } else { throw new SemanticException("Unexpected date type " + colValue.getClass()); } - return HiveMetaStore.PARTITION_DATE_FORMAT.get().format(value); + return MetaStoreUtils.PARTITION_DATE_FORMAT.get().format(value); } protected WriteEntity toWriteEntity(String location) throws SemanticException { diff --git ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java index b26401db18..5c6bbbd210 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler; -import org.apache.hadoop.hive.metastore.hbase.MetadataStore; +import org.apache.hadoop.hive.metastore.MetadataStore; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hadoop.hive.ql.io.orc.ExternalCache.ExternalFooterCachesByConf; import org.apache.hadoop.hive.ql.metadata.HiveException; diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml index acc50ca430..07b767bc13 100644 --- standalone-metastore/pom.xml +++ standalone-metastore/pom.xml @@ -33,9 +33,9 @@ - com.jolbox - bonecp - ${bonecp.version} + com.fasterxml.jackson.core + jackson-databind + ${jackson.new.version} com.github.joshelser @@ -53,6 +53,11 @@ ${protobuf.version} + com.jolbox + bonecp + ${bonecp.version} + + com.zaxxer HikariCP ${hikaricp.version} @@ -78,6 +83,16 @@ ${dropwizard.version} + javolution + javolution + ${javolution.version} + + + org.antlr + antlr-runtime + ${antlr.version} + + org.apache.commons commons-lang3 ${commons-lang3.version} @@ -284,6 +299,20 @@ + org.antlr + antlr3-maven-plugin + + + + antlr + + + + + ${basedir}/src/main/java + + + org.apache.maven.plugins maven-antrun-plugin diff --git common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java similarity index 97% rename from common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java index 7c27d07024..e2e3ada9e8 100644 --- common/src/java/org/apache/hadoop/hive/common/StatsSetupConst.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,8 @@ import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -66,10 +67,10 @@ public String getAggregator(Configuration conf) { custom { @Override public String getPublisher(Configuration conf) { - return HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_STATS_DEFAULT_PUBLISHER); } + return MetastoreConf.getVar(conf, ConfVars.STATS_DEFAULT_PUBLISHER); } @Override public String getAggregator(Configuration conf) { - return HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_STATS_DEFAULT_AGGREGATOR); } + return MetastoreConf.getVar(conf, ConfVars.STATS_DEFAULT_AGGREGATOR); } }; public abstract String getPublisher(Configuration conf); public abstract String getAggregator(Configuration conf); @@ -170,7 +171,7 @@ public String getAggregator(Configuration conf) { @Override public void serialize(Boolean value, JsonGenerator jsonGenerator, - SerializerProvider serializerProvider) throws IOException, JsonProcessingException { + SerializerProvider serializerProvider) throws IOException { jsonGenerator.writeString(value.toString()); } } @@ -179,7 +180,7 @@ public void serialize(Boolean value, JsonGenerator jsonGenerator, public Boolean deserialize(JsonParser jsonParser, DeserializationContext deserializationContext) - throws IOException, JsonProcessingException { + throws IOException { return Boolean.valueOf(jsonParser.getValueAsString()); } } @@ -196,7 +197,7 @@ public Boolean deserialize(JsonParser jsonParser, @JsonDeserialize(contentUsing = BooleanDeserializer.class) TreeMap columnStats = new TreeMap<>(); - }; + } public static boolean areBasicStatsUptoDate(Map params) { if (params == null) { diff --git common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java similarity index 64% rename from common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java index ed0db14b9d..668db10576 100644 --- common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,28 +24,28 @@ public interface NumDistinctValueEstimator { - static final Logger LOG = LoggerFactory.getLogger(NumDistinctValueEstimator.class.getName()); + Logger LOG = LoggerFactory.getLogger(NumDistinctValueEstimator.class.getName()); - public void reset(); + void reset(); - public byte[] serialize(); + byte[] serialize(); - public NumDistinctValueEstimator deserialize(byte[] buf); + NumDistinctValueEstimator deserialize(byte[] buf); - public void addToEstimator(long v); + void addToEstimator(long v); - public void addToEstimator(double d); + void addToEstimator(double d); - public void addToEstimator(String s); + void addToEstimator(String s); - public void addToEstimator(HiveDecimal decimal); + void addToEstimator(HiveDecimal decimal); - public void mergeEstimators(NumDistinctValueEstimator o); + void mergeEstimators(NumDistinctValueEstimator o); - public long estimateNumDistinctValues(); + long estimateNumDistinctValues(); - public int lengthFor(JavaDataModel model); + int lengthFor(JavaDataModel model); - public boolean canMerge(NumDistinctValueEstimator o); + boolean canMerge(NumDistinctValueEstimator o); } diff --git common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java index ca9075914f..4e4dfb7a21 100644 --- common/src/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/NumDistinctValueEstimatorFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java similarity index 98% rename from common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java index 36a49c226d..f6cdc4ce8e 100644 --- common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketch.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,7 @@ import java.io.InputStream; import java.util.Random; -import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.ql.util.JavaDataModel; @@ -313,7 +313,7 @@ public long estimateNumDistinctValues() { return ((long)(numDistinctValues)); } - @InterfaceAudience.LimitedPrivate(value = { "Hive" }) + @InterfaceAudience.LimitedPrivate(value = {"Hive" }) static int lengthFor(JavaDataModel model, Integer numVector) { int length = model.object(); length += model.primitive1() * 2; // two int diff --git common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java index 0150678447..02c64b8321 100644 --- common/src/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/fm/FMSketchUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java index ded8edd93a..f30750fee4 100644 --- common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLConstants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java index 00cb039db1..c52746e61f 100644 --- common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLDenseRegister.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java similarity index 93% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java index eefc60fbd6..a90094db43 100644 --- common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLRegister.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,7 @@ * - hashcode to add * @return true if register value is updated else false */ - public boolean add(long hashcode); + boolean add(long hashcode); /** * Instead of specifying hashcode, this interface can be used to directly @@ -39,12 +39,12 @@ * - register value * @return true if register value is updated else false */ - public boolean set(int idx, byte value); + boolean set(int idx, byte value); /** * Merge hyperloglog registers of the same type (SPARSE or DENSE register) * @param reg * - register to be merged */ - public void merge(HLLRegister reg); + void merge(HLLRegister reg); } diff --git common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java index a4a5ba9015..82085dd056 100644 --- common/src/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HLLSparseRegister.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +46,7 @@ public HLLSparseRegister(int p, int pp, int qp) { this.p = p; - this.sparseMap = new TreeMap(); + this.sparseMap = new TreeMap<>(); this.tempList = new int[HLLConstants.TEMP_LIST_DEFAULT_SIZE]; this.tempListIdx = 0; this.pPrime = pp; diff --git common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java index b80a0ac3ed..8bdb47b431 100644 --- common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java similarity index 99% rename from common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java index 2d82bd08e6..4e6510b7fa 100644 --- common/src/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/common/ndv/hll/HyperLogLogUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Deadline.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Deadline.java index 99bd7b06fd..2e00005452 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/Deadline.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Deadline.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -100,7 +101,7 @@ public static void resetTimeout(long timeoutMs) throws MetaException { /** * start the timer before a method is invoked. - * @param method + * @param method method to be invoked */ public static boolean startTimer(String method) throws MetaException { Deadline deadline = getCurrentDeadline(); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/DeadlineException.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DeadlineException.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/DeadlineException.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DeadlineException.java diff --git metastore/src/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java index 832daec736..4c14ab0a11 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.hbase.MetadataStore; /** * The base implementation of a file metadata handler for a specific file type. diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 9c51d8e1fa..36fb50d575 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +44,6 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.AggregateStatsCache.AggrColStats; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; @@ -66,6 +64,8 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.model.MConstraint; import org.apache.hadoop.hive.metastore.model.MDatabase; import org.apache.hadoop.hive.metastore.model.MNotificationLog; @@ -80,6 +80,7 @@ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hive.common.util.BloomFilter; import org.datanucleus.store.rdbms.query.ForwardQueryResult; import org.slf4j.Logger; @@ -146,7 +147,7 @@ public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String sche dbType = DatabaseProduct.OTHER; } this.dbType = dbType; - int batchSize = HiveConf.getIntVar(conf, ConfVars.METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE); + int batchSize = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_PARTITION_BATCH_SIZE); if (batchSize == DETECT_BATCHING) { batchSize = DatabaseProduct.needsInBatching(dbType) ? 
1000 : NO_BATCHING; } @@ -162,10 +163,10 @@ public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String sche } convertMapNullsToEmptyStrings = - HiveConf.getBoolVar(conf, ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS); - defaultPartName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME); + MetastoreConf.getBoolVar(conf, ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS); + defaultPartName = MetastoreConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME); - String jdoIdFactory = HiveConf.getVar(conf, ConfVars.METASTORE_IDENTIFIER_FACTORY); + String jdoIdFactory = MetastoreConf.getVar(conf, ConfVars.IDENTIFIER_FACTORY); if (! ("datanucleus1".equalsIgnoreCase(jdoIdFactory))){ LOG.warn("Underlying metastore does not use 'datanucleus1' for its ORM naming scheme." + " Disabling directSQL as it uses hand-hardcoded SQL with that assumption."); @@ -177,8 +178,8 @@ public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String sche } } - isAggregateStatsCacheEnabled = HiveConf.getBoolVar( - conf, ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED); + isAggregateStatsCacheEnabled = MetastoreConf.getBoolVar( + conf, ConfVars.AGGREGATE_STATS_CACHE_ENABLED); if (isAggregateStatsCacheEnabled) { aggrStatsCache = AggregateStatsCache.getInstance(conf); } @@ -1168,7 +1169,7 @@ public void visit(LeafNode node) throws MetaException { // Filter.g cannot parse a quoted date; try to parse date here too. try { nodeValue = new java.sql.Date( - HiveMetaStore.PARTITION_DATE_FORMAT.get().parse((String)nodeValue).getTime()); + org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.PARTITION_DATE_FORMAT.get().parse((String)nodeValue).getTime()); valType = FilterType.Date; } catch (ParseException pe) { // do nothing, handled below - types will mismatch } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetadataStore.java similarity index 97% rename from metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetadataStore.java index d427fef71d..26e2c499ad 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/MetadataStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetadataStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hive.metastore.hbase; +package org.apache.hadoop.hive.metastore; import java.io.IOException; import java.nio.ByteBuffer; diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java similarity index 93% rename from metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index d04a343913..ffb2abdf62 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import static org.apache.commons.lang.StringUtils.join; +import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; import java.io.IOException; import java.lang.reflect.Field; @@ -67,16 +68,12 @@ import com.codahale.metrics.MetricRegistry; import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.classification.InterfaceAudience; -import org.apache.hadoop.hive.common.classification.InterfaceStability; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; @@ -131,6 +128,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider; import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.model.MColumnDescriptor; @@ -168,10 +166,11 @@ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.tools.SQLGenerator; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.metastore.utils.FileUtils; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.ObjectPair; import org.apache.hadoop.util.StringUtils; -import org.apache.hive.common.util.HiveStringUtils; import org.apache.thrift.TException; import org.datanucleus.AbstractNucleusContext; import org.datanucleus.ClassLoaderResolver; @@ -209,7 +208,7 @@ private final static AtomicBoolean isSchemaVerified = new AtomicBoolean(false); private static final Logger LOG = LoggerFactory.getLogger(ObjectStore.class.getName()); - private static enum TXN_STATUS { + private enum TXN_STATUS { NO_STATE, OPEN, COMMITED, ROLLBACK } @@ -218,7 +217,7 @@ private static final String USER; private static final String JDO_PARAM = ":param"; static { - Map map = new HashMap(); + Map map = new HashMap<>(); map.put("table", MTable.class); map.put("storagedescriptor", MStorageDescriptor.class); map.put("serdeinfo", MSerDeInfo.class); @@ -250,7 +249,7 @@ private MetaStoreDirectSql directSql = null; private DatabaseProduct dbType = null; private PartitionExpressionProxy expressionProxy = null; - private Configuration hiveConf; + private Configuration conf; private volatile int openTrasactionCalls = 0; private Transaction currentTransaction = 
null; private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE; @@ -281,7 +280,7 @@ public ObjectStore() { @Override public Configuration getConf() { - return hiveConf; + return conf; } /** @@ -298,7 +297,7 @@ public void setConf(Configuration conf) { pmfPropLock.lock(); try { isInitialized = false; - hiveConf = conf; + this.conf = conf; configureSSL(conf); Properties propsFromConf = getDataSourceProps(conf); boolean propsChanged = !propsFromConf.equals(prop); @@ -329,7 +328,7 @@ public void setConf(Configuration conf) { initialize(propsFromConf); String partitionValidationRegex = - hiveConf.get(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.name()); + MetastoreConf.getVar(this.conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN); if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) { partitionValidationPattern = Pattern.compile(partitionValidationRegex); } else { @@ -364,10 +363,9 @@ public void setConf(Configuration conf) { @SuppressWarnings("nls") private void initialize(Properties dsProps) { - int retryLimit = HiveConf.getIntVar(hiveConf, - HiveConf.ConfVars.HMSHANDLERATTEMPTS); - long retryInterval = HiveConf.getTimeVar(hiveConf, - HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS); + int retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMSHANDLERATTEMPTS); + long retryInterval = MetastoreConf.getTimeVar(conf, + ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS); int numTries = retryLimit; while (numTries > 0){ @@ -403,7 +401,7 @@ private void initialize(Properties dsProps) { } private static final Set> retriableExceptionClasses = - new HashSet>(Arrays.asList(JDOCanRetryException.class)); + new HashSet<>(Arrays.asList(JDOCanRetryException.class)); /** * Helper function for initialize to determine if we should retry an exception. * We return true if the exception is of a known type of retriable exceptions, or if one @@ -438,9 +436,7 @@ private void initializeHelper(Properties dsProps) { pm = getPersistenceManager(); try { String productName = MetaStoreDirectSql.getProductName(pm); - sqlGenerator = new SQLGenerator( - DatabaseProduct.determineDatabaseProduct(productName), - new HiveConf(hiveConf, ObjectStore.class)); + sqlGenerator = new SQLGenerator(DatabaseProduct.determineDatabaseProduct(productName), conf); } catch (SQLException e) { LOG.error("error trying to figure out the database product", e); throw new RuntimeException(e); @@ -448,13 +444,13 @@ private void initializeHelper(Properties dsProps) { isInitialized = pm != null; if (isInitialized) { dbType = determineDatabaseProduct(); - expressionProxy = createExpressionProxy(hiveConf); - if (HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL)) { + expressionProxy = createExpressionProxy(conf); + if (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL)) { String schema = prop.getProperty("javax.jdo.mapping.Schema"); if (schema != null && schema.isEmpty()) { schema = null; } - directSql = new MetaStoreDirectSql(pm, hiveConf, schema); + directSql = new MetaStoreDirectSql(pm, conf, schema); } } LOG.debug("RawStore: " + this + ", with PersistenceManager: " + pm + @@ -490,13 +486,12 @@ private static String getProductName(PersistenceManager pm) { * @return The partition expression proxy. 
*/ private static PartitionExpressionProxy createExpressionProxy(Configuration conf) { - String className = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS); + String className = MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS); try { @SuppressWarnings("unchecked") Class clazz = - (Class)MetaStoreUtils.getClass(className); - return MetaStoreUtils.newInstance( - clazz, new Class[0], new Object[0]); + JavaUtils.getClass(className, PartitionExpressionProxy.class); + return JavaUtils.newInstance(clazz, new Class[0], new Object[0]); } catch (MetaException e) { LOG.error("Error loading PartitionExpressionProxy", e); throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage()); @@ -509,7 +504,7 @@ private static PartitionExpressionProxy createExpressionProxy(Configuration conf */ private static void configureSSL(Configuration conf) { // SSL support - String sslPropString = conf.get(HiveConf.ConfVars.METASTORE_DBACCESS_SSL_PROPS.varname); + String sslPropString = MetastoreConf.getVar(conf, ConfVars.DBACCESS_SSL_PROPS); if (org.apache.commons.lang.StringUtils.isNotEmpty(sslPropString)) { LOG.info("Metastore setting SSL properties of the connection to backed DB"); for (String sslProp : sslPropString.split(",")) { @@ -517,7 +512,7 @@ private static void configureSSL(Configuration conf) { if (pair != null && pair.length == 2) { System.setProperty(pair[0].trim(), pair[1].trim()); } else { - LOG.warn("Invalid metastore property value for " + HiveConf.ConfVars.METASTORE_DBACCESS_SSL_PROPS); + LOG.warn("Invalid metastore property value for " + ConfVars.DBACCESS_SSL_PROPS); } } } @@ -532,23 +527,41 @@ private static Properties getDataSourceProps(Configuration conf) { Properties prop = new Properties(); correctAutoStartMechanism(conf); - Iterator> iter = conf.iterator(); - while (iter.hasNext()) { - Map.Entry e = iter.next(); - if (e.getKey().contains("datanucleus") || e.getKey().contains("jdo")) { - Object prevVal = prop.setProperty(e.getKey(), conf.get(e.getKey())); - if (LOG.isDebugEnabled() - && !e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) { + // First, go through and set all our values for datanucleus and javax.jdo parameters. This + // has to be a separate first step because we don't set the default values in the config object. + for (ConfVars var : MetastoreConf.dataNucleusAndJdoConfs) { + String confVal = MetastoreConf.getAsString(conf, var); + Object prevVal = prop.setProperty(var.varname, confVal); + if (LOG.isDebugEnabled() && MetastoreConf.isPrintable(var.varname)) { + LOG.debug("Overriding " + var.varname + " value " + prevVal + + " from jpox.properties with " + confVal); + } + } + + // Now, we need to look for any values that the user set that MetastoreConf doesn't know about. + // TODO Commenting this out for now, as it breaks because the conf values aren't getting properly + // interpolated in case of variables. See HIVE-17788. + /* + for (Map.Entry e : conf) { + if (e.getKey().startsWith("datanucleus.") || e.getKey().startsWith("javax.jdo.")) { + // We have to handle this differently depending on whether it is a value known to + // MetastoreConf or not. If it is, we need to get the default value if a value isn't + // provided. If not, we just set whatever the user has set. 
+ Object prevVal = prop.setProperty(e.getKey(), e.getValue()); + if (LOG.isDebugEnabled() && MetastoreConf.isPrintable(e.getKey())) { LOG.debug("Overriding " + e.getKey() + " value " + prevVal + " from jpox.properties with " + e.getValue()); } } } + */ + // Password may no longer be in the conf, use getPassword() try { String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD); if (passwd != null && !passwd.isEmpty()) { - prop.setProperty(HiveConf.ConfVars.METASTOREPWD.varname, passwd); + // We can get away with the use of varname here because varname == hiveName for PWD + prop.setProperty(ConfVars.PWD.varname, passwd); } } catch (IOException err) { throw new RuntimeException("Error getting metastore password: " + err.getMessage(), err); @@ -556,7 +569,7 @@ private static Properties getDataSourceProps(Configuration conf) { if (LOG.isDebugEnabled()) { for (Entry e : prop.entrySet()) { - if (!e.getKey().equals(HiveConf.ConfVars.METASTOREPWD.varname)) { + if (MetastoreConf.isPrintable(e.getKey().toString())) { LOG.debug(e.getKey() + " = " + e.getValue()); } } @@ -586,7 +599,7 @@ private static void correctAutoStartMechanism(Configuration conf) { private static synchronized PersistenceManagerFactory getPMF() { if (pmf == null) { - HiveConf conf = new HiveConf(ObjectStore.class); + Configuration conf = MetastoreConf.newMetastoreConf(); DataSourceProvider dsp = DataSourceProviderFactory.getDataSourceProvider(conf); if (dsp == null) { pmf = JDOHelper.getPersistenceManagerFactory(prop); @@ -608,7 +621,7 @@ private static synchronized PersistenceManagerFactory getPMF() { } DataStoreCache dsc = pmf.getDataStoreCache(); if (dsc != null) { - String objTypes = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES); + String objTypes = MetastoreConf.getVar(conf, ConfVars.CACHE_PINOBJTYPES); LOG.info("Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes=\"" + objTypes + "\""); if (objTypes != null && objTypes.length() > 0) { objTypes = objTypes.toLowerCase(); @@ -776,7 +789,7 @@ private MDatabase getMDatabase(String name) throws NoSuchObjectException { Query query = null; try { openTransaction(); - name = HiveStringUtils.normalizeIdentifier(name); + name = normalizeIdentifier(name); query = pm.newQuery(MDatabase.class, "name == dbname"); query.declareParameters("java.lang.String dbname"); query.setUnique(true); @@ -892,7 +905,7 @@ public boolean alterDatabase(String dbName, Database db) public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { boolean success = false; LOG.info("Dropping database " + dbname + " along with all tables"); - dbname = HiveStringUtils.normalizeIdentifier(dbname); + dbname = normalizeIdentifier(dbname); QueryWrapper queryWrapper = new QueryWrapper(); try { openTransaction(); @@ -934,7 +947,7 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc query.setResult("name"); query.setOrdering("name ascending"); Collection names = (Collection) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); - databases = new ArrayList(); + databases = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { databases.add((String) i.next()); } @@ -957,7 +970,7 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc try { query = pm.newQuery(queryStr); query.setResult("name"); - databases = new ArrayList((Collection) query.execute()); + databases = new ArrayList<>((Collection) query.execute()); commited = 
commitTransaction(); } finally { rollbackAndCleanup(commited, query); @@ -967,7 +980,7 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc } private MType getMType(Type type) { - List fields = new ArrayList(); + List fields = new ArrayList<>(); if (type.getFields() != null) { for (FieldSchema field : type.getFields()) { fields.add(new MFieldSchema(field.getName(), field.getType(), field @@ -978,7 +991,7 @@ private MType getMType(Type type) { } private Type getType(MType mtype) { - List fields = new ArrayList(); + List fields = new ArrayList<>(); if (mtype.getFields() != null) { for (MFieldSchema field : mtype.getFields()) { fields.add(new FieldSchema(field.getName(), field.getType(), field @@ -1092,7 +1105,7 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException pm.makePersistent(mtbl); PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges(); - List toPersistPrivObjs = new ArrayList(); + List toPersistPrivObjs = new ArrayList<>(); if (principalPrivs != null) { int now = (int)(System.currentTimeMillis()/1000); @@ -1206,11 +1219,11 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, private List listAllTableConstraintsWithOptionalConstraintName (String dbName, String tableName, String constraintname) { - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - constraintname = constraintname!=null?HiveStringUtils.normalizeIdentifier(constraintname):null; + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + constraintname = constraintname!=null?normalizeIdentifier(constraintname):null; List mConstraints = null; - List constraintNames = new ArrayList(); + List constraintNames = new ArrayList<>(); Query query = null; try { @@ -1236,7 +1249,7 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, query.setFilter("param.contains(constraintName)"); query.declareParameters("java.util.Collection param"); Collection constraints = (Collection)query.execute(constraintNames); - mConstraints = new ArrayList(); + mConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currConstraint = (MConstraint) i.next(); mConstraints.add(currConstraint); @@ -1277,7 +1290,7 @@ public Table getTable(String dbName, String tableName) throws MetaException { List tbls = null; try { openTransaction(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + dbName = normalizeIdentifier(dbName); // Take the pattern and split it on the | to get all the composing // patterns List parameterVals = new ArrayList<>(); @@ -1295,7 +1308,7 @@ public Table getTable(String dbName, String tableName) throws MetaException { query.setResult("tableName"); query.setOrdering("tableName ascending"); Collection names = (Collection) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); - tbls = new ArrayList(); + tbls = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { tbls.add((String) i.next()); } @@ -1344,7 +1357,7 @@ private int getObjectCount(String fieldName, String objName) { boolean commited = false; Query query = null; - List metas = new ArrayList(); + List metas = new ArrayList<>(); try { openTransaction(); // Take the pattern and split it on the | to get all the composing @@ -1383,7 +1396,7 @@ private StringBuilder appendPatternCondition(StringBuilder filterBuilder, String private StringBuilder 
appendPatternCondition(StringBuilder builder, String fieldName, String elements, List parameters) { - elements = HiveStringUtils.normalizeIdentifier(elements); + elements = normalizeIdentifier(elements); return appendCondition(builder, fieldName, elements.split("\\|"), true, parameters); } @@ -1442,8 +1455,8 @@ private AttachedMTableInfo getMTable(String db, String table, boolean retrieveCD Query query = null; try { openTransaction(); - db = HiveStringUtils.normalizeIdentifier(db); - table = HiveStringUtils.normalizeIdentifier(table); + db = normalizeIdentifier(db); + table = normalizeIdentifier(table); query = pm.newQuery(MTable.class, "tableName == table && database.name == db"); query.declareParameters("java.lang.String table, java.lang.String db"); query.setUnique(true); @@ -1471,13 +1484,13 @@ private MTable getMTable(String db, String table) { @Override public List getTableObjectsByName(String db, List tbl_names) throws MetaException, UnknownDBException { - List
tables = new ArrayList(); + List
tables = new ArrayList<>(); boolean committed = false; Query dbExistsQuery = null; Query query = null; try { openTransaction(); - db = HiveStringUtils.normalizeIdentifier(db); + db = normalizeIdentifier(db); dbExistsQuery = pm.newQuery(MDatabase.class, "name == db"); dbExistsQuery.declareParameters("java.lang.String db"); dbExistsQuery.setUnique(true); @@ -1487,9 +1500,9 @@ private MTable getMTable(String db, String table) { throw new UnknownDBException("Could not find database " + db); } - List lowered_tbl_names = new ArrayList(); + List lowered_tbl_names = new ArrayList<>(); for (String t : tbl_names) { - lowered_tbl_names.add(HiveStringUtils.normalizeIdentifier(t)); + lowered_tbl_names.add(normalizeIdentifier(t)); } query = pm.newQuery(MTable.class); query.setFilter("database.name == db && tbl_names.contains(tableName)"); @@ -1516,7 +1529,7 @@ private MTable getMTable(String db, String table) { /** Makes shallow copy of a map to avoid DataNucleus mucking with our objects. */ private Map convertMap(Map dnMap) { return MetaStoreUtils.trimMapNulls(dnMap, - HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS)); + MetastoreConf.getBoolVar(getConf(), ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS)); } private Table convertToTable(MTable mtbl) throws MetaException { @@ -1573,7 +1586,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, } // A new table is always created with a new column descriptor - return new MTable(HiveStringUtils.normalizeIdentifier(tbl.getTableName()), mdb, + return new MTable(normalizeIdentifier(tbl.getTableName()), mdb, convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), tbl .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(), convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(), @@ -1584,7 +1597,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, private List convertToMFieldSchemas(List keys) { List mkeys = null; if (keys != null) { - mkeys = new ArrayList(keys.size()); + mkeys = new ArrayList<>(keys.size()); for (FieldSchema part : keys) { mkeys.add(new MFieldSchema(part.getName().toLowerCase(), part.getType(), part.getComment())); @@ -1596,7 +1609,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, private List convertToFieldSchemas(List mkeys) { List keys = null; if (mkeys != null) { - keys = new ArrayList(mkeys.size()); + keys = new ArrayList<>(mkeys.size()); for (MFieldSchema part : mkeys) { keys.add(new FieldSchema(part.getName(), part.getType(), part .getComment())); @@ -1608,9 +1621,9 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, private List convertToMOrders(List keys) { List mkeys = null; if (keys != null) { - mkeys = new ArrayList(keys.size()); + mkeys = new ArrayList<>(keys.size()); for (Order part : keys) { - mkeys.add(new MOrder(HiveStringUtils.normalizeIdentifier(part.getCol()), part.getOrder())); + mkeys.add(new MOrder(normalizeIdentifier(part.getCol()), part.getOrder())); } } return mkeys; @@ -1619,7 +1632,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, private List convertToOrders(List mkeys) { List keys = null; if (mkeys != null) { - keys = new ArrayList(mkeys.size()); + keys = new ArrayList<>(mkeys.size()); for (MOrder part : mkeys) { keys.add(new Order(part.getCol(), part.getOrder())); } @@ -1691,9 +1704,9 @@ private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd) private List> convertToSkewedValues(List mLists) { List> lists 
= null; if (mLists != null) { - lists = new ArrayList>(mLists.size()); + lists = new ArrayList<>(mLists.size()); for (MStringList element : mLists) { - lists.add(new ArrayList(element.getInternalList())); + lists.add(new ArrayList<>(element.getInternalList())); } } return lists; @@ -1702,7 +1715,7 @@ private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd) private List convertToMStringLists(List> mLists) { List lists = null ; if (null != mLists) { - lists = new ArrayList(); + lists = new ArrayList<>(); for (List mList : mLists) { lists.add(new MStringList(mList)); } @@ -1718,10 +1731,10 @@ private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd) private Map, String> covertToSkewedMap(Map mMap) { Map, String> map = null; if (mMap != null) { - map = new HashMap, String>(mMap.size()); + map = new HashMap<>(mMap.size()); Set keys = mMap.keySet(); for (MStringList key : keys) { - map.put(new ArrayList(key.getInternalList()), mMap.get(key)); + map.put(new ArrayList<>(key.getInternalList()), mMap.get(key)); } } return map; @@ -1735,7 +1748,7 @@ private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd) private Map covertToMapMStringList(Map, String> mMap) { Map map = null; if (mMap != null) { - map = new HashMap(mMap.size()); + map = new HashMap<>(mMap.size()); Set> keys = mMap.keySet(); for (List key : keys) { map.put(new MStringList(key), mMap.get(key)); @@ -1800,7 +1813,7 @@ public boolean addPartitions(String dbName, String tblName, List part tabGrants = this.listAllTableGrants(dbName, tblName); tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName); } - List toPersist = new ArrayList(); + List toPersist = new ArrayList<>(); for (Partition part : parts) { if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) { throw new MetaException("Partition does not belong to target table " @@ -1928,7 +1941,7 @@ public boolean addPartition(Partition part) throws InvalidObjectException, pm.makePersistent(mpart); int now = (int)(System.currentTimeMillis()/1000); - List toPersist = new ArrayList(); + List toPersist = new ArrayList<>(); if (tabGrants != null) { for (MTablePrivilege tab: tabGrants) { MPartitionPrivilege partGrant = new MPartitionPrivilege(tab @@ -1985,8 +1998,8 @@ private MPartition getMPartition(String dbName, String tableName, List p Query query = null; try { openTransaction(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); MTable mtbl = getMTable(dbName, tableName); if (mtbl == null) { commited = commitTransaction(); @@ -2152,7 +2165,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio openTransaction(); if (part != null) { List schemas = part.getTable().getPartitionKeys(); - List colNames = new ArrayList(); + List colNames = new ArrayList<>(); for (MFieldSchema col: schemas) { colNames.add(col.getName()); } @@ -2235,7 +2248,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio try { openTransaction(); List mparts = listMPartitions(dbName, tblName, max, queryWrapper); - List parts = new ArrayList(mparts.size()); + List parts = new ArrayList<>(mparts.size()); if (mparts != null && mparts.size()>0) { for (MPartition mpart : mparts) { MTable mtbl = mpart.getTable(); @@ -2301,7 +2314,7 @@ public Partition getPartitionWithAuth(String dbName, String tblName, return dest; } if (dest == 
null) { - dest = new ArrayList(src.size()); + dest = new ArrayList<>(src.size()); } for (MPartition mp : src) { dest.add(convertToPart(mp)); @@ -2312,7 +2325,7 @@ public Partition getPartitionWithAuth(String dbName, String tblName, private List convertToParts(String dbName, String tblName, List mparts) throws MetaException { - List parts = new ArrayList(mparts.size()); + List parts = new ArrayList<>(mparts.size()); for (MPartition mp : mparts) { parts.add(convertToPart(dbName, tblName, mp)); Deadline.checkTimeout(); @@ -2584,9 +2597,9 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam } private List getPartitionNamesNoTxn(String dbName, String tableName, short max) { - List pns = new ArrayList(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + List pns = new ArrayList<>(); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); Query query = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + "where table.database.name == t1 && table.tableName == t2 " @@ -2625,8 +2638,8 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam private Collection getPartitionPsQueryResults(String dbName, String tableName, List part_vals, short max_parts, String resultsCol, QueryWrapper queryWrapper) throws MetaException, NoSuchObjectException { - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); Table table = getTable(dbName, tableName); if (table == null) { throw new NoSuchObjectException(dbName + "." + tableName + " table not found"); @@ -2670,7 +2683,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, public List listPartitionsPsWithAuth(String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { - List partitions = new ArrayList(); + List partitions = new ArrayList<>(); boolean success = false; QueryWrapper queryWrapper = new QueryWrapper(); @@ -2703,7 +2716,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, @Override public List listPartitionNamesPs(String dbName, String tableName, List part_vals, short max_parts) throws MetaException, NoSuchObjectException { - List partitionNames = new ArrayList(); + List partitionNames = new ArrayList<>(); boolean success = false; QueryWrapper queryWrapper = new QueryWrapper(); @@ -2729,8 +2742,8 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, try { openTransaction(); LOG.debug("Executing listMPartitions"); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); Query query = queryWrapper.query = pm.newQuery(MPartition.class, "table.tableName == t1 && table.database.name == t2"); query.declareParameters("java.lang.String t1, java.lang.String t2"); query.setOrdering("partitionName ascending"); @@ -2797,7 +2810,7 @@ protected boolean getPartitionsByExprInternal(String dbName, String tblName, fin } } // We couldn't do SQL filter pushdown. Get names via normal means. 
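Note: the listing and lookup methods in these hunks all follow the same shape: open a transaction, build a javax.jdo Query, execute it, commit, and release the query in a finally block (ObjectStore funnels that last step through rollbackAndCleanup and QueryWrapper). The following is a minimal sketch of that shape using the plain JDO API and the partition-name JDOQL string quoted above; it assumes a configured PersistenceManager and is an illustration, not the ObjectStore code itself.

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;
    import javax.jdo.PersistenceManager;
    import javax.jdo.Query;
    import javax.jdo.Transaction;

    public class PartitionNameQuerySketch {
      /** Lists partition names for one table, mirroring the query/transaction shape used above. */
      static List<String> listPartitionNames(PersistenceManager pm, String dbName, String tableName) {
        List<String> names = new ArrayList<>();
        Transaction tx = pm.currentTransaction();
        Query query = null;
        try {
          tx.begin();
          query = pm.newQuery(
              "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
                  + "where table.database.name == t1 && table.tableName == t2");
          query.declareParameters("java.lang.String t1, java.lang.String t2");
          query.setResult("partitionName");
          query.setOrdering("partitionName ascending");
          Collection<?> result = (Collection<?>) query.execute(dbName, tableName);
          for (Object name : result) {
            names.add((String) name);
          }
          tx.commit();
        } finally {
          // ObjectStore does this cleanup through rollbackAndCleanup(commited, query).
          if (tx.isActive()) {
            tx.rollback();
          }
          if (query != null) {
            query.closeAll();
          }
        }
        return names;
      }
    }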
- List partNames = new LinkedList(); + List partNames = new LinkedList<>(); hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( ctx.getTable(), expr, defaultPartitionName, maxParts, partNames)); return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames); @@ -2813,7 +2826,7 @@ protected boolean getPartitionsByExprInternal(String dbName, String tblName, fin } if (result == null) { // We couldn't do JDOQL filter pushdown. Get names via normal means. - List partNames = new ArrayList(); + List partNames = new ArrayList<>(); hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( ctx.getTable(), expr, defaultPartitionName, maxParts, partNames)); result = getPartitionsViaOrmFilter(dbName, tblName, partNames); @@ -2840,7 +2853,7 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, result.addAll(getPartitionNamesNoTxn( table.getDbName(), table.getTableName(), maxParts)); if (defaultPartName == null || defaultPartName.isEmpty()) { - defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); + defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); } return expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), expr, defaultPartName, result); } @@ -2857,7 +2870,7 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, */ private List getPartitionsViaOrmFilter(Table table, ExpressionTree tree, short maxParts, boolean isValidatedFilter) throws MetaException { - Map params = new HashMap(); + Map params = new HashMap<>(); String jdoFilter = makeQueryFilterString(table.getDbName(), table, tree, params, isValidatedFilter); if (jdoFilter == null) { @@ -2885,7 +2898,7 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, boolean isValidatedFilter) throws MetaException { - Map params = new HashMap(); + Map params = new HashMap<>(); String jdoFilter = makeQueryFilterString(table.getDbName(), table, tree, params, isValidatedFilter); if (jdoFilter == null) { assert !isValidatedFilter; @@ -2914,7 +2927,7 @@ private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, b private List getPartitionsViaOrmFilter( String dbName, String tblName, List partNames) throws MetaException { if (partNames.isEmpty()) { - return new ArrayList(); + return new ArrayList<>(); } ObjectPair> queryWithParams = getPartQueryWithParams(dbName, tblName, partNames); @@ -2957,7 +2970,7 @@ private void dropPartitionsNoTxn(String dbName, String tblName, List par @SuppressWarnings("unchecked") List sds = (List)query.executeWithMap( queryWithParams.getSecond()); - HashSet candidateCds = new HashSet(); + HashSet candidateCds = new HashSet<>(); for (MStorageDescriptor sd : sds) { if (sd != null && sd.getCD() != null) { candidateCds.add(sd.getCD()); @@ -2974,7 +2987,7 @@ private void dropPartitionsNoTxn(String dbName, String tblName, List par String tblName, List partNames) { StringBuilder sb = new StringBuilder("table.tableName == t1 && table.database.name == t2 && ("); int n = 0; - Map params = new HashMap(); + Map params = new HashMap<>(); for (Iterator itr = partNames.iterator(); itr.hasNext();) { String pn = "p" + n; n++; @@ -2988,10 +3001,10 @@ private void dropPartitionsNoTxn(String dbName, String tblName, List par Query query = pm.newQuery(); query.setFilter(sb.toString()); LOG.debug(" JDOQL filter is " + sb.toString()); - params.put("t1", 
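Note: getPartQueryWithParams, shown in the surrounding hunk, emulates an SQL IN clause in JDOQL by ORing one named parameter per partition name and declaring every parameter as java.lang.String. Below is a JDK-only sketch of that string building; the class name is made up for illustration, and the trim-plus-lower-case normalization of the db and table names is an assumption in the spirit of normalizeIdentifier.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class PartitionNameFilterSketch {
      /** Builds a JDOQL filter of the form
       *  table.tableName == t1 && table.database.name == t2 && (partitionName == p0 || ...). */
      static String buildFilter(String dbName, String tblName, List<String> partNames,
                                Map<String, String> params) {
        StringBuilder sb =
            new StringBuilder("table.tableName == t1 && table.database.name == t2 && (");
        int n = 0;
        for (String partName : partNames) {
          String pn = "p" + n;
          params.put(pn, partName);
          sb.append(n == 0 ? "" : " || ").append("partitionName == ").append(pn);
          n++;
        }
        sb.append(")");
        // Assumed normalization: trim + lower-case, mirroring normalizeIdentifier.
        params.put("t1", tblName.trim().toLowerCase());
        params.put("t2", dbName.trim().toLowerCase());
        return sb.toString();
      }

      /** Mirrors makeParameterDeclarationString: every bound parameter is a String. */
      static String declareParameters(Map<String, String> params) {
        List<String> decls = new ArrayList<>();
        for (String key : params.keySet()) {
          decls.add("java.lang.String " + key);
        }
        return String.join(", ", decls);
      }

      public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        String filter = buildFilter("default", "web_logs",
            Arrays.asList("ds=2017-10-01", "ds=2017-10-02"), params);
        System.out.println(filter);
        System.out.println(declareParameters(params));
      }
    }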
HiveStringUtils.normalizeIdentifier(tblName)); - params.put("t2", HiveStringUtils.normalizeIdentifier(dbName)); + params.put("t1", normalizeIdentifier(tblName)); + params.put("t2", normalizeIdentifier(dbName)); query.declareParameters(makeParameterDeclarationString(params)); - return new ObjectPair>(query, params); + return new ObjectPair<>(query, params); } @Override @@ -3015,9 +3028,9 @@ public GetHelper(String dbName, String tblName, boolean allowSql, boolean allowJ throws MetaException { assert allowSql || allowJdo; this.allowJdo = allowJdo; - this.dbName = HiveStringUtils.normalizeIdentifier(dbName); + this.dbName = normalizeIdentifier(dbName); if (tblName != null){ - this.tblName = HiveStringUtils.normalizeIdentifier(tblName); + this.tblName = normalizeIdentifier(tblName); } else { // tblName can be null in cases of Helper being used at a higher // abstraction level, such as with datbases @@ -3030,8 +3043,8 @@ public GetHelper(String dbName, String tblName, boolean allowSql, boolean allowJ // SQL usage inside a larger transaction (e.g. droptable) may not be desirable because // some databases (e.g. Postgres) abort the entire transaction when any query fails, so // the fallback from failed SQL to JDO is not possible. - boolean isConfigEnabled = HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL) - && (HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL_DDL) || !isInTxn); + boolean isConfigEnabled = MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL) + && (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL_DDL) || !isInTxn); if (isConfigEnabled && directSql == null) { dbType = determineDatabaseProduct(); directSql = new MetaStoreDirectSql(pm, getConf(), ""); @@ -3250,7 +3263,7 @@ protected String describeResult() { protected boolean canUseDirectSql(GetHelper ctx) throws MetaException { return directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, filter); - }; + } @Override protected Integer getSqlResult(GetHelper ctx) throws MetaException { @@ -3302,7 +3315,7 @@ protected Integer getJdoResult( // if numPartitions could not be obtained from ORM filters, then get number partitions names, and count them if (numPartitions == null) { - List filteredPartNames = new ArrayList(); + List filteredPartNames = new ArrayList<>(); getPartitionNamesPrunedByExprNoTxn(ctx.getTable(), tempExpr, "", (short) -1, filteredPartNames); numPartitions = filteredPartNames.size(); } @@ -3323,7 +3336,7 @@ protected Integer getJdoResult( @Override protected boolean canUseDirectSql(GetHelper> ctx) throws MetaException { return directSql.generateSqlFilterForPushdown(ctx.getTable(), tree, filter); - }; + } @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { @@ -3437,12 +3450,12 @@ private String makeParameterDeclarationStringObj(Map params) { throws MetaException { boolean success = false; Query query = null; - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); try { openTransaction(); LOG.debug("Executing listTableNamesByFilter"); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - Map params = new HashMap(); + dbName = normalizeIdentifier(dbName); + Map params = new HashMap<>(); String queryFilterString = makeQueryFilterString(dbName, null, filter, params); query = pm.newQuery(MTable.class); query.declareImports("import java.lang.String"); @@ -3461,11 +3474,11 @@ private String makeParameterDeclarationStringObj(Map params) { query.setFilter(queryFilterString); Collection names = 
(Collection)query.executeWithMap(params); // have to emulate "distinct", otherwise tables with the same name may be returned - Set tableNamesSet = new HashSet(); + Set tableNamesSet = new HashSet<>(); for (Iterator i = names.iterator(); i.hasNext();) { tableNamesSet.add((String) i.next()); } - tableNames = new ArrayList(tableNamesSet); + tableNames = new ArrayList<>(tableNamesSet); LOG.debug("Done executing query for listTableNamesByFilter"); success = commitTransaction(); LOG.debug("Done retrieving all objects for listTableNamesByFilter"); @@ -3480,19 +3493,19 @@ private String makeParameterDeclarationStringObj(Map params) { short maxParts) throws MetaException { boolean success = false; Query query = null; - List partNames = new ArrayList(); + List partNames = new ArrayList<>(); try { openTransaction(); LOG.debug("Executing listMPartitionNamesByFilter"); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); MTable mtable = getMTable(dbName, tableName); if (mtable == null) { // To be consistent with the behavior of listPartitionNames, if the // table or db does not exist, we return an empty list return partNames; } - Map params = new HashMap(); + Map params = new HashMap<>(); String queryFilterString = makeQueryFilterString(dbName, mtable, filter, params); query = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " @@ -3508,7 +3521,7 @@ private String makeParameterDeclarationStringObj(Map params) { query.setOrdering("partitionName ascending"); query.setResult("partitionName"); Collection names = (Collection) query.executeWithMap(params); - partNames = new ArrayList(); + partNames = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { partNames.add((String) i.next()); } @@ -3527,8 +3540,8 @@ public void alterTable(String dbname, String name, Table newTable) boolean success = false; try { openTransaction(); - name = HiveStringUtils.normalizeIdentifier(name); - dbname = HiveStringUtils.normalizeIdentifier(dbname); + name = normalizeIdentifier(name); + dbname = normalizeIdentifier(dbname); MTable newt = convertToMTable(newTable); if (newt == null) { throw new InvalidObjectException("new table is invalid"); @@ -3541,7 +3554,7 @@ public void alterTable(String dbname, String name, Table newTable) // For now only alter name, owner, parameters, cols, bucketcols are allowed oldt.setDatabase(newt.getDatabase()); - oldt.setTableName(HiveStringUtils.normalizeIdentifier(newt.getTableName())); + oldt.setTableName(normalizeIdentifier(newt.getTableName())); oldt.setParameters(newt.getParameters()); oldt.setOwner(newt.getOwner()); // Fully copy over the contents of the new SD into the old SD, @@ -3570,9 +3583,9 @@ public void alterIndex(String dbname, String baseTblName, String name, Index new boolean success = false; try { openTransaction(); - name = HiveStringUtils.normalizeIdentifier(name); - baseTblName = HiveStringUtils.normalizeIdentifier(baseTblName); - dbname = HiveStringUtils.normalizeIdentifier(dbname); + name = normalizeIdentifier(name); + baseTblName = normalizeIdentifier(baseTblName); + dbname = normalizeIdentifier(dbname); MIndex newi = convertToMIndex(newIndex); if (newi == null) { throw new InvalidObjectException("new index is invalid"); @@ -3597,8 +3610,8 @@ public void alterIndex(String dbname, String baseTblName, String name, Index new private void alterPartitionNoTxn(String dbname, 
String name, List part_vals, Partition newPart) throws InvalidObjectException, MetaException { - name = HiveStringUtils.normalizeIdentifier(name); - dbname = HiveStringUtils.normalizeIdentifier(dbname); + name = normalizeIdentifier(name); + dbname = normalizeIdentifier(dbname); MPartition oldp = getMPartition(dbname, name, part_vals); MPartition newp = convertToMPart(newPart, false); if (oldp == null || newp == null) { @@ -3811,7 +3824,7 @@ private boolean constraintNameAlreadyExists(String name) { String constraintNameIfExists = null; try { openTransaction(); - name = HiveStringUtils.normalizeIdentifier(name); + name = normalizeIdentifier(name); constraintExistsQuery = pm.newQuery(MConstraint.class, "constraintName == name"); constraintExistsQuery.declareParameters("java.lang.String name"); constraintExistsQuery.setUnique(true); @@ -3894,7 +3907,7 @@ private String getGuidFromDB() throws MetaException { query = pm.newQuery(MMetastoreDBProperties.class, "this.propertyKey == key"); query.declareParameters("java.lang.String key"); Collection names = (Collection) query.execute("guid"); - List uuids = new ArrayList(); + List uuids = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { String uuid = i.next().getPropertyValue(); LOG.debug("Found guid " + uuid); @@ -3918,17 +3931,17 @@ private String getGuidFromDB() throws MetaException { private List addForeignKeys( List fks, boolean retrieveCD) throws InvalidObjectException, MetaException { - List fkNames = new ArrayList(); - List mpkfks = new ArrayList(); + List fkNames = new ArrayList<>(); + List mpkfks = new ArrayList<>(); String currentConstraintName = null; for (int i = 0; i < fks.size(); i++) { - final String pkTableDB = HiveStringUtils.normalizeIdentifier(fks.get(i).getPktable_db()); - final String pkTableName = HiveStringUtils.normalizeIdentifier(fks.get(i).getPktable_name()); - final String pkColumnName =HiveStringUtils.normalizeIdentifier(fks.get(i).getPkcolumn_name()); - final String fkTableDB = HiveStringUtils.normalizeIdentifier(fks.get(i).getFktable_db()); - final String fkTableName = HiveStringUtils.normalizeIdentifier(fks.get(i).getFktable_name()); - final String fkColumnName = HiveStringUtils.normalizeIdentifier(fks.get(i).getFkcolumn_name()); + final String pkTableDB = normalizeIdentifier(fks.get(i).getPktable_db()); + final String pkTableName = normalizeIdentifier(fks.get(i).getPktable_name()); + final String pkColumnName =normalizeIdentifier(fks.get(i).getPkcolumn_name()); + final String fkTableDB = normalizeIdentifier(fks.get(i).getFktable_db()); + final String fkTableName = normalizeIdentifier(fks.get(i).getFktable_name()); + final String fkColumnName = normalizeIdentifier(fks.get(i).getFkcolumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. 
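Note: most of these hunks swap HiveStringUtils.normalizeIdentifier for a statically imported normalizeIdentifier so the code no longer reaches into the ql-side utility class. Functionally this is case and whitespace folding of db, table, column and constraint names; the snippet below sketches that behavior (trim plus lower-case) as an assumption, not as a copy of the real helper.

    public final class IdentifierSketch {
      private IdentifierSketch() {}

      /** Assumed behavior of normalizeIdentifier: identifiers are stored trimmed and lower-cased. */
      public static String normalizeIdentifier(String identifier) {
        return identifier == null ? null : identifier.trim().toLowerCase();
      }

      public static void main(String[] args) {
        // "Default.Web_Logs" and "default.web_logs" resolve to the same stored names.
        System.out.println(normalizeIdentifier(" Default ") + "." + normalizeIdentifier("Web_Logs"));
      }
    }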
@@ -3971,7 +3984,7 @@ private String getGuidFromDB() throws MetaException { fkTableDB, fkTableName, pkTableDB, pkTableName, pkColumnName, fkColumnName, "fk"); } } else { - currentConstraintName = HiveStringUtils.normalizeIdentifier(fks.get(i).getFk_name()); + currentConstraintName = normalizeIdentifier(fks.get(i).getFk_name()); } fkNames.add(currentConstraintName); Integer updateRule = fks.get(i).getUpdate_rule(); @@ -4006,14 +4019,14 @@ private String getGuidFromDB() throws MetaException { private List addPrimaryKeys(List pks, boolean retrieveCD) throws InvalidObjectException, MetaException { - List pkNames = new ArrayList(); - List mpks = new ArrayList(); + List pkNames = new ArrayList<>(); + List mpks = new ArrayList<>(); String constraintName = null; for (int i = 0; i < pks.size(); i++) { - final String tableDB = HiveStringUtils.normalizeIdentifier(pks.get(i).getTable_db()); - final String tableName = HiveStringUtils.normalizeIdentifier(pks.get(i).getTable_name()); - final String columnName = HiveStringUtils.normalizeIdentifier(pks.get(i).getColumn_name()); + final String tableDB = normalizeIdentifier(pks.get(i).getTable_db()); + final String tableName = normalizeIdentifier(pks.get(i).getTable_name()); + final String columnName = normalizeIdentifier(pks.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. @@ -4040,7 +4053,7 @@ private String getGuidFromDB() throws MetaException { constraintName = generateConstraintName(tableDB, tableName, columnName, "pk"); } } else { - constraintName = HiveStringUtils.normalizeIdentifier(pks.get(i).getPk_name()); + constraintName = normalizeIdentifier(pks.get(i).getPk_name()); } pkNames.add(constraintName); int enableValidateRely = (pks.get(i).isEnable_cstr() ? 4 : 0) + @@ -4072,14 +4085,14 @@ private String getGuidFromDB() throws MetaException { private List addUniqueConstraints(List uks, boolean retrieveCD) throws InvalidObjectException, MetaException { - List ukNames = new ArrayList(); - List cstrs = new ArrayList(); + List ukNames = new ArrayList<>(); + List cstrs = new ArrayList<>(); String constraintName = null; for (int i = 0; i < uks.size(); i++) { - final String tableDB = HiveStringUtils.normalizeIdentifier(uks.get(i).getTable_db()); - final String tableName = HiveStringUtils.normalizeIdentifier(uks.get(i).getTable_name()); - final String columnName = HiveStringUtils.normalizeIdentifier(uks.get(i).getColumn_name()); + final String tableDB = normalizeIdentifier(uks.get(i).getTable_db()); + final String tableName = normalizeIdentifier(uks.get(i).getTable_name()); + final String columnName = normalizeIdentifier(uks.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. 
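Note: in addForeignKeys, addPrimaryKeys and addUniqueConstraints (and addNotNullConstraints further down), a user-supplied constraint name is normalized, while a missing name is produced by generateConstraintName from the db, table, column and a kind suffix ("fk", "pk", "uk", "nn"), with constraintNameAlreadyExists guarding against collisions. The fragment below sketches that decision; the generated format and the random component are hypothetical and stand in for the real generator.

    import java.util.UUID;
    import java.util.function.Predicate;

    public class ConstraintNameSketch {
      static String normalizeIdentifier(String s) {
        return s.trim().toLowerCase();
      }

      /** Picks a constraint name: normalize the supplied one, or invent one (hypothetical format). */
      static String constraintName(String supplied, String tableDb, String tableName,
                                   String columnName, String suffix, Predicate<String> alreadyExists) {
        if (supplied != null) {
          return normalizeIdentifier(supplied);
        }
        String candidate;
        do {
          candidate = normalizeIdentifier(tableDb + "_" + tableName + "_" + columnName + "_"
              + UUID.randomUUID().toString().substring(0, 8) + "_" + suffix);
        } while (alreadyExists.test(candidate));   // mirrors constraintNameAlreadyExists
        return candidate;
      }

      public static void main(String[] args) {
        System.out.println(constraintName(null, "db1", "orders", "customer_id", "fk", n -> false));
        System.out.println(constraintName("FK_Orders_Customer", "db1", "orders", "customer_id", "fk", n -> false));
      }
    }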
@@ -4100,7 +4113,7 @@ private String getGuidFromDB() throws MetaException { constraintName = generateConstraintName(tableDB, tableName, columnName, "uk"); } } else { - constraintName = HiveStringUtils.normalizeIdentifier(uks.get(i).getUk_name()); + constraintName = normalizeIdentifier(uks.get(i).getUk_name()); } ukNames.add(constraintName); @@ -4133,14 +4146,14 @@ private String getGuidFromDB() throws MetaException { private List addNotNullConstraints(List nns, boolean retrieveCD) throws InvalidObjectException, MetaException { - List nnNames = new ArrayList(); - List cstrs = new ArrayList(); + List nnNames = new ArrayList<>(); + List cstrs = new ArrayList<>(); String constraintName = null; for (int i = 0; i < nns.size(); i++) { - final String tableDB = HiveStringUtils.normalizeIdentifier(nns.get(i).getTable_db()); - final String tableName = HiveStringUtils.normalizeIdentifier(nns.get(i).getTable_name()); - final String columnName = HiveStringUtils.normalizeIdentifier(nns.get(i).getColumn_name()); + final String tableDB = normalizeIdentifier(nns.get(i).getTable_db()); + final String tableName = normalizeIdentifier(nns.get(i).getTable_name()); + final String columnName = normalizeIdentifier(nns.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. @@ -4159,7 +4172,7 @@ private String getGuidFromDB() throws MetaException { if (nns.get(i).getNn_name() == null) { constraintName = generateConstraintName(tableDB, tableName, columnName, "nn"); } else { - constraintName = HiveStringUtils.normalizeIdentifier(nns.get(i).getNn_name()); + constraintName = normalizeIdentifier(nns.get(i).getNn_name()); } nnNames.add(constraintName); @@ -4224,7 +4237,7 @@ private MIndex convertToMIndex(Index index) throws InvalidObjectException, "Underlying index table does not exist for the given index."); } - return new MIndex(HiveStringUtils.normalizeIdentifier(index.getIndexName()), origTable, index.getCreateTime(), + return new MIndex(normalizeIdentifier(index.getIndexName()), origTable, index.getCreateTime(), index.getLastAccessTime(), index.getParameters(), indexTable, msd, index.getIndexHandlerClass(), index.isDeferredRebuild()); } @@ -4255,8 +4268,8 @@ private MIndex getMIndex(String dbName, String originalTblName, String indexName Query query = null; try { openTransaction(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - originalTblName = HiveStringUtils.normalizeIdentifier(originalTblName); + dbName = normalizeIdentifier(dbName); + originalTblName = normalizeIdentifier(originalTblName); MTable mtbl = getMTable(dbName, originalTblName); if (mtbl == null) { commited = commitTransaction(); @@ -4269,7 +4282,7 @@ private MIndex getMIndex(String dbName, String originalTblName, String indexName query.setUnique(true); midx = (MIndex) query.execute(originalTblName, dbName, - HiveStringUtils.normalizeIdentifier(indexName)); + normalizeIdentifier(indexName)); pm.retrieve(midx); commited = commitTransaction(); } finally { @@ -4319,15 +4332,15 @@ private Index convertToIndex(MIndex mIndex) throws MetaException { LOG.debug("Executing getIndexes"); openTransaction(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - origTableName = HiveStringUtils.normalizeIdentifier(origTableName); + dbName = normalizeIdentifier(dbName); + origTableName = normalizeIdentifier(origTableName); query = pm.newQuery(MIndex.class, "origTable.tableName == t1 && origTable.database.name == t2"); 
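Note: getMTable, getMPartition and getMIndex in these hunks use the class-based form of pm.newQuery together with setUnique(true), so execute() returns a single persistent object (or null) rather than a Collection. A small sketch of that lookup style follows, against plain JDO and a placeholder persistent class; in practice the class would need JDO metadata and a live PersistenceManager.

    import javax.jdo.PersistenceManager;
    import javax.jdo.Query;

    public class UniqueLookupSketch {
      /** Placeholder persistent class standing in for MTable/MPartition/MIndex. */
      public static class MThing {
        String name;
        String dbName;
      }

      /** Returns the single matching object or null; setUnique(true) makes execute() return one result. */
      static MThing lookup(PersistenceManager pm, String dbName, String name) {
        Query query = null;
        try {
          query = pm.newQuery(MThing.class, "name == t1 && dbName == t2");
          query.declareParameters("java.lang.String t1, java.lang.String t2");
          query.setUnique(true);
          // Assumed trim + lower-case normalization, as with normalizeIdentifier above.
          return (MThing) query.execute(name.trim().toLowerCase(), dbName.trim().toLowerCase());
        } finally {
          if (query != null) {
            query.closeAll();
          }
        }
      }
    }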
query.declareParameters("java.lang.String t1, java.lang.String t2"); List mIndexes = (List) query.execute(origTableName, dbName); pm.retrieveAll(mIndexes); - List indexes = new ArrayList(mIndexes.size()); + List indexes = new ArrayList<>(mIndexes.size()); for (MIndex mIdx : mIndexes) { indexes.add(this.convertToIndex(mIdx)); } @@ -4343,14 +4356,14 @@ private Index convertToIndex(MIndex mIndex) throws MetaException { @Override public List listIndexNames(String dbName, String origTableName, short max) throws MetaException { - List pns = new ArrayList(); + List pns = new ArrayList<>(); boolean success = false; Query query = null; try { openTransaction(); LOG.debug("Executing listIndexNames"); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - origTableName = HiveStringUtils.normalizeIdentifier(origTableName); + dbName = normalizeIdentifier(dbName); + origTableName = normalizeIdentifier(origTableName); query = pm.newQuery("select indexName from org.apache.hadoop.hive.metastore.model.MIndex " + "where origTable.database.name == t1 && origTable.tableName == t2 " @@ -4568,7 +4581,7 @@ public boolean removeRole(String roleName) throws MetaException, */ private Set listAllRolesInHierarchy(String userName, List groupNames) { - List ret = new ArrayList(); + List ret = new ArrayList<>(); if(userName != null) { ret.addAll(listMRoles(userName, PrincipalType.USER)); } @@ -4578,7 +4591,7 @@ public boolean removeRole(String roleName) throws MetaException, } } // get names of these roles and its ancestors - Set roleNames = new HashSet(); + Set roleNames = new HashSet<>(); getAllRoleAncestors(roleNames, ret); return roleNames; } @@ -4607,7 +4620,7 @@ private void getAllRoleAncestors(Set processedRoleNames, List PrincipalType principalType) { boolean success = false; Query query = null; - List mRoleMember = new ArrayList(); + List mRoleMember = new ArrayList<>(); try { LOG.debug("Executing listRoles"); @@ -4630,7 +4643,10 @@ private void getAllRoleAncestors(Set processedRoleNames, List if (principalType == PrincipalType.USER) { // All users belong to public role implicitly, add that role - MRole publicRole = new MRole(HiveMetaStore.PUBLIC, 0, HiveMetaStore.PUBLIC); + // TODO MS-SPLIT Change this back to HiveMetaStore.PUBLIC once HiveMetaStore has moved to + // stand-alone metastore. 
+ //MRole publicRole = new MRole(HiveMetaStore.PUBLIC, 0, HiveMetaStore.PUBLIC); + MRole publicRole = new MRole("public", 0, "public"); mRoleMember.add(new MRoleMap(principalName, principalType.toString(), publicRole, 0, null, null, false)); } @@ -4640,7 +4656,7 @@ private void getAllRoleAncestors(Set processedRoleNames, List @Override public List listRoles(String principalName, PrincipalType principalType) { - List result = new ArrayList(); + List result = new ArrayList<>(); List roleMaps = listMRoles(principalName, principalType); if (roleMaps != null) { for (MRoleMap roleMap : roleMaps) { @@ -4655,7 +4671,7 @@ private void getAllRoleAncestors(Set processedRoleNames, List @Override public List listRolesWithGrants(String principalName, PrincipalType principalType) { - List result = new ArrayList(); + List result = new ArrayList<>(); List roleMaps = listMRoles(principalName, principalType); if (roleMaps != null) { for (MRoleMap roleMap : roleMaps) { @@ -4739,7 +4755,7 @@ private MRole getMRole(String roleName) { query = pm.newQuery("select roleName from org.apache.hadoop.hive.metastore.model.MRole"); query.setResult("roleName"); Collection names = (Collection) query.execute(); - List roleNames = new ArrayList(); + List roleNames = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { roleNames.add((String) i.next()); } @@ -4760,8 +4776,8 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, if (userName != null) { List user = this.listPrincipalMGlobalGrants(userName, PrincipalType.USER); if(user.size()>0) { - Map> userPriv = new HashMap>(); - List grantInfos = new ArrayList(user.size()); + Map> userPriv = new HashMap<>(); + List grantInfos = new ArrayList<>(user.size()); for (int i = 0; i < user.size(); i++) { MGlobalPrivilege item = user.get(i); grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item @@ -4773,12 +4789,12 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, } } if (groupNames != null && groupNames.size() > 0) { - Map> groupPriv = new HashMap>(); + Map> groupPriv = new HashMap<>(); for(String groupName: groupNames) { List group = this.listPrincipalMGlobalGrants(groupName, PrincipalType.GROUP); if(group.size()>0) { - List grantInfos = new ArrayList(group.size()); + List grantInfos = new ArrayList<>(group.size()); for (int i = 0; i < group.size(); i++) { MGlobalPrivilege item = group.get(i); grantInfos.add(new PrivilegeGrantInfo(item.getPrivilege(), item @@ -4802,13 +4818,13 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, public List getDBPrivilege(String dbName, String principalName, PrincipalType principalType) throws InvalidObjectException, MetaException { - dbName = HiveStringUtils.normalizeIdentifier(dbName); + dbName = normalizeIdentifier(dbName); if (principalName != null) { List userNameDbPriv = this.listPrincipalMDBGrants( principalName, principalType, dbName); if (userNameDbPriv != null && userNameDbPriv.size() > 0) { - List grantInfos = new ArrayList( + List grantInfos = new ArrayList<>( userNameDbPriv.size()); for (int i = 0; i < userNameDbPriv.size(); i++) { MDBPrivilege item = userNameDbPriv.get(i); @@ -4819,7 +4835,7 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, return grantInfos; } } - return new ArrayList(0); + return new ArrayList<>(0); } @@ -4828,19 +4844,19 @@ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException { boolean commited = false; - dbName = 
HiveStringUtils.normalizeIdentifier(dbName); + dbName = normalizeIdentifier(dbName); PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); try { openTransaction(); if (userName != null) { - Map> dbUserPriv = new HashMap>(); + Map> dbUserPriv = new HashMap<>(); dbUserPriv.put(userName, getDBPrivilege(dbName, userName, PrincipalType.USER)); ret.setUserPrivileges(dbUserPriv); } if (groupNames != null && groupNames.size() > 0) { - Map> dbGroupPriv = new HashMap>(); + Map> dbGroupPriv = new HashMap<>(); for (String groupName : groupNames) { dbGroupPriv.put(groupName, getDBPrivilege(dbName, groupName, PrincipalType.GROUP)); @@ -4849,7 +4865,7 @@ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, } Set roleNames = listAllRolesInHierarchy(userName, groupNames); if (roleNames != null && roleNames.size() > 0) { - Map> dbRolePriv = new HashMap>(); + Map> dbRolePriv = new HashMap<>(); for (String roleName : roleNames) { dbRolePriv .put(roleName, getDBPrivilege(dbName, roleName, PrincipalType.ROLE)); @@ -4871,19 +4887,19 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, List groupNames) throws InvalidObjectException, MetaException { boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); try { openTransaction(); if (userName != null) { - Map> partUserPriv = new HashMap>(); + Map> partUserPriv = new HashMap<>(); partUserPriv.put(userName, getPartitionPrivilege(dbName, tableName, partition, userName, PrincipalType.USER)); ret.setUserPrivileges(partUserPriv); } if (groupNames != null && groupNames.size() > 0) { - Map> partGroupPriv = new HashMap>(); + Map> partGroupPriv = new HashMap<>(); for (String groupName : groupNames) { partGroupPriv.put(groupName, getPartitionPrivilege(dbName, tableName, partition, groupName, PrincipalType.GROUP)); @@ -4892,7 +4908,7 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, } Set roleNames = listAllRolesInHierarchy(userName, groupNames); if (roleNames != null && roleNames.size() > 0) { - Map> partRolePriv = new HashMap>(); + Map> partRolePriv = new HashMap<>(); for (String roleName : roleNames) { partRolePriv.put(roleName, getPartitionPrivilege(dbName, tableName, partition, roleName, PrincipalType.ROLE)); @@ -4914,19 +4930,19 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, throws InvalidObjectException, MetaException { boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); try { openTransaction(); if (userName != null) { - Map> tableUserPriv = new HashMap>(); + Map> tableUserPriv = new HashMap<>(); tableUserPriv.put(userName, getTablePrivilege(dbName, tableName, userName, PrincipalType.USER)); ret.setUserPrivileges(tableUserPriv); } if (groupNames != null && groupNames.size() > 0) { - Map> tableGroupPriv = new HashMap>(); + Map> tableGroupPriv = new HashMap<>(); for (String groupName : groupNames) { tableGroupPriv.put(groupName, getTablePrivilege(dbName, tableName, groupName, PrincipalType.GROUP)); @@ -4935,7 +4951,7 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, } Set roleNames = listAllRolesInHierarchy(userName, 
groupNames); if (roleNames != null && roleNames.size() > 0) { - Map> tableRolePriv = new HashMap>(); + Map> tableRolePriv = new HashMap<>(); for (String roleName : roleNames) { tableRolePriv.put(roleName, getTablePrivilege(dbName, tableName, roleName, PrincipalType.ROLE)); @@ -4956,22 +4972,22 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - columnName = HiveStringUtils.normalizeIdentifier(columnName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + columnName = normalizeIdentifier(columnName); boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); try { openTransaction(); if (userName != null) { - Map> columnUserPriv = new HashMap>(); + Map> columnUserPriv = new HashMap<>(); columnUserPriv.put(userName, getColumnPrivilege(dbName, tableName, columnName, partitionName, userName, PrincipalType.USER)); ret.setUserPrivileges(columnUserPriv); } if (groupNames != null && groupNames.size() > 0) { - Map> columnGroupPriv = new HashMap>(); + Map> columnGroupPriv = new HashMap<>(); for (String groupName : groupNames) { columnGroupPriv.put(groupName, getColumnPrivilege(dbName, tableName, columnName, partitionName, groupName, PrincipalType.GROUP)); @@ -4980,7 +4996,7 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, } Set roleNames = listAllRolesInHierarchy(userName, groupNames); if (roleNames != null && roleNames.size() > 0) { - Map> columnRolePriv = new HashMap>(); + Map> columnRolePriv = new HashMap<>(); for (String roleName : roleNames) { columnRolePriv.put(roleName, getColumnPrivilege(dbName, tableName, columnName, partitionName, roleName, PrincipalType.ROLE)); @@ -5000,15 +5016,15 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName, String partName, String principalName, PrincipalType principalType) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); if (principalName != null) { List userNameTabPartPriv = this .listPrincipalMPartitionGrants(principalName, principalType, dbName, tableName, partName); if (userNameTabPartPriv != null && userNameTabPartPriv.size() > 0) { - List grantInfos = new ArrayList( + List grantInfos = new ArrayList<>( userNameTabPartPriv.size()); for (int i = 0; i < userNameTabPartPriv.size(); i++) { MPartitionPrivilege item = userNameTabPartPriv.get(i); @@ -5020,7 +5036,7 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, return grantInfos; } } - return new ArrayList(0); + return new ArrayList<>(0); } private PrincipalType getPrincipalTypeFromStr(String str) { @@ -5029,15 +5045,15 @@ private PrincipalType getPrincipalTypeFromStr(String str) { private List getTablePrivilege(String dbName, String tableName, String principalName, PrincipalType principalType) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); if (principalName != null) { List userNameTabPartPriv = this .listAllMTableGrants(principalName, principalType, dbName, tableName); if (userNameTabPartPriv != 
null && userNameTabPartPriv.size() > 0) { - List grantInfos = new ArrayList( + List grantInfos = new ArrayList<>( userNameTabPartPriv.size()); for (int i = 0; i < userNameTabPartPriv.size(); i++) { MTablePrivilege item = userNameTabPartPriv.get(i); @@ -5048,23 +5064,23 @@ private PrincipalType getPrincipalTypeFromStr(String str) { return grantInfos; } } - return new ArrayList(0); + return new ArrayList<>(0); } private List getColumnPrivilege(String dbName, String tableName, String columnName, String partitionName, String principalName, PrincipalType principalType) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - columnName = HiveStringUtils.normalizeIdentifier(columnName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + columnName = normalizeIdentifier(columnName); if (partitionName == null) { List userNameColumnPriv = this .listPrincipalMTableColumnGrants(principalName, principalType, dbName, tableName, columnName); if (userNameColumnPriv != null && userNameColumnPriv.size() > 0) { - List grantInfos = new ArrayList( + List grantInfos = new ArrayList<>( userNameColumnPriv.size()); for (int i = 0; i < userNameColumnPriv.size(); i++) { MTableColumnPrivilege item = userNameColumnPriv.get(i); @@ -5079,7 +5095,7 @@ private PrincipalType getPrincipalTypeFromStr(String str) { .listPrincipalMPartitionColumnGrants(principalName, principalType, dbName, tableName, partitionName, columnName); if (userNameColumnPriv != null && userNameColumnPriv.size() > 0) { - List grantInfos = new ArrayList( + List grantInfos = new ArrayList<>( userNameColumnPriv.size()); for (int i = 0; i < userNameColumnPriv.size(); i++) { MPartitionColumnPrivilege item = userNameColumnPriv.get(i); @@ -5090,7 +5106,7 @@ private PrincipalType getPrincipalTypeFromStr(String str) { return grantInfos; } } - return new ArrayList(0); + return new ArrayList<>(0); } @Override @@ -5100,13 +5116,13 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce int now = (int) (System.currentTimeMillis() / 1000); try { openTransaction(); - List persistentObjs = new ArrayList(); + List persistentObjs = new ArrayList<>(); List privilegeList = privileges.getPrivileges(); if (privilegeList != null && privilegeList.size() > 0) { Iterator privIter = privilegeList.iterator(); - Set privSet = new HashSet(); + Set privSet = new HashSet<>(); while (privIter.hasNext()) { HiveObjectPrivilege privDef = privIter.next(); HiveObjectRef hiveObject = privDef.getHiveObject(); @@ -5314,7 +5330,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) boolean committed = false; try { openTransaction(); - List persistentObjs = new ArrayList(); + List persistentObjs = new ArrayList<>(); List privilegeList = privileges.getPrivileges(); @@ -5556,7 +5572,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) public List listMRoleMembers(String roleName) { boolean success = false; Query query = null; - List mRoleMemeberList = new ArrayList(); + List mRoleMemeberList = new ArrayList<>(); try { LOG.debug("Executing listRoleMembers"); @@ -5580,7 +5596,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) @Override public List listRoleMembers(String roleName) { List roleMaps = listMRoleMembers(roleName); - List rolePrinGrantList = new ArrayList(); + List rolePrinGrantList = new ArrayList<>(); if (roleMaps != null) { for (MRoleMap roleMap : roleMaps) { @@ 
-5607,7 +5623,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) PrincipalType principalType) { boolean commited = false; Query query = null; - List userNameDbPriv = new ArrayList(); + List userNameDbPriv = new ArrayList<>(); try { List mPrivs = null; openTransaction(); @@ -5634,9 +5650,9 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) List mUsers = listPrincipalMGlobalGrants(principalName, principalType); if (mUsers.isEmpty()) { - return Collections. emptyList(); + return Collections.emptyList(); } - List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mUsers.size(); i++) { MGlobalPrivilege sUsr = mUsers.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -5668,7 +5684,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } private List convertGlobal(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (MGlobalPrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); @@ -5687,8 +5703,8 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) PrincipalType principalType, String dbName) { boolean success = false; Query query = null; - List mSecurityDBList = new ArrayList(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + List mSecurityDBList = new ArrayList<>(); + dbName = normalizeIdentifier(dbName); try { LOG.debug("Executing listPrincipalDBGrants"); @@ -5717,9 +5733,9 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) String dbName) { List mDbs = listPrincipalMDBGrants(principalName, principalType, dbName); if (mDbs.isEmpty()) { - return Collections.emptyList(); + return Collections.emptyList(); } - List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mDbs.size(); i++) { MDBPrivilege sDB = mDbs.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -5756,7 +5772,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } private List convertDB(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (MDBPrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); @@ -5808,11 +5824,11 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) public List listAllTableGrants(String dbName, String tableName) { boolean success = false; Query query = null; - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - List mSecurityTabList = new ArrayList(); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + List mSecurityTabList = new ArrayList<>(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); try { LOG.debug("Executing listAllTableGrants"); @@ -5836,11 +5852,11 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) @SuppressWarnings("unchecked") public List listTableAllPartitionGrants(String dbName, String tableName) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); boolean success = 
false; Query query = null; - List mSecurityTabPartList = new ArrayList(); + List mSecurityTabPartList = new ArrayList<>(); try { LOG.debug("Executing listTableAllPartitionGrants"); @@ -5865,9 +5881,9 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) public List listTableAllColumnGrants(String dbName, String tableName) { boolean success = false; Query query = null; - List mTblColPrivilegeList = new ArrayList(); - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + List mTblColPrivilegeList = new ArrayList<>(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); try { LOG.debug("Executing listTableAllColumnGrants"); @@ -5894,9 +5910,9 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) String tableName) { boolean success = false; Query query = null; - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - List mSecurityColList = new ArrayList(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + List mSecurityColList = new ArrayList<>(); try { LOG.debug("Executing listTableAllPartitionColumnGrants"); @@ -5922,8 +5938,8 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) public List listPartitionAllColumnGrants(String dbName, String tableName, List partNames) { boolean success = false; - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); List mSecurityColList = null; try { @@ -5954,7 +5970,7 @@ public void dropPartitionAllColumnGrantsNoTxn( @SuppressWarnings("unchecked") private List listDatabaseGrants(String dbName, QueryWrapper queryWrapper) { - dbName = HiveStringUtils.normalizeIdentifier(dbName); + dbName = normalizeIdentifier(dbName); boolean success = false; try { LOG.debug("Executing listDatabaseGrants"); @@ -5977,8 +5993,8 @@ public void dropPartitionAllColumnGrantsNoTxn( @SuppressWarnings("unchecked") private List listPartitionGrants(String dbName, String tableName, List partNames) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); boolean success = false; List mSecurityTabPartList = null; @@ -6021,8 +6037,8 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List(query, params); + return new ObjectPair<>(query, params); } @SuppressWarnings("unchecked") public List listAllMTableGrants( String principalName, PrincipalType principalType, String dbName, String tableName) { - tableName = HiveStringUtils.normalizeIdentifier(tableName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); boolean success = false; Query query = null; - List mSecurityTabPartList = new ArrayList(); + List mSecurityTabPartList = new ArrayList<>(); try { openTransaction(); LOG.debug("Executing listAllTableGrants"); @@ -6076,9 +6092,9 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mTbls = listAllMTableGrants(principalName, principalType, dbName, tableName); if (mTbls.isEmpty()) { - return Collections. 
emptyList(); + return Collections.emptyList(); } - List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mTbls.size(); i++) { MTablePrivilege sTbl = mTbls.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -6099,9 +6115,9 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mSecurityTabPartList = new ArrayList(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + List mSecurityTabPartList = new ArrayList<>(); try { LOG.debug("Executing listPrincipalPartitionGrants"); @@ -6138,9 +6154,9 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mParts = listPrincipalMPartitionGrants(principalName, principalType, dbName, tableName, partName); if (mParts.isEmpty()) { - return Collections. emptyList(); + return Collections.emptyList(); } - List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mParts.size(); i++) { MPartitionPrivilege sPart = mParts.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -6163,10 +6179,10 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mSecurityColList = new ArrayList(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + columnName = normalizeIdentifier(columnName); + List mSecurityColList = new ArrayList<>(); try { LOG.debug("Executing listPrincipalTableColumnGrants"); @@ -6203,7 +6219,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mTableCols.size(); i++) { MTableColumnPrivilege sCol = mTableCols.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -6225,10 +6241,10 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mSecurityColList = new ArrayList(); + tableName = normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + columnName = normalizeIdentifier(columnName); + List mSecurityColList = new ArrayList<>(); try { LOG.debug("Executing listPrincipalPartitionColumnGrants"); @@ -6268,7 +6284,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List result = new ArrayList(); + List result = new ArrayList<>(); for (int i = 0; i < mPartitionCols.size(); i++) { MPartitionColumnPrivilege sCol = mPartitionCols.get(i); HiveObjectRef objectRef = new HiveObjectRef( @@ -6344,7 +6360,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List convertPartCols(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (MPartitionColumnPrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); @@ -6422,8 +6438,8 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listTableGrantsAll(String dbName, String tableName) { boolean success = false; Query query = null; - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tableName = HiveStringUtils.normalizeIdentifier(tableName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); try { openTransaction(); LOG.debug("Executing listTableGrantsAll"); @@ -6444,7 +6460,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List convertTable(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (MTablePrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = 
PrincipalType.valueOf(priv.getPrincipalType()); @@ -6542,7 +6558,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List convertPartition(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (MPartitionPrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); @@ -6624,8 +6640,8 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List convertTableCols(List privs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (MTableColumnPrivilege priv : privs) { String pname = priv.getPrincipalName(); PrincipalType ptype = PrincipalType.valueOf(priv.getPrincipalType()); @@ -6752,7 +6768,7 @@ private String getPartitionStr(Table tbl, Map partName) throws In throw new InvalidPartitionException("Number of partition columns in table: "+ tbl.getPartitionKeysSize() + " doesn't match with number of supplied partition values: "+partName.size()); } - final List storedVals = new ArrayList(tbl.getPartitionKeysSize()); + final List storedVals = new ArrayList<>(tbl.getPartitionKeysSize()); for(FieldSchema partKey : tbl.getPartitionKeys()){ String partVal = partName.get(partKey.getName()); if(null == partVal) { @@ -6827,7 +6843,7 @@ public long executeJDOQLUpdate(String queryStr) { public Set listFSRoots() { boolean committed = false; Query query = null; - Set fsRoots = new HashSet(); + Set fsRoots = new HashSet<>(); try { openTransaction(); query = pm.newQuery(MDatabase.class); @@ -6919,8 +6935,8 @@ public void setUpdateLocations(Map updateLocations) { public UpdateMDatabaseURIRetVal updateMDatabaseURI(URI oldLoc, URI newLoc, boolean dryRun) { boolean committed = false; Query query = null; - Map updateLocations = new HashMap(); - List badRecords = new ArrayList(); + Map updateLocations = new HashMap<>(); + List badRecords = new ArrayList<>(); UpdateMDatabaseURIRetVal retVal = null; try { openTransaction(); @@ -7058,8 +7074,8 @@ public UpdatePropURIRetVal updateMStorageDescriptorTblPropURI(URI oldLoc, URI ne String tblPropKey, boolean isDryRun) { boolean committed = false; Query query = null; - Map updateLocations = new HashMap(); - List badRecords = new ArrayList(); + Map updateLocations = new HashMap<>(); + List badRecords = new ArrayList<>(); UpdatePropURIRetVal retVal = null; try { openTransaction(); @@ -7128,8 +7144,8 @@ public UpdateMStorageDescriptorTblURIRetVal updateMStorageDescriptorTblURI(URI o URI newLoc, boolean isDryRun) { boolean committed = false; Query query = null; - Map updateLocations = new HashMap(); - List badRecords = new ArrayList(); + Map updateLocations = new HashMap<>(); + List badRecords = new ArrayList<>(); int numNullRecords = 0; UpdateMStorageDescriptorTblURIRetVal retVal = null; try { @@ -7208,8 +7224,8 @@ public UpdateSerdeURIRetVal updateSerdeURI(URI oldLoc, URI newLoc, String serdeP boolean isDryRun) { boolean committed = false; Query query = null; - Map updateLocations = new HashMap(); - List badRecords = new ArrayList(); + Map updateLocations = new HashMap<>(); + List badRecords = new ArrayList<>(); UpdateSerdeURIRetVal retVal = null; try { openTransaction(); @@ -7522,10 +7538,9 @@ public ColumnStatistics getTableColumnStatistics(String dbName, String tableName protected ColumnStatistics getTableColumnStatisticsInternal( String dbName, String tableName, final List colNames, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - final boolean enableBitVector = 
HiveConf.getBoolVar(getConf(), - HiveConf.ConfVars.HIVE_STATS_FETCH_BITVECTOR); - return new GetStatHelper(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), allowSql, allowJdo) { + final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); + return new GetStatHelper(normalizeIdentifier(dbName), + normalizeIdentifier(tableName), allowSql, allowJdo) { @Override protected ColumnStatistics getSqlResult(GetHelper ctx) throws MetaException { return directSql.getTableStats(dbName, tblName, colNames, enableBitVector); @@ -7541,7 +7556,7 @@ protected ColumnStatistics getJdoResult( // LastAnalyzed is stored per column, but thrift object has it per multiple columns. // Luckily, nobody actually uses it, so we will set to lowest value of all columns for now. ColumnStatisticsDesc desc = StatObjectConverter.getTableColumnStatisticsDesc(mStats.get(0)); - List statObjs = new ArrayList(mStats.size()); + List statObjs = new ArrayList<>(mStats.size()); for (MTableColumnStatistics mStat : mStats) { if (desc.getLastAnalyzed() > mStat.getLastAnalyzed()) { desc.setLastAnalyzed(mStat.getLastAnalyzed()); @@ -7567,8 +7582,7 @@ protected ColumnStatistics getJdoResult( protected List getPartitionColumnStatisticsInternal( String dbName, String tableName, final List partNames, final List colNames, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - final boolean enableBitVector = HiveConf.getBoolVar(getConf(), - HiveConf.ConfVars.HIVE_STATS_FETCH_BITVECTOR); + final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); return new GetListHelper(dbName, tableName, allowSql, allowJdo) { @Override protected List getSqlResult( @@ -7582,7 +7596,7 @@ protected ColumnStatistics getJdoResult( try { List mStats = getMPartitionColumnStatistics(getTable(), partNames, colNames, queryWrapper); - List result = new ArrayList( + List result = new ArrayList<>( Math.min(mStats.size(), partNames.size())); String lastPartName = null; List curList = null; @@ -7590,7 +7604,7 @@ protected ColumnStatistics getJdoResult( for (int i = 0; i <= mStats.size(); ++i) { boolean isLast = i == mStats.size(); MPartitionColumnStatistics mStatsObj = isLast ? null : mStats.get(i); - String partName = isLast ? null : (String)mStatsObj.getPartitionName(); + String partName = isLast ? 
null : mStatsObj.getPartitionName(); if (isLast || !partName.equals(lastPartName)) { if (i != 0) { result.add(new ColumnStatistics(csd, curList)); @@ -7599,7 +7613,7 @@ protected ColumnStatistics getJdoResult( continue; } csd = StatObjectConverter.getPartitionColumnStatisticsDesc(mStatsObj); - curList = new ArrayList(colNames.size()); + curList = new ArrayList<>(colNames.size()); } curList.add(StatObjectConverter.getPartitionColumnStatisticsObj(mStatsObj, enableBitVector)); lastPartName = partName; @@ -7617,12 +7631,10 @@ protected ColumnStatistics getJdoResult( @Override public AggrStats get_aggr_stats_for(String dbName, String tblName, final List partNames, final List colNames) throws MetaException, NoSuchObjectException { - final boolean useDensityFunctionForNDVEstimation = HiveConf.getBoolVar(getConf(), - HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION); - final double ndvTuner = HiveConf.getFloatVar(getConf(), - HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER); - final boolean enableBitVector = HiveConf.getBoolVar(getConf(), - HiveConf.ConfVars.HIVE_STATS_FETCH_BITVECTOR); + final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), + ConfVars.STATS_NDV_DENSITY_FUNCTION); + final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); + final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); return new GetHelper(dbName, tblName, true, false) { @Override protected AggrStats getSqlResult(GetHelper ctx) @@ -7648,8 +7660,7 @@ protected String describeResult() { @Override public Map> getColStatsForTablePartitions(String dbName, String tableName) throws MetaException, NoSuchObjectException { - final boolean enableBitVector = HiveConf.getBoolVar(getConf(), - HiveConf.ConfVars.HIVE_STATS_FETCH_BITVECTOR); + final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); return new GetHelper>>(dbName, tableName, true, false) { @Override protected Map> getSqlResult( @@ -7791,9 +7802,9 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, query.setUnique(true); mStatsObj = (MPartitionColumnStatistics) query.executeWithArray(partName.trim(), - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), - HiveStringUtils.normalizeIdentifier(colName)); + normalizeIdentifier(dbName), + normalizeIdentifier(tableName), + normalizeIdentifier(colName)); pm.retrieve(mStatsObj); if (mStatsObj != null) { pm.deletePersistent(mStatsObj); @@ -7804,8 +7815,8 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, } else { mStatsObjColl = (List) query.execute(partName.trim(), - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); + normalizeIdentifier(dbName), + normalizeIdentifier(tableName)); pm.retrieveAll(mStatsObjColl); if (mStatsObjColl != null) { pm.deletePersistentAll(mStatsObjColl); @@ -7860,9 +7871,9 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri if (colName != null) { query.setUnique(true); mStatsObj = - (MTableColumnStatistics) query.execute(HiveStringUtils.normalizeIdentifier(tableName), - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(colName)); + (MTableColumnStatistics) query.execute(normalizeIdentifier(tableName), + normalizeIdentifier(dbName), + normalizeIdentifier(colName)); pm.retrieve(mStatsObj); if (mStatsObj != null) { @@ -7874,8 +7885,8 
@@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri } else { mStatsObjColl = (List) query.execute( - HiveStringUtils.normalizeIdentifier(tableName), - HiveStringUtils.normalizeIdentifier(dbName)); + normalizeIdentifier(tableName), + normalizeIdentifier(dbName)); pm.retrieveAll(mStatsObjColl); if (mStatsObjColl != null) { pm.deletePersistentAll(mStatsObjColl); @@ -7901,8 +7912,7 @@ public long cleanupEvents() { long delCnt; LOG.debug("Begin executing cleanupEvents"); Long expiryTime = - HiveConf.getTimeVar(getConf(), ConfVars.METASTORE_EVENT_EXPIRY_DURATION, - TimeUnit.MILLISECONDS); + MetastoreConf.getTimeVar(getConf(), ConfVars.EVENT_EXPIRY_DURATION, TimeUnit.MILLISECONDS); Long curTime = System.currentTimeMillis(); try { openTransaction(); @@ -8000,7 +8010,7 @@ public String getToken(String tokenId) { LOG.debug("Begin executing getAllTokenIdentifiers"); boolean committed = false; Query query = null; - List tokenIdents = new ArrayList(); + List tokenIdents = new ArrayList<>(); try { openTransaction(); @@ -8136,8 +8146,7 @@ private synchronized void checkSchema() throws MetaException { return; } - boolean strictValidation = - HiveConf.getBoolVar(getConf(), HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION); + boolean strictValidation = MetastoreConf.getBoolVar(getConf(), ConfVars.SCHEMA_VERIFICATION); // read the schema version stored in metastore db String dbSchemaVer = getMetaStoreSchemaVersion(); // version of schema for this version of hive @@ -8149,7 +8158,7 @@ private synchronized void checkSchema() throws MetaException { throw new MetaException("Version information not found in metastore. "); } else { LOG.warn("Version information not found in metastore. " - + HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString() + + + ConfVars.SCHEMA_VERIFICATION.toString() + " is not enabled so recording the schema version " + hiveSchemaVer); setMetaStoreSchemaVersion(hiveSchemaVer, @@ -8167,8 +8176,7 @@ private synchronized void checkSchema() throws MetaException { } else { LOG.error("Version information found in metastore differs " + dbSchemaVer + " from expected schema version " + hiveSchemaVer + - ". Schema verififcation is disabled " + - HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION); + ". 
Schema verififcation is disabled " + ConfVars.SCHEMA_VERIFICATION); setMetaStoreSchemaVersion(hiveSchemaVer, "Set by MetaStore " + USER + "@" + HOSTNAME); } @@ -8195,7 +8203,7 @@ public String getMetaStoreSchemaVersion() throws MetaException { private MVersionTable getMSchemaVersion() throws NoSuchObjectException, MetaException { boolean committed = false; Query query = null; - List mVerTables = new ArrayList(); + List mVerTables = new ArrayList<>(); try { openTransaction(); query = pm.newQuery(MVersionTable.class); @@ -8234,7 +8242,7 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro MVersionTable mSchemaVer; boolean commited = false; boolean recordVersion = - HiveConf.getBoolVar(getConf(), HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION); + MetastoreConf.getBoolVar(getConf(), ConfVars.SCHEMA_VERIFICATION_RECORD_VERSION); if (!recordVersion) { LOG.warn("setMetaStoreSchemaVersion called but recording version is disabled: " + "version = " + schemaVersion + ", comment = " + comment); @@ -8346,7 +8354,7 @@ private MFunction convertToMFunction(Function func) throws InvalidObjectExceptio private List convertToResourceUriList(List mresourceUriList) { List resourceUriList = null; if (mresourceUriList != null) { - resourceUriList = new ArrayList(mresourceUriList.size()); + resourceUriList = new ArrayList<>(mresourceUriList.size()); for (MResourceUri mres : mresourceUriList) { resourceUriList.add( new ResourceUri(ResourceType.findByValue(mres.getResourceType()), mres.getUri())); @@ -8358,7 +8366,7 @@ private MFunction convertToMFunction(Function func) throws InvalidObjectExceptio private List convertToMResourceUriList(List resourceUriList) { List mresourceUriList = null; if (resourceUriList != null) { - mresourceUriList = new ArrayList(resourceUriList.size()); + mresourceUriList = new ArrayList<>(resourceUriList.size()); for (ResourceUri res : resourceUriList) { mresourceUriList.add(new MResourceUri(res.getResourceType().getValue(), res.getUri())); } @@ -8387,8 +8395,8 @@ public void alterFunction(String dbName, String funcName, Function newFunction) boolean success = false; try { openTransaction(); - funcName = HiveStringUtils.normalizeIdentifier(funcName); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + funcName = normalizeIdentifier(funcName); + dbName = normalizeIdentifier(dbName); MFunction newf = convertToMFunction(newFunction); if (newf == null) { throw new InvalidObjectException("new function is invalid"); @@ -8400,7 +8408,7 @@ public void alterFunction(String dbName, String funcName, Function newFunction) } // For now only alter name, owner, class name, type - oldf.setFunctionName(HiveStringUtils.normalizeIdentifier(newf.getFunctionName())); + oldf.setFunctionName(normalizeIdentifier(newf.getFunctionName())); oldf.setDatabase(newf.getDatabase()); oldf.setOwnerName(newf.getOwnerName()); oldf.setOwnerType(newf.getOwnerType()); @@ -8442,8 +8450,8 @@ private MFunction getMFunction(String db, String function) { Query query = null; try { openTransaction(); - db = HiveStringUtils.normalizeIdentifier(db); - function = HiveStringUtils.normalizeIdentifier(function); + db = normalizeIdentifier(db); + function = normalizeIdentifier(function); query = pm.newQuery(MFunction.class, "functionName == function && database.name == db"); query.declareParameters("java.lang.String function, java.lang.String db"); query.setUnique(true); @@ -8496,7 +8504,7 @@ public Function getFunction(String dbName, String funcName) throws MetaException List funcs = 
null; try { openTransaction(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); + dbName = normalizeIdentifier(dbName); // Take the pattern and split it on the | to get all the composing // patterns List parameterVals = new ArrayList<>(); @@ -8509,7 +8517,7 @@ public Function getFunction(String dbName, String funcName) throws MetaException query.setResult("functionName"); query.setOrdering("functionName ascending"); Collection names = (Collection) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); - funcs = new ArrayList(); + funcs = new ArrayList<>(); for (Iterator i = names.iterator(); i.hasNext();) { funcs.add((String) i.next()); } @@ -8556,7 +8564,7 @@ public NotificationEventResponse getNextNotification(NotificationEventRequest rq private void lockForUpdate() throws MetaException { String selectQuery = "select \"NEXT_EVENT_ID\" from \"NOTIFICATION_SEQUENCE\""; String selectForUpdateQuery = sqlGenerator.addForUpdateClause(selectQuery); - new RetryingExecutor(hiveConf, () -> { + new RetryingExecutor(conf, () -> { Query query = pm.newQuery("javax.jdo.query.SQL", selectForUpdateQuery); query.setUnique(true); // only need to execute it to get db Lock @@ -8576,13 +8584,10 @@ private void lockForUpdate() throws MetaException { private final Command command; RetryingExecutor(Configuration config, Command command) { - this.maxRetries = config.getInt(ConfVars.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.name(), - ConfVars.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.defaultIntVal); - this.sleepInterval = config.getTimeDuration( - ConfVars.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL.name(), - ConfVars.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL.defaultLongVal, - TimeUnit.MILLISECONDS - ); + this.maxRetries = + MetastoreConf.getIntVar(config, ConfVars.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES); + this.sleepInterval = MetastoreConf.getTimeVar(config, + ConfVars.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL, TimeUnit.MILLISECONDS); this.command = command; } @@ -8885,8 +8890,8 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN final String tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - final String db_name = HiveStringUtils.normalizeIdentifier(db_name_input); - final String tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name_input); + final String db_name = normalizeIdentifier(db_name_input); + final String tbl_name = normalizeIdentifier(tbl_name_input); return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { @Override @@ -8914,7 +8919,7 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); Collection constraints = (Collection) query.execute(tbl_name, db_name); pm.retrieveAll(constraints); - primaryKeys = new ArrayList(); + primaryKeys = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currPK = (MConstraint) i.next(); int enableValidateRely = currPK.getEnableValidateRely(); @@ -9010,7 +9015,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro List foreignKeys = null; Collection constraints = null; Query query = null; - Map tblToConstraint = new HashMap(); + Map tblToConstraint = new HashMap<>(); try { openTransaction(); String queryText = (parent_tbl_name != null ? 
"parentTable.tableName == parent_tbl_name && " : "") @@ -9029,7 +9034,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro paramText = paramText.substring(0, paramText.length()-1); } query.declareParameters(paramText); - List params = new ArrayList(); + List params = new ArrayList<>(); if (parent_tbl_name != null) { params.add(parent_tbl_name); } @@ -9055,7 +9060,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro params.get(2), params.get(3)); } pm.retrieveAll(constraints); - foreignKeys = new ArrayList(); + foreignKeys = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currPKFK = (MConstraint) i.next(); int enableValidateRely = currPKFK.getEnableValidateRely(); @@ -9105,8 +9110,8 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro protected List getUniqueConstraintsInternal(final String db_name_input, final String tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - final String db_name = HiveStringUtils.normalizeIdentifier(db_name_input); - final String tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name_input); + final String db_name = normalizeIdentifier(db_name_input); + final String tbl_name = normalizeIdentifier(tbl_name_input); return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { @Override @@ -9136,7 +9141,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); Collection constraints = (Collection) query.execute(tbl_name, db_name); pm.retrieveAll(constraints); - uniqueConstraints = new ArrayList(); + uniqueConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currPK = (MConstraint) i.next(); int enableValidateRely = currPK.getEnableValidateRely(); @@ -9174,8 +9179,8 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro protected List getNotNullConstraintsInternal(final String db_name_input, final String tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - final String db_name = HiveStringUtils.normalizeIdentifier(db_name_input); - final String tbl_name = HiveStringUtils.normalizeIdentifier(tbl_name_input); + final String db_name = normalizeIdentifier(db_name_input); + final String tbl_name = normalizeIdentifier(tbl_name_input); return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { @Override @@ -9205,7 +9210,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); Collection constraints = (Collection) query.execute(tbl_name, db_name); pm.retrieveAll(constraints); - notNullConstraints = new ArrayList(); + notNullConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currPK = (MConstraint) i.next(); int enableValidateRely = currPK.getEnableValidateRely(); @@ -9300,4 +9305,9 @@ void rollbackAndCleanup(boolean success, QueryWrapper queryWrapper) { public static void setTwoMetastoreTesting(boolean twoMetastoreTesting) { forTwoMetastoreTesting = twoMetastoreTesting; } + + @VisibleForTesting + Properties getProp() { + return prop; + } } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java 
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java similarity index 86% rename from metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java index 41d7e81748..2671c1fc57 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,12 @@ import org.antlr.runtime.CommonTokenStream; import org.antlr.runtime.RecognitionException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; import org.apache.hadoop.hive.metastore.parser.FilterLexer; @@ -47,7 +49,13 @@ public static ExpressionTree makeExpressionTree(PartitionExpressionProxy express try { filter = expressionProxy.convertExprToFilter(expr); } catch (MetaException ex) { - throw new IMetaStoreClient.IncompatibleMetastoreException(ex.getMessage()); + // TODO MS-SPLIT - for now we have construct this by reflection because IMetaStoreClient + // can't be + // moved until after HiveMetaStore is moved, which can't be moved until this is moved. + Class exClass = JavaUtils.getClass( + "org.apache.hadoop.hive.metastore.IMetaStoreClient$IncompatibleMetastoreException", + MetaException.class); + throw JavaUtils.newInstance(exClass, new Class[]{String.class}, new Object[]{ex.getMessage()}); } // Make a tree out of the filter. @@ -68,12 +76,12 @@ public static ExpressionTree makeExpressionTree(PartitionExpressionProxy express * @return The partition expression proxy. */ public static PartitionExpressionProxy createExpressionProxy(Configuration conf) { - String className = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS); + String className = MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS); try { @SuppressWarnings("unchecked") Class clazz = - (Class)MetaStoreUtils.getClass(className); - return MetaStoreUtils.newInstance( + JavaUtils.getClass(className, PartitionExpressionProxy.class); + return JavaUtils.newInstance( clazz, new Class[0], new Object[0]); } catch (MetaException e) { LOG.error("Error loading PartitionExpressionProxy", e); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java similarity index 72% rename from metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index 2bc4d99a71..0e6d8a4da8 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,8 +26,8 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -76,10 +76,10 @@ */ @Target(value = ElementType.METHOD) @Retention(value = RetentionPolicy.RUNTIME) - public @interface CanNotRetry { + @interface CanNotRetry { } - public abstract void shutdown(); + void shutdown(); /** * Opens a new one or the one already created Every call of this function must @@ -88,7 +88,7 @@ * @return an active transaction */ - public abstract boolean openTransaction(); + boolean openTransaction(); /** * if this is the commit of the first open call then an actual commit is @@ -97,77 +97,77 @@ * @return true or false */ @CanNotRetry - public abstract boolean commitTransaction(); + boolean commitTransaction(); - public boolean isActiveTransaction(); + boolean isActiveTransaction(); /** * Rolls back the current transaction if it is active */ @CanNotRetry - public abstract void rollbackTransaction(); + void rollbackTransaction(); - public abstract void createDatabase(Database db) + void createDatabase(Database db) throws InvalidObjectException, MetaException; - public abstract Database getDatabase(String name) + Database getDatabase(String name) throws NoSuchObjectException; - public abstract boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException; + boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException; - public abstract boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, MetaException; + boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, MetaException; - public abstract List getDatabases(String pattern) throws MetaException; + List getDatabases(String pattern) throws MetaException; - public abstract List getAllDatabases() throws MetaException; + List getAllDatabases() throws MetaException; - public abstract boolean createType(Type type); + boolean createType(Type type); - public abstract Type getType(String typeName); + Type getType(String typeName); - public abstract boolean dropType(String typeName); + boolean dropType(String typeName); - public abstract void createTable(Table tbl) throws InvalidObjectException, + void createTable(Table tbl) throws InvalidObjectException, MetaException; - public abstract boolean dropTable(String dbName, String tableName) + boolean dropTable(String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - public abstract Table getTable(String dbName, String tableName) + Table getTable(String dbName, String tableName) throws MetaException; - public abstract boolean addPartition(Partition part) + boolean addPartition(Partition part) throws InvalidObjectException, MetaException; - public abstract boolean addPartitions(String dbName, String tblName, List parts) + boolean addPartitions(String dbName, String tblName, List parts) throws InvalidObjectException, MetaException; - public abstract boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) + boolean addPartitions(String dbName, String tblName, PartitionSpecProxy 
partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException; - public abstract Partition getPartition(String dbName, String tableName, + Partition getPartition(String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; - public abstract boolean doesPartitionExist(String dbName, String tableName, + boolean doesPartitionExist(String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; - public abstract boolean dropPartition(String dbName, String tableName, + boolean dropPartition(String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - public abstract List getPartitions(String dbName, + List getPartitions(String dbName, String tableName, int max) throws MetaException, NoSuchObjectException; - public abstract void alterTable(String dbname, String name, Table newTable) + void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException, MetaException; - public List getTables(String dbName, String pattern) + List getTables(String dbName, String pattern) throws MetaException; - public List getTables(String dbName, String pattern, TableType tableType) + List getTables(String dbName, String pattern, TableType tableType) throws MetaException; - public List getTableMeta( + List getTableMeta( String dbNames, String tableNames, List tableTypes) throws MetaException; /** @@ -180,10 +180,10 @@ public abstract void alterTable(String dbname, String name, Table newTable) * If there are duplicate names, only one instance of the table will be returned * @throws MetaException */ - public List
getTableObjectsByName(String dbname, List tableNames) + List
getTableObjectsByName(String dbname, List tableNames) throws MetaException, UnknownDBException; - public List getAllTables(String dbName) throws MetaException; + List getAllTables(String dbName) throws MetaException; /** * Gets a list of tables based on a filter string and filter type. @@ -197,127 +197,127 @@ public abstract void alterTable(String dbname, String name, Table newTable) * @throws MetaException * @throws UnknownDBException */ - public abstract List listTableNamesByFilter(String dbName, + List listTableNamesByFilter(String dbName, String filter, short max_tables) throws MetaException, UnknownDBException; - public abstract List listPartitionNames(String db_name, + List listPartitionNames(String db_name, String tbl_name, short max_parts) throws MetaException; - public abstract PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, - List cols, boolean applyDistinct, String filter, boolean ascending, - List order, long maxParts) throws MetaException; + PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, + List cols, boolean applyDistinct, String filter, boolean ascending, + List order, long maxParts) throws MetaException; - public abstract List listPartitionNamesByFilter(String db_name, + List listPartitionNamesByFilter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException; - public abstract void alterPartition(String db_name, String tbl_name, List part_vals, + void alterPartition(String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidObjectException, MetaException; - public abstract void alterPartitions(String db_name, String tbl_name, + void alterPartitions(String db_name, String tbl_name, List> part_vals_list, List new_parts) throws InvalidObjectException, MetaException; - public abstract boolean addIndex(Index index) + boolean addIndex(Index index) throws InvalidObjectException, MetaException; - public abstract Index getIndex(String dbName, String origTableName, String indexName) throws MetaException; + Index getIndex(String dbName, String origTableName, String indexName) throws MetaException; - public abstract boolean dropIndex(String dbName, String origTableName, String indexName) throws MetaException; + boolean dropIndex(String dbName, String origTableName, String indexName) throws MetaException; - public abstract List getIndexes(String dbName, + List getIndexes(String dbName, String origTableName, int max) throws MetaException; - public abstract List listIndexNames(String dbName, + List listIndexNames(String dbName, String origTableName, short max) throws MetaException; - public abstract void alterIndex(String dbname, String baseTblName, String name, Index newIndex) + void alterIndex(String dbname, String baseTblName, String name, Index newIndex) throws InvalidObjectException, MetaException; - public abstract List getPartitionsByFilter( + List getPartitionsByFilter( String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException; - public abstract boolean getPartitionsByExpr(String dbName, String tblName, + boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException; - public abstract int getNumPartitionsByFilter(String dbName, String tblName, String filter) + int getNumPartitionsByFilter(String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException; - public abstract int getNumPartitionsByExpr(String dbName, 
String tblName, byte[] expr) throws MetaException, NoSuchObjectException; + int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException; - public abstract List getPartitionsByNames( + List getPartitionsByNames( String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException; - public abstract Table markPartitionForEvent(String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + Table markPartitionForEvent(String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; - public abstract boolean isPartitionMarkedForEvent(String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + boolean isPartitionMarkedForEvent(String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; - public abstract boolean addRole(String rowName, String ownerName) + boolean addRole(String rowName, String ownerName) throws InvalidObjectException, MetaException, NoSuchObjectException; - public abstract boolean removeRole(String roleName) throws MetaException, NoSuchObjectException; + boolean removeRole(String roleName) throws MetaException, NoSuchObjectException; - public abstract boolean grantRole(Role role, String userName, PrincipalType principalType, + boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor, PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException, InvalidObjectException; - public abstract boolean revokeRole(Role role, String userName, PrincipalType principalType, + boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption) throws MetaException, NoSuchObjectException; - public abstract PrincipalPrivilegeSet getUserPrivilegeSet(String userName, + PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName, + PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName, + PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName, + PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, + PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException; - public abstract List listPrincipalGlobalGrants(String principalName, + List listPrincipalGlobalGrants(String principalName, PrincipalType 
principalType); - public abstract List listPrincipalDBGrants(String principalName, + List listPrincipalDBGrants(String principalName, PrincipalType principalType, String dbName); - public abstract List listAllTableGrants( + List listAllTableGrants( String principalName, PrincipalType principalType, String dbName, String tableName); - public abstract List listPrincipalPartitionGrants( + List listPrincipalPartitionGrants( String principalName, PrincipalType principalType, String dbName, String tableName, List partValues, String partName); - public abstract List listPrincipalTableColumnGrants( + List listPrincipalTableColumnGrants( String principalName, PrincipalType principalType, String dbName, String tableName, String columnName); - public abstract List listPrincipalPartitionColumnGrants( + List listPrincipalPartitionColumnGrants( String principalName, PrincipalType principalType, String dbName, String tableName, List partValues, String partName, String columnName); - public abstract boolean grantPrivileges (PrivilegeBag privileges) + boolean grantPrivileges (PrivilegeBag privileges) throws InvalidObjectException, MetaException, NoSuchObjectException; - public abstract boolean revokePrivileges (PrivilegeBag privileges, boolean grantOption) + boolean revokePrivileges (PrivilegeBag privileges, boolean grantOption) throws InvalidObjectException, MetaException, NoSuchObjectException; - public abstract org.apache.hadoop.hive.metastore.api.Role getRole( + org.apache.hadoop.hive.metastore.api.Role getRole( String roleName) throws NoSuchObjectException; - public List listRoleNames(); + List listRoleNames(); - public List listRoles(String principalName, + List listRoles(String principalName, PrincipalType principalType); - public List listRolesWithGrants(String principalName, + List listRolesWithGrants(String principalName, PrincipalType principalType); @@ -326,14 +326,14 @@ public abstract boolean revokePrivileges (PrivilegeBag privileges, boolean gran * @param roleName * @return */ - public List listRoleMembers(String roleName); + List listRoleMembers(String roleName); - public abstract Partition getPartitionWithAuth(String dbName, String tblName, + Partition getPartitionWithAuth(String dbName, String tblName, List partVals, String user_name, List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException; - public abstract List getPartitionsWithAuth(String dbName, + List getPartitionsWithAuth(String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException; @@ -352,7 +352,7 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName, * @throws MetaException * @throws NoSuchObjectException */ - public abstract List listPartitionNamesPs(String db_name, String tbl_name, + List listPartitionNamesPs(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException; @@ -377,7 +377,7 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName, * @throws NoSuchObjectException * @throws InvalidObjectException */ - public abstract List listPartitionsPsWithAuth(String db_name, String tbl_name, + List listPartitionsPsWithAuth(String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException; @@ -389,7 +389,7 @@ public abstract Partition getPartitionWithAuth(String dbName, String tblName, * @throws InvalidObjectException 
* @throws InvalidInputException */ - public abstract boolean updateTableColumnStatistics(ColumnStatistics colStats) + boolean updateTableColumnStatistics(ColumnStatistics colStats) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; /** Persists the given column statistics object to the metastore @@ -402,7 +402,7 @@ public abstract boolean updateTableColumnStatistics(ColumnStatistics colStats) * @throws InvalidObjectException * @throws InvalidInputException */ - public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, + boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, List partVals) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; @@ -418,14 +418,14 @@ public abstract boolean updatePartitionColumnStatistics(ColumnStatistics statsOb * @throws MetaException * */ - public abstract ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + ColumnStatistics getTableColumnStatistics(String dbName, String tableName, List colName) throws MetaException, NoSuchObjectException; /** * Returns the relevant column statistics for given columns in given partitions in a given * table in a given database if such statistics exist. */ - public abstract List getPartitionColumnStatistics( + List getPartitionColumnStatistics( String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; @@ -446,7 +446,7 @@ public abstract ColumnStatistics getTableColumnStatistics(String dbName, String * @throws InvalidInputException */ - public abstract boolean deletePartitionColumnStatistics(String dbName, String tableName, + boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; @@ -465,34 +465,34 @@ public abstract boolean deletePartitionColumnStatistics(String dbName, String ta * @throws InvalidInputException */ - public abstract boolean deleteTableColumnStatistics(String dbName, String tableName, + boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; - public abstract long cleanupEvents(); + long cleanupEvents(); - public abstract boolean addToken(String tokenIdentifier, String delegationToken); + boolean addToken(String tokenIdentifier, String delegationToken); - public abstract boolean removeToken(String tokenIdentifier); + boolean removeToken(String tokenIdentifier); - public abstract String getToken(String tokenIdentifier); + String getToken(String tokenIdentifier); - public abstract List getAllTokenIdentifiers(); + List getAllTokenIdentifiers(); - public abstract int addMasterKey(String key) throws MetaException; + int addMasterKey(String key) throws MetaException; - public abstract void updateMasterKey(Integer seqNo, String key) + void updateMasterKey(Integer seqNo, String key) throws NoSuchObjectException, MetaException; - public abstract boolean removeMasterKey(Integer keySeq); + boolean removeMasterKey(Integer keySeq); - public abstract String[] getMasterKeys(); + String[] getMasterKeys(); - public abstract void verifySchema() throws MetaException; + void verifySchema() throws MetaException; - public abstract String getMetaStoreSchemaVersion() throws MetaException; + String getMetaStoreSchemaVersion() throws MetaException; - public abstract void 
setMetaStoreSchemaVersion(String version, String comment) throws MetaException; + abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException; void dropPartitions(String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException; @@ -533,7 +533,7 @@ void dropPartitions(String dbName, String tblName, List partNames) * @throws InvalidObjectException * @throws MetaException */ - public void createFunction(Function func) + void createFunction(Function func) throws InvalidObjectException, MetaException; /** @@ -544,7 +544,7 @@ public void createFunction(Function func) * @throws InvalidObjectException * @throws MetaException */ - public void alterFunction(String dbName, String funcName, Function newFunction) + void alterFunction(String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException; /** @@ -556,7 +556,7 @@ public void alterFunction(String dbName, String funcName, Function newFunction) * @throws InvalidObjectException * @throws InvalidInputException */ - public void dropFunction(String dbName, String funcName) + void dropFunction(String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; /** @@ -566,14 +566,14 @@ public void dropFunction(String dbName, String funcName) * @return * @throws MetaException */ - public Function getFunction(String dbName, String funcName) throws MetaException; + Function getFunction(String dbName, String funcName) throws MetaException; /** * Retrieve all functions. * @return * @throws MetaException */ - public List getAllFunctions() throws MetaException; + List getAllFunctions() throws MetaException; /** * Retrieve list of function names based on name pattern. @@ -582,9 +582,9 @@ public void dropFunction(String dbName, String funcName) * @return * @throws MetaException */ - public List getFunctions(String dbName, String pattern) throws MetaException; + List getFunctions(String dbName, String pattern) throws MetaException; - public AggrStats get_aggr_stats_for(String dbName, String tblName, + AggrStats get_aggr_stats_for(String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; /** @@ -597,7 +597,7 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, * @throws MetaException * @throws NoSuchObjectException */ - public Map> getColStatsForTablePartitions(String dbName, + Map> getColStatsForTablePartitions(String dbName, String tableName) throws MetaException, NoSuchObjectException; /** @@ -605,20 +605,20 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, * @param rqst Request containing information on the last processed notification. * @return list of notifications, sorted by eventId */ - public NotificationEventResponse getNextNotification(NotificationEventRequest rqst); + NotificationEventResponse getNextNotification(NotificationEventRequest rqst); /** * Add a notification entry. This should only be called from inside the metastore * @param event the notification to add */ - public void addNotificationEvent(NotificationEvent event); + void addNotificationEvent(NotificationEvent event); /** * Remove older notification events. * @param olderThan Remove any events older than a given number of seconds */ - public void cleanNotificationEvents(int olderThan); + void cleanNotificationEvents(int olderThan); /** * Get the last issued notification event id. 
This is intended for use by the export command @@ -626,7 +626,7 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, * and determine which notification events happened before or after the export. * @return */ - public CurrentNotificationEventId getCurrentNotificationEventId(); + CurrentNotificationEventId getCurrentNotificationEventId(); /** * Get the number of events corresponding to given database with fromEventId. @@ -639,7 +639,7 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, * Flush any catalog objects held by the metastore implementation. Note that this does not * flush statistics objects. This should be called at the beginning of each query. */ - public void flushCache(); + void flushCache(); /** * @param fileIds List of file IDs from the filesystem. @@ -699,7 +699,7 @@ void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] @InterfaceStability.Evolving int getDatabaseCount() throws MetaException; - public abstract List getPrimaryKeys(String db_name, + List getPrimaryKeys(String db_name, String tbl_name) throws MetaException; /** @@ -714,14 +714,14 @@ void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] * matches the arguments the results here will be all mixed together into a single list. * @throws MetaException if something goes wrong. */ - public abstract List getForeignKeys(String parent_db_name, + List getForeignKeys(String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException; - public abstract List getUniqueConstraints(String db_name, + List getUniqueConstraints(String db_name, String tbl_name) throws MetaException; - public abstract List getNotNullConstraints(String db_name, + List getNotNullConstraints(String db_name, String tbl_name) throws MetaException; List createTableWithConstraints(Table tbl, List primaryKeys, diff --git metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java similarity index 83% rename from metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java index c5e117d8a9..2fd22683ef 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStoreProxy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,11 +27,12 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.lang.ClassUtils; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.classification.InterfaceAudience; -import org.apache.hadoop.hive.common.classification.InterfaceStability; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.apache.hadoop.util.ReflectionUtils; @InterfaceAudience.Private @@ -41,16 +42,16 @@ private final RawStore base; private final MetaStoreInit.MetaStoreInitData metaStoreInitData = new MetaStoreInit.MetaStoreInitData(); - private final HiveConf hiveConf; + private final Configuration hiveConf; private final Configuration conf; // thread local conf from HMS private final long socketTimeout; - protected RawStoreProxy(HiveConf hiveConf, Configuration conf, + protected RawStoreProxy(Configuration hiveConf, Configuration conf, Class rawStoreClass, int id) throws MetaException { this.conf = conf; this.hiveConf = hiveConf; - this.socketTimeout = HiveConf.getTimeVar(hiveConf, - HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS); + this.socketTimeout = MetastoreConf.getTimeVar(hiveConf, + MetastoreConf.ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS); // This has to be called before initializing the instance of RawStore init(); @@ -58,11 +59,10 @@ protected RawStoreProxy(HiveConf hiveConf, Configuration conf, this.base = ReflectionUtils.newInstance(rawStoreClass, conf); } - public static RawStore getProxy(HiveConf hiveConf, Configuration conf, String rawStoreClassName, + public static RawStore getProxy(Configuration hiveConf, Configuration conf, String rawStoreClassName, int id) throws MetaException { - Class baseClass = (Class) MetaStoreUtils.getClass( - rawStoreClassName); + Class baseClass = JavaUtils.getClass(rawStoreClassName, RawStore.class); RawStoreProxy handler = new RawStoreProxy(hiveConf, conf, baseClass, id); @@ -88,10 +88,6 @@ private void init() throws MetaException { MetaStoreInit.updateConnectionURL(hiveConf, getConf(), null, metaStoreInitData); } - private void initMS() { - base.setConf(getConf()); - } - @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { try { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java index 7c8054bee6..08ea67fc2f 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java index 45ed1e70e7..2e92a4f4e1 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java similarity index 85% rename from metastore/src/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java index aaeb6d4768..ab6b90fb6b 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,14 +22,13 @@ import java.util.List; import java.util.regex.Pattern; -import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.cache.CachedStore.PartitionWrapper; import org.apache.hadoop.hive.metastore.cache.CachedStore.TableWrapper; -import org.apache.hive.common.util.HiveStringUtils; +import org.apache.hadoop.hive.metastore.utils.StringUtils; public class CacheUtils { private static final String delimit = "\u0001"; @@ -79,7 +78,7 @@ public static String buildKey(String dbName, String tableName, String colName) { String[] comps = key.split(delimit); result[0] = comps[0]; result[1] = comps[1]; - List vals = new ArrayList(); + List vals = new ArrayList<>(); for (int i=2;i()); + sdCopy.setBucketCols(new ArrayList<>()); } if (sdCopy.getSortCols()==null) { - sdCopy.setSortCols(new ArrayList()); + sdCopy.setSortCols(new ArrayList<>()); } if (sdCopy.getSkewedInfo()==null) { - sdCopy.setSkewedInfo(new SkewedInfo(new ArrayList(), - new ArrayList>(), new HashMap,String>())); + sdCopy.setSkewedInfo(new SkewedInfo(new ArrayList<>(), + new ArrayList<>(), new HashMap<>())); } sdCopy.setLocation(wrapper.getLocation()); sdCopy.setParameters(wrapper.getParameters()); @@ -114,14 +113,14 @@ static Partition assemble(PartitionWrapper wrapper, SharedCache sharedCache) { if (wrapper.getSdHash()!=null) { StorageDescriptor sdCopy = sharedCache.getSdFromCache(wrapper.getSdHash()).deepCopy(); if (sdCopy.getBucketCols()==null) { - sdCopy.setBucketCols(new ArrayList()); + sdCopy.setBucketCols(new ArrayList<>()); } if (sdCopy.getSortCols()==null) { - sdCopy.setSortCols(new ArrayList()); + 
sdCopy.setSortCols(new ArrayList<>()); } if (sdCopy.getSkewedInfo()==null) { - sdCopy.setSkewedInfo(new SkewedInfo(new ArrayList(), - new ArrayList>(), new HashMap,String>())); + sdCopy.setSkewedInfo(new SkewedInfo(new ArrayList<>(), + new ArrayList<>(), new HashMap<>())); } sdCopy.setLocation(wrapper.getLocation()); sdCopy.setParameters(wrapper.getParameters()); @@ -135,7 +134,7 @@ public static boolean matches(String name, String pattern) { for (String subpattern : subpatterns) { subpattern = "(?i)" + subpattern.replaceAll("\\?", ".{1}").replaceAll("\\*", ".*") .replaceAll("\\^", "\\\\^").replaceAll("\\$", "\\\\$"); - if (Pattern.matches(subpattern, HiveStringUtils.normalizeIdentifier(name))) { + if (Pattern.matches(subpattern, StringUtils.normalizeIdentifier(name))) { return true; } } diff --git metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java similarity index 86% rename from metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index cdac0d32fd..e66052bf8b 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,13 +37,9 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.Deadline; import org.apache.hadoop.hive.metastore.FileMetadataHandler; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.PartFilterExprUtil; import org.apache.hadoop.hive.metastore.PartitionExpressionProxy; @@ -91,8 +87,12 @@ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; -import org.apache.hive.common.util.HiveStringUtils; +import org.apache.hadoop.hive.metastore.utils.FileUtils; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -123,8 +123,8 @@ private static ReentrantReadWriteLock partitionColStatsCacheLock = new ReentrantReadWriteLock( true); private static AtomicBoolean isPartitionColStatsCacheDirty = new AtomicBoolean(false); - RawStore rawStore = null; - Configuration conf; + private RawStore rawStore = null; + private Configuration conf; private PartitionExpressionProxy expressionProxy = null; // Default value set to 100 milliseconds for test purpose private static long cacheRefreshPeriod = 100; @@ -203,12 +203,12 @@ public int getRefCount() { public 
CachedStore() { } - public static void initSharedCacheAsync(HiveConf conf) { + public static void initSharedCacheAsync(Configuration conf) { String clazzName = null; boolean isEnabled = false; try { clazzName = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.RAW_STORE_IMPL); - isEnabled = MetaStoreUtils.getClass(clazzName).isAssignableFrom(CachedStore.class); + isEnabled = JavaUtils.getClass(clazzName, RawStore.class).isAssignableFrom(CachedStore.class); } catch (MetaException e) { LOG.error("Cannot instantiate metastore class", e); } @@ -221,12 +221,11 @@ public static void initSharedCacheAsync(HiveConf conf) { @Override public void setConf(Configuration conf) { - String rawStoreClassName = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CACHED_RAW_STORE_IMPL, + String rawStoreClassName = MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); if (rawStore == null) { try { - rawStore = ((Class) MetaStoreUtils.getClass( - rawStoreClassName)).newInstance(); + rawStore = (JavaUtils.getClass(rawStoreClassName, RawStore.class)).newInstance(); } catch (Exception e) { throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e); } @@ -250,18 +249,18 @@ static void prewarm(RawStore rawStore) throws Exception { SharedCache sharedCache = sharedCacheWrapper.getUnsafe(); for (String dbName : dbNames) { Database db = rawStore.getDatabase(dbName); - sharedCache.addDatabaseToCache(HiveStringUtils.normalizeIdentifier(dbName), db); + sharedCache.addDatabaseToCache(StringUtils.normalizeIdentifier(dbName), db); List tblNames = rawStore.getAllTables(dbName); for (String tblName : tblNames) { Table table = rawStore.getTable(dbName, tblName); - sharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), table); + sharedCache.addTableToCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), table); Deadline.startTimer("getPartitions"); List partitions = rawStore.getPartitions(dbName, tblName, Integer.MAX_VALUE); Deadline.stopTimer(); for (Partition partition : partitions) { - sharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partition); + sharedCache.addPartitionToCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partition); } // Cache partition column stats Deadline.startTimer("getColStatsForTablePartitions"); @@ -278,8 +277,8 @@ static void prewarm(RawStore rawStore) throws Exception { rawStore.getTableColumnStatistics(dbName, tblName, colNames); Deadline.stopTimer(); if ((tableColStats != null) && (tableColStats.getStatsObjSize() > 0)) { - sharedCache.addTableColStatsToCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); + sharedCache.addTableColStatsToCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); } } } @@ -297,10 +296,9 @@ public Thread newThread(Runnable r) { return t; } }); - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) { + if (!MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) { cacheRefreshPeriod = - HiveConf.getTimeVar(conf, - HiveConf.ConfVars.METASTORE_CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, + MetastoreConf.getTimeVar(conf, ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS); } LOG.info("CachedStore: starting cache update service (run every " + 
cacheRefreshPeriod + "ms"); @@ -337,11 +335,10 @@ static void setCacheRefreshPeriod(long time) { private final RawStore rawStore; public CacheUpdateMasterWork(Configuration conf) { - String rawStoreClassName = HiveConf.getVar(conf, - ConfVars.METASTORE_CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); + String rawStoreClassName = MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, + ObjectStore.class.getName()); try { - rawStore = ((Class) MetaStoreUtils.getClass( - rawStoreClassName)).newInstance(); + rawStore = JavaUtils.getClass(rawStoreClassName, RawStore.class).newInstance(); rawStore.setConf(conf); } catch (InstantiationException | IllegalAccessException | MetaException e) { // MetaException here really means ClassNotFound (see the utility method). @@ -402,7 +399,7 @@ public void update() { private void updateDatabases(RawStore rawStore, List dbNames) { // Prepare the list of databases - List databases = new ArrayList(); + List databases = new ArrayList<>(); for (String dbName : dbNames) { Database db; try { @@ -431,13 +428,13 @@ private void updateDatabases(RawStore rawStore, List dbNames) { // Update the cached table objects private void updateTables(RawStore rawStore, String dbName) { - List
<Table> tables = new ArrayList<Table>(); + List<Table>
tables = new ArrayList<>(); try { List tblNames = rawStore.getAllTables(dbName); for (String tblName : tblNames) { Table table = - rawStore.getTable(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + rawStore.getTable(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); tables.add(table); } if (tableCacheLock.writeLock().tryLock()) { @@ -470,8 +467,8 @@ private void updateTablePartitions(RawStore rawStore, String dbName, String tblN return; } sharedCacheWrapper.getUnsafe().refreshPartitions( - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partitions); + StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partitions); } } catch (MetaException | NoSuchObjectException e) { LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); @@ -500,8 +497,8 @@ private void updateTableColStats(RawStore rawStore, String dbName, String tblNam return; } sharedCacheWrapper.getUnsafe().refreshTableColStats( - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); + StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); } } } catch (MetaException | NoSuchObjectException e) { @@ -529,8 +526,8 @@ private void updateTablePartitionColStats(RawStore rawStore, String dbName, Stri return; } sharedCacheWrapper.getUnsafe().refreshPartitionColStats( - HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), colStatsPerPartition); + StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), colStatsPerPartition); } } } catch (MetaException | NoSuchObjectException e) { @@ -583,7 +580,7 @@ public void createDatabase(Database db) throws InvalidObjectException, MetaExcep // Wait if background cache update is happening databaseCacheLock.readLock().lock(); isDatabaseCacheDirty.set(true); - sharedCache.addDatabaseToCache(HiveStringUtils.normalizeIdentifier(db.getName()), + sharedCache.addDatabaseToCache(StringUtils.normalizeIdentifier(db.getName()), db.deepCopy()); } finally { databaseCacheLock.readLock().unlock(); @@ -601,7 +598,7 @@ public Database getDatabase(String dbName) throws NoSuchObjectException { } catch (MetaException e) { throw new RuntimeException(e); // TODO: why doesn't getDatabase throw MetaEx? 
} - Database db = sharedCache.getDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbName)); + Database db = sharedCache.getDatabaseFromCache(StringUtils.normalizeIdentifier(dbName)); if (db == null) { throw new NoSuchObjectException(); } @@ -618,7 +615,7 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc // Wait if background cache update is happening databaseCacheLock.readLock().lock(); isDatabaseCacheDirty.set(true); - sharedCache.removeDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbname)); + sharedCache.removeDatabaseFromCache(StringUtils.normalizeIdentifier(dbname)); } finally { databaseCacheLock.readLock().unlock(); } @@ -637,7 +634,7 @@ public boolean alterDatabase(String dbName, Database db) throws NoSuchObjectExce // Wait if background cache update is happening databaseCacheLock.readLock().lock(); isDatabaseCacheDirty.set(true); - sharedCache.alterDatabaseInCache(HiveStringUtils.normalizeIdentifier(dbName), db); + sharedCache.alterDatabaseInCache(StringUtils.normalizeIdentifier(dbName), db); } finally { databaseCacheLock.readLock().unlock(); } @@ -651,9 +648,9 @@ public boolean alterDatabase(String dbName, Database db) throws NoSuchObjectExce if (sharedCache == null) { return rawStore.getDatabases(pattern); } - List results = new ArrayList(); + List results = new ArrayList<>(); for (String dbName : sharedCache.listCachedDatabases()) { - dbName = HiveStringUtils.normalizeIdentifier(dbName); + dbName = StringUtils.normalizeIdentifier(dbName); if (CacheUtils.matches(dbName, pattern)) { results.add(dbName); } @@ -713,8 +710,8 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException // Wait if background cache update is happening tableCacheLock.readLock().lock(); isTableCacheDirty.set(true); - sharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(tbl.getDbName()), - HiveStringUtils.normalizeIdentifier(tbl.getTableName()), tbl); + sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getDbName()), + StringUtils.normalizeIdentifier(tbl.getTableName()), tbl); } finally { tableCacheLock.readLock().unlock(); } @@ -732,8 +729,8 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, // Wait if background table cache update is happening tableCacheLock.readLock().lock(); isTableCacheDirty.set(true); - sharedCache.removeTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); + sharedCache.removeTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName)); } finally { tableCacheLock.readLock().unlock(); } @@ -742,8 +739,8 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, // Wait if background table col stats cache update is happening tableColStatsCacheLock.readLock().lock(); isTableColStatsCacheDirty.set(true); - sharedCache.removeTableColStatsFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); + sharedCache.removeTableColStatsFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName)); } finally { tableColStatsCacheLock.readLock().unlock(); } @@ -757,8 +754,8 @@ public Table getTable(String dbName, String tableName) throws MetaException { if (sharedCache == null) { return rawStore.getTable(dbName, tableName); } - Table tbl = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName)); + Table tbl = 
sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName)); if (tbl != null) { tbl.unsetPrivileges(); tbl.setRewriteEnabled(tbl.isRewriteEnabled()); @@ -776,8 +773,8 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE // Wait if background cache update is happening partitionCacheLock.readLock().lock(); isPartitionCacheDirty.set(true); - sharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(part.getDbName()), - HiveStringUtils.normalizeIdentifier(part.getTableName()), part); + sharedCache.addPartitionToCache(StringUtils.normalizeIdentifier(part.getDbName()), + StringUtils.normalizeIdentifier(part.getTableName()), part); } finally { partitionCacheLock.readLock().unlock(); } @@ -797,8 +794,8 @@ public boolean addPartitions(String dbName, String tblName, List part partitionCacheLock.readLock().lock(); isPartitionCacheDirty.set(true); for (Partition part : parts) { - sharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), part); + sharedCache.addPartitionToCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), part); } } finally { partitionCacheLock.readLock().unlock(); @@ -821,8 +818,8 @@ public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy p PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator(); while (iterator.hasNext()) { Partition part = iterator.next(); - sharedCache.addPartitionToCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), part); + sharedCache.addPartitionToCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), part); } } finally { partitionCacheLock.readLock().unlock(); @@ -839,8 +836,8 @@ public Partition getPartition(String dbName, String tableName, List part return rawStore.getPartition(dbName, tableName, part_vals); } Partition part = - sharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), part_vals); + sharedCache.getPartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), part_vals); if (part != null) { part.unsetPrivileges(); } else { @@ -856,8 +853,8 @@ public boolean doesPartitionExist(String dbName, String tableName, if (sharedCache == null) { return rawStore.doesPartitionExist(dbName, tableName, part_vals); } - return sharedCache.existPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), part_vals); + return sharedCache.existPartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), part_vals); } @Override @@ -872,8 +869,8 @@ public boolean dropPartition(String dbName, String tableName, List part_ // Wait if background cache update is happening partitionCacheLock.readLock().lock(); isPartitionCacheDirty.set(true); - sharedCache.removePartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), part_vals); + sharedCache.removePartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), part_vals); } finally { partitionCacheLock.readLock().unlock(); } @@ -882,8 +879,8 @@ public boolean dropPartition(String dbName, String tableName, List part_ // Wait if background cache update is happening 
partitionColStatsCacheLock.readLock().lock(); isPartitionColStatsCacheDirty.set(true); - sharedCache.removePartitionColStatsFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), part_vals); + sharedCache.removePartitionColStatsFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), part_vals); } finally { partitionColStatsCacheLock.readLock().unlock(); } @@ -898,8 +895,8 @@ public boolean dropPartition(String dbName, String tableName, List part_ if (sharedCache == null) { return rawStore.getPartitions(dbName, tableName, max); } - List parts = sharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), max); + List parts = sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), max); if (parts != null) { for (Partition part : parts) { part.unsetPrivileges(); @@ -920,8 +917,8 @@ public void alterTable(String dbName, String tblName, Table newTable) // Wait if background cache update is happening tableCacheLock.readLock().lock(); isTableCacheDirty.set(true); - sharedCache.alterTableInCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), newTable); + sharedCache.alterTableInCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), newTable); } finally { tableCacheLock.readLock().unlock(); } @@ -931,8 +928,8 @@ public void alterTable(String dbName, String tblName, Table newTable) // Wait if background cache update is happening partitionCacheLock.readLock().lock(); isPartitionCacheDirty.set(true); - sharedCache.alterTableInPartitionCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), newTable); + sharedCache.alterTableInPartitionCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), newTable); } finally { partitionCacheLock.readLock().unlock(); } @@ -945,8 +942,8 @@ public void alterTable(String dbName, String tblName, Table newTable) if (sharedCache == null) { return rawStore.getTables(dbName, pattern); } - List tableNames = new ArrayList(); - for (Table table : sharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) { + List tableNames = new ArrayList<>(); + for (Table table : sharedCache.listCachedTables(StringUtils.normalizeIdentifier(dbName))) { if (CacheUtils.matches(table.getTableName(), pattern)) { tableNames.add(table.getTableName()); } @@ -961,8 +958,8 @@ public void alterTable(String dbName, String tblName, Table newTable) if (sharedCache == null) { return rawStore.getTables(dbName, pattern); } - List tableNames = new ArrayList(); - for (Table table : sharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) { + List tableNames = new ArrayList<>(); + for (Table table : sharedCache.listCachedTables(StringUtils.normalizeIdentifier(dbName))) { if (CacheUtils.matches(table.getTableName(), pattern) && table.getTableType().equals(tableType.toString())) { tableNames.add(table.getTableName()); @@ -978,8 +975,8 @@ public void alterTable(String dbName, String tblName, Table newTable) if (sharedCache == null) { return rawStore.getTableMeta(dbNames, tableNames, tableTypes); } - return sharedCache.getTableMeta(HiveStringUtils.normalizeIdentifier(dbNames), - HiveStringUtils.normalizeIdentifier(tableNames), tableTypes); + return 
sharedCache.getTableMeta(StringUtils.normalizeIdentifier(dbNames), + StringUtils.normalizeIdentifier(tableNames), tableTypes); } @Override @@ -989,10 +986,10 @@ public void alterTable(String dbName, String tblName, Table newTable) if (sharedCache == null) { return rawStore.getTableObjectsByName(dbName, tblNames); } - List
<Table> tables = new ArrayList<Table>(); + List<Table>
tables = new ArrayList<>(); for (String tblName : tblNames) { - tables.add(sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName))); + tables.add(sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName))); } return tables; } @@ -1007,9 +1004,9 @@ public void alterTable(String dbName, String tblName, Table newTable) } private static List getAllTablesInternal(String dbName, SharedCache sharedCache) { - List tblNames = new ArrayList(); - for (Table tbl : sharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) { - tblNames.add(HiveStringUtils.normalizeIdentifier(tbl.getTableName())); + List tblNames = new ArrayList<>(); + for (Table tbl : sharedCache.listCachedTables(StringUtils.normalizeIdentifier(dbName))) { + tblNames.add(StringUtils.normalizeIdentifier(tbl.getTableName())); } return tblNames; } @@ -1021,9 +1018,9 @@ public void alterTable(String dbName, String tblName, Table newTable) if (sharedCache == null) { return rawStore.listTableNamesByFilter(dbName, filter, max_tables); } - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); int count = 0; - for (Table table : sharedCache.listCachedTables(HiveStringUtils.normalizeIdentifier(dbName))) { + for (Table table : sharedCache.listCachedTables(StringUtils.normalizeIdentifier(dbName))) { if (CacheUtils.matches(table.getTableName(), filter) && (max_tables == -1 || count < max_tables)) { tableNames.add(table.getTableName()); @@ -1040,12 +1037,12 @@ public void alterTable(String dbName, String tblName, Table newTable) if (sharedCache == null) { return rawStore.listPartitionNames(dbName, tblName, max_parts); } - List partitionNames = new ArrayList(); - Table t = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + List partitionNames = new ArrayList<>(); + Table t = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), max_parts)) { + for (Partition part : sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), max_parts)) { if (max_parts == -1 || count < max_parts) { partitionNames.add(Warehouse.makePartName(t.getPartitionKeys(), part.getValues())); } @@ -1078,8 +1075,8 @@ public void alterPartition(String dbName, String tblName, List partVals, // Wait if background cache update is happening partitionCacheLock.readLock().lock(); isPartitionCacheDirty.set(true); - sharedCache.alterPartitionInCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart); + sharedCache.alterPartitionInCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partVals, newPart); } finally { partitionCacheLock.readLock().unlock(); } @@ -1088,8 +1085,8 @@ public void alterPartition(String dbName, String tblName, List partVals, // Wait if background cache update is happening partitionColStatsCacheLock.readLock().lock(); isPartitionColStatsCacheDirty.set(true); - sharedCache.alterPartitionInColStatsCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart); + 
sharedCache.alterPartitionInColStatsCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partVals, newPart); } finally { partitionColStatsCacheLock.readLock().unlock(); } @@ -1109,8 +1106,8 @@ public void alterPartitions(String dbName, String tblName, List> pa for (int i = 0; i < partValsList.size(); i++) { List partVals = partValsList.get(i); Partition newPart = newParts.get(i); - sharedCache.alterPartitionInCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart); + sharedCache.alterPartitionInCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partVals, newPart); } } finally { partitionCacheLock.readLock().unlock(); @@ -1123,8 +1120,8 @@ public void alterPartitions(String dbName, String tblName, List> pa for (int i = 0; i < partValsList.size(); i++) { List partVals = partValsList.get(i); Partition newPart = newParts.get(i); - sharedCache.alterPartitionInColStatsCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partVals, newPart); + sharedCache.alterPartitionInColStatsCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partVals, newPart); } } finally { partitionColStatsCacheLock.readLock().unlock(); @@ -1171,13 +1168,13 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, String defaultPartName, short maxParts, List result, SharedCache sharedCache) throws MetaException, NoSuchObjectException { List parts = sharedCache.listCachedPartitions( - HiveStringUtils.normalizeIdentifier(table.getDbName()), - HiveStringUtils.normalizeIdentifier(table.getTableName()), maxParts); + StringUtils.normalizeIdentifier(table.getDbName()), + StringUtils.normalizeIdentifier(table.getTableName()), maxParts); for (Partition part : parts) { result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); } if (defaultPartName == null || defaultPartName.isEmpty()) { - defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); + defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); } return expressionProxy.filterPartitionsByExpr( table.getPartitionKeys(), expr, defaultPartName, result); @@ -1200,14 +1197,14 @@ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, return rawStore.getPartitionsByExpr( dbName, tblName, expr, defaultPartitionName, maxParts, result); } - List partNames = new LinkedList(); - Table table = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + List partNames = new LinkedList<>(); + Table table = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn( table, expr, defaultPartitionName, maxParts, partNames, sharedCache); for (String partName : partNames) { - Partition part = sharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partNameToVals(partName)); + Partition part = sharedCache.getPartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partNameToVals(partName)); part.unsetPrivileges(); result.add(part); } @@ -1222,8 +1219,8 @@ public int getNumPartitionsByFilter(String dbName, String tblName, // if (sharedCache == null) { 
return rawStore.getNumPartitionsByFilter(dbName, tblName, filter); // } - // Table table = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - // HiveStringUtils.normalizeIdentifier(tblName)); + // Table table = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + // StringUtils.normalizeIdentifier(tblName)); // return 0; } @@ -1234,10 +1231,10 @@ public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) if (sharedCache == null) { return rawStore.getNumPartitionsByExpr(dbName, tblName, expr); } - String defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); - List partNames = new LinkedList(); - Table table = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); + List partNames = new LinkedList<>(); + Table table = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); getPartitionNamesPrunedByExprNoTxn( table, expr, defaultPartName, Short.MAX_VALUE, partNames, sharedCache); return partNames.size(); @@ -1245,7 +1242,7 @@ public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) private static List partNameToVals(String name) { if (name == null) return null; - List vals = new ArrayList(); + List vals = new ArrayList<>(); String[] kvp = name.split("/"); for (String kv : kvp) { vals.add(FileUtils.unescapePathName(kv.substring(kv.indexOf('=') + 1))); @@ -1260,10 +1257,10 @@ public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) if (sharedCache == null) { return rawStore.getPartitionsByNames(dbName, tblName, partNames); } - List partitions = new ArrayList(); + List partitions = new ArrayList<>(); for (String partName : partNames) { - Partition part = sharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partNameToVals(partName)); + Partition part = sharedCache.getPartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partNameToVals(partName)); if (part!=null) { partitions.add(part); } @@ -1435,11 +1432,11 @@ public Partition getPartitionWithAuth(String dbName, String tblName, if (sharedCache == null) { return rawStore.getPartitionWithAuth(dbName, tblName, partVals, userName, groupNames); } - Partition p = sharedCache.getPartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), partVals); + Partition p = sharedCache.getPartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partVals); if (p!=null) { - Table t = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + Table t = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); String partName = Warehouse.makePartName(t.getPartitionKeys(), partVals); PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName, userName, groupNames); @@ -1456,12 +1453,12 @@ public Partition getPartitionWithAuth(String dbName, String tblName, if (sharedCache == null) { return rawStore.getPartitionsWithAuth(dbName, tblName, maxParts, userName, groupNames); } - Table t = 
sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); - List partitions = new ArrayList(); + Table t = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); + List partitions = new ArrayList<>(); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), maxParts)) { + for (Partition part : sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), maxParts)) { if (maxParts == -1 || count < maxParts) { String partName = Warehouse.makePartName(t.getPartitionKeys(), part.getValues()); PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName, @@ -1482,12 +1479,12 @@ public Partition getPartitionWithAuth(String dbName, String tblName, if (sharedCache == null) { return rawStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts); } - List partNames = new ArrayList(); + List partNames = new ArrayList<>(); int count = 0; - Table t = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); - for (Partition part : sharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), maxParts)) { + Table t = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); + for (Partition part : sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), maxParts)) { boolean psMatch = true; for (int i=0;i partitions = new ArrayList(); - Table t = sharedCache.getTableFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName)); + List partitions = new ArrayList<>(); + Table t = sharedCache.getTableFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), maxParts)) { + for (Partition part : sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), maxParts)) { boolean psMatch = true; for (int i=0;i colStatObjs = new ArrayList(); + List colStatObjs = new ArrayList<>(); for (String colName : colNames) { String colStatsCacheKey = - CacheUtils.buildKey(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), colName); + CacheUtils.buildKey(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), colName); ColumnStatisticsObj colStat = sharedCache.getCachedTableColStats(colStatsCacheKey); if (colStat != null) { colStatObjs.add(colStat); @@ -1625,8 +1622,8 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri // Wait if background cache update is happening tableColStatsCacheLock.readLock().lock(); isTableColStatsCacheDirty.set(true); - sharedCache.removeTableColStatsFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tableName), colName); + sharedCache.removeTableColStatsFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tableName), colName); } finally { 
tableColStatsCacheLock.readLock().unlock(); } @@ -1656,8 +1653,8 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List return rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); } List colStats = mergeColStatsForPartitions( - HiveStringUtils.normalizeIdentifier(dbName), HiveStringUtils.normalizeIdentifier(tblName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), partNames, colNames, sharedCache); return new AggrStats(colStats, partNames.size()); } @@ -1724,10 +1721,9 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, List private List mergeColStatsForPartitions(String dbName, String tblName, List partNames, List colNames, SharedCache sharedCache) throws MetaException { - final boolean useDensityFunctionForNDVEstimation = HiveConf.getBoolVar(getConf(), - HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION); - final double ndvTuner = HiveConf.getFloatVar(getConf(), - HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER); + final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), + ConfVars.STATS_NDV_DENSITY_FUNCTION); + final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); Map> map = new HashMap<>(); for (String colName : colNames) { @@ -1833,8 +1829,8 @@ public void dropPartitions(String dbName, String tblName, List partNames isPartitionCacheDirty.set(true); for (String partName : partNames) { List vals = partNameToVals(partName); - sharedCache.removePartitionFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), vals); + sharedCache.removePartitionFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), vals); } } finally { partitionCacheLock.readLock().unlock(); @@ -1846,8 +1842,8 @@ public void dropPartitions(String dbName, String tblName, List partNames isPartitionColStatsCacheDirty.set(true); for (String partName : partNames) { List part_vals = partNameToVals(partName); - sharedCache.removePartitionColStatsFromCache(HiveStringUtils.normalizeIdentifier(dbName), - HiveStringUtils.normalizeIdentifier(tblName), part_vals); + sharedCache.removePartitionColStatsFromCache(StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), part_vals); } } finally { partitionColStatsCacheLock.readLock().unlock(); @@ -2085,8 +2081,8 @@ public int getDatabaseCount() throws MetaException { uniqueConstraints, notNullConstraints); SharedCache sharedCache = sharedCacheWrapper.get(); if (sharedCache == null) return constraintNames; - sharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(tbl.getDbName()), - HiveStringUtils.normalizeIdentifier(tbl.getTableName()), tbl); + sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getDbName()), + StringUtils.normalizeIdentifier(tbl.getTableName()), tbl); return constraintNames; } @@ -2149,7 +2145,7 @@ public String getMetastoreDbUuid() throws MetaException { // CachedStore that's specific to SharedCache (e.g. update threads) should be refactored to // be part of this, then this could be moved out of this file (or merged with SharedCache). 
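For reference, the hunks above consistently swap HiveConf lookups for MetastoreConf lookups keyed on the new ConfVars names. Below is a minimal, illustrative Java sketch of that lookup pattern, outside the patch itself; it assumes MetastoreConf.newMetastoreConf() as the Configuration factory and uses only the ConfVars that appear in these hunks (CLIENT_SOCKET_TIMEOUT, STATS_NDV_DENSITY_FUNCTION, STATS_NDV_TUNER).

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

public class MetastoreConfLookupSketch {
  public static void main(String[] args) {
    // MetastoreConf works against a plain Hadoop Configuration; no HiveConf needed.
    // newMetastoreConf() is assumed here as the factory method.
    Configuration conf = MetastoreConf.newMetastoreConf();

    // Same pattern the patch introduces in RawStoreProxy for the client socket timeout.
    long socketTimeoutMs =
        MetastoreConf.getTimeVar(conf, ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);

    // Same pattern the patch introduces in CachedStore.mergeColStatsForPartitions.
    boolean useDensityFn =
        MetastoreConf.getBoolVar(conf, ConfVars.STATS_NDV_DENSITY_FUNCTION);
    double ndvTuner = MetastoreConf.getDoubleVar(conf, ConfVars.STATS_NDV_TUNER);

    System.out.println(socketTimeoutMs + " " + useDensityFn + " " + ndvTuner);
  }
}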
private static final class SharedCacheWrapper { - private static enum InitState { + private enum InitState { NOT_ENABLED, INITIALIZING, INITIALIZED, FAILED_FATAL } @@ -2184,7 +2180,7 @@ void updateInitState(Throwable error, boolean isFatal) { } } - void startInit(HiveConf conf) { + void startInit(Configuration conf) { LOG.info("Initializing shared cache"); synchronized (initLock) { assert initState == InitState.NOT_ENABLED; diff --git metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java similarity index 91% rename from metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index e713de051d..a76b8480b0 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import java.util.Map.Entry; import java.util.TreeMap; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -41,15 +40,16 @@ import org.apache.hadoop.hive.metastore.cache.CachedStore.PartitionWrapper; import org.apache.hadoop.hive.metastore.cache.CachedStore.StorageDescriptorWrapper; import org.apache.hadoop.hive.metastore.cache.CachedStore.TableWrapper; -import org.apache.hive.common.util.HiveStringUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; public class SharedCache { - private Map databaseCache = new TreeMap(); - private Map tableCache = new TreeMap(); + private Map databaseCache = new TreeMap<>(); + private Map tableCache = new TreeMap<>(); private Map partitionCache = new TreeMap<>(); private Map partitionColStatsCache = new TreeMap<>(); private Map tableColStatsCache = new TreeMap<>(); @@ -72,7 +72,7 @@ public synchronized Database getDatabaseFromCache(String name) { public synchronized void addDatabaseToCache(String dbName, Database db) { Database dbCopy = db.deepCopy(); - dbCopy.setName(HiveStringUtils.normalizeIdentifier(dbName)); + dbCopy.setName(StringUtils.normalizeIdentifier(dbName)); databaseCache.put(dbName, dbCopy); } @@ -81,12 +81,12 @@ public synchronized void removeDatabaseFromCache(String dbName) { } public synchronized List listCachedDatabases() { - return new ArrayList(databaseCache.keySet()); + return new ArrayList<>(databaseCache.keySet()); } public synchronized void alterDatabaseInCache(String dbName, Database newDb) { - removeDatabaseFromCache(HiveStringUtils.normalizeIdentifier(dbName)); - addDatabaseToCache(HiveStringUtils.normalizeIdentifier(newDb.getName()), newDb.deepCopy()); + removeDatabaseFromCache(StringUtils.normalizeIdentifier(dbName)); + addDatabaseToCache(StringUtils.normalizeIdentifier(newDb.getName()), newDb.deepCopy()); } public synchronized int getCachedDatabaseCount() { @@ -104,11 +104,11 @@ public synchronized Table getTableFromCache(String dbName, 
String tableName) { public synchronized void addTableToCache(String dbName, String tblName, Table tbl) { Table tblCopy = tbl.deepCopy(); - tblCopy.setDbName(HiveStringUtils.normalizeIdentifier(dbName)); - tblCopy.setTableName(HiveStringUtils.normalizeIdentifier(tblName)); + tblCopy.setDbName(StringUtils.normalizeIdentifier(dbName)); + tblCopy.setTableName(StringUtils.normalizeIdentifier(tblName)); if (tblCopy.getPartitionKeys() != null) { for (FieldSchema fs : tblCopy.getPartitionKeys()) { - fs.setName(HiveStringUtils.normalizeIdentifier(fs.getName())); + fs.setName(StringUtils.normalizeIdentifier(fs.getName())); } } TableWrapper wrapper; @@ -174,8 +174,8 @@ public synchronized void updateTableColStatsInCache(String dbName, String tableN public synchronized void alterTableInCache(String dbName, String tblName, Table newTable) { removeTableFromCache(dbName, tblName); - addTableToCache(HiveStringUtils.normalizeIdentifier(newTable.getDbName()), - HiveStringUtils.normalizeIdentifier(newTable.getTableName()), newTable); + addTableToCache(StringUtils.normalizeIdentifier(newTable.getDbName()), + StringUtils.normalizeIdentifier(newTable.getTableName()), newTable); } public synchronized void alterTableInPartitionCache(String dbName, String tblName, @@ -184,10 +184,10 @@ public synchronized void alterTableInPartitionCache(String dbName, String tblNam List partitions = listCachedPartitions(dbName, tblName, -1); for (Partition part : partitions) { removePartitionFromCache(part.getDbName(), part.getTableName(), part.getValues()); - part.setDbName(HiveStringUtils.normalizeIdentifier(newTable.getDbName())); - part.setTableName(HiveStringUtils.normalizeIdentifier(newTable.getTableName())); - addPartitionToCache(HiveStringUtils.normalizeIdentifier(newTable.getDbName()), - HiveStringUtils.normalizeIdentifier(newTable.getTableName()), part); + part.setDbName(StringUtils.normalizeIdentifier(newTable.getDbName())); + part.setTableName(StringUtils.normalizeIdentifier(newTable.getTableName())); + addPartitionToCache(StringUtils.normalizeIdentifier(newTable.getDbName()), + StringUtils.normalizeIdentifier(newTable.getTableName()), part); } } } @@ -199,7 +199,7 @@ public synchronized void alterTableInTableColStatsCache(String dbName, String tb Iterator> iterator = tableColStatsCache.entrySet().iterator(); Map newTableColStats = - new HashMap(); + new HashMap<>(); while (iterator.hasNext()) { Entry entry = iterator.next(); String key = entry.getKey(); @@ -219,8 +219,7 @@ public synchronized void alterTableInPartitionColStatsCache(String dbName, Strin Table newTable) { if (!dbName.equals(newTable.getDbName()) || !tblName.equals(newTable.getTableName())) { List partitions = listCachedPartitions(dbName, tblName, -1); - Map newPartitionColStats = - new HashMap(); + Map newPartitionColStats = new HashMap<>(); for (Partition part : partitions) { String oldPartialPartitionKey = CacheUtils.buildKeyWithDelimit(dbName, tblName, part.getValues()); @@ -249,7 +248,7 @@ public synchronized int getCachedTableCount() { } public synchronized List
<Table> listCachedTables(String dbName) { - List<Table> tables = new ArrayList<Table>(); + List<Table>
tables = new ArrayList<>(); for (TableWrapper wrapper : tableCache.values()) { if (wrapper.getTable().getDbName().equals(dbName)) { tables.add(CacheUtils.assemble(wrapper, this)); @@ -259,7 +258,7 @@ public synchronized int getCachedTableCount() { } public synchronized List getTableMeta(String dbNames, String tableNames, List tableTypes) { - List tableMetas = new ArrayList(); + List tableMetas = new ArrayList<>(); for (String dbName : listCachedDatabases()) { if (CacheUtils.matches(dbName, dbNames)) { for (Table table : listCachedTables(dbName)) { @@ -355,7 +354,7 @@ public synchronized void removePartitionColStatsFromCache(String dbName, String } public synchronized List listCachedPartitions(String dbName, String tblName, int max) { - List partitions = new ArrayList(); + List partitions = new ArrayList<>(); int count = 0; for (PartitionWrapper wrapper : partitionCache.values()) { if (wrapper.getPartition().getDbName().equals(dbName) @@ -371,15 +370,14 @@ public synchronized void removePartitionColStatsFromCache(String dbName, String public synchronized void alterPartitionInCache(String dbName, String tblName, List partVals, Partition newPart) { removePartitionFromCache(dbName, tblName, partVals); - addPartitionToCache(HiveStringUtils.normalizeIdentifier(newPart.getDbName()), - HiveStringUtils.normalizeIdentifier(newPart.getTableName()), newPart); + addPartitionToCache(StringUtils.normalizeIdentifier(newPart.getDbName()), + StringUtils.normalizeIdentifier(newPart.getTableName()), newPart); } public synchronized void alterPartitionInColStatsCache(String dbName, String tblName, List partVals, Partition newPart) { String oldPartialPartitionKey = CacheUtils.buildKeyWithDelimit(dbName, tblName, partVals); - Map newPartitionColStats = - new HashMap(); + Map newPartitionColStats = new HashMap<>(); Iterator> iterator = partitionColStatsCache.entrySet().iterator(); while (iterator.hasNext()) { @@ -389,8 +387,8 @@ public synchronized void alterPartitionInColStatsCache(String dbName, String tbl if (key.toLowerCase().startsWith(oldPartialPartitionKey.toLowerCase())) { Object[] decomposedKey = CacheUtils.splitPartitionColStats(key); String newKey = - CacheUtils.buildKey(HiveStringUtils.normalizeIdentifier(newPart.getDbName()), - HiveStringUtils.normalizeIdentifier(newPart.getTableName()), newPart.getValues(), + CacheUtils.buildKey(StringUtils.normalizeIdentifier(newPart.getDbName()), + StringUtils.normalizeIdentifier(newPart.getTableName()), newPart.getValues(), (String) decomposedKey[3]); newPartitionColStats.put(newKey, colStatObj); iterator.remove(); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java index e6c836b183..45d5d8c984 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BinaryColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java index a34bc9f38b..8aac0fe33d 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/BooleanColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java index a52e5e5275..cd0392d6c0 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java index dfae70828c..7aaab4a6b9 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/ColumnStatsAggregatorFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java index ee953966c7..7f2956152c 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DateColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -145,12 +145,12 @@ public ColumnStatisticsObj aggregate(String colName, List partNames, // we need extrapolation LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); + Map indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); + Map adjustedIndexMap = new HashMap<>(); + Map adjustedStatsMap = new HashMap<>(); // while we scan the css, we also get the densityAvg, lowerbound and // higerbound when useDensityFunctionForNDVEstimation is true. double densityAvgSum = 0.0; @@ -261,7 +261,7 @@ public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, for (Map.Entry entry : adjustedStatsMap.entrySet()) { extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getDateStats()); } - List> list = new LinkedList>( + List> list = new LinkedList<>( extractedAdjustedStatsMap.entrySet()); // get the lowValue Collections.sort(list, new Comparator>() { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java index 284c12cc2b..05c0280262 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DecimalColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimator; import org.apache.hadoop.hive.common.ndv.NumDistinctValueEstimatorFactory; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; @@ -36,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.columnstats.cache.DecimalColumnStatsDataInspector; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -155,12 +155,12 @@ public ColumnStatisticsObj aggregate(String colName, List partNames, } else { // we need extrapolation LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); + Map indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); + Map adjustedIndexMap = new HashMap<>(); + Map adjustedStatsMap = new HashMap<>(); // while we scan the css, we also get the densityAvg, lowerbound and // higerbound when useDensityFunctionForNDVEstimation is true. double densityAvgSum = 0.0; @@ -270,7 +270,7 @@ public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, for (Map.Entry entry : adjustedStatsMap.entrySet()) { extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getDecimalStats()); } - List> list = new LinkedList>( + List> list = new LinkedList<>( extractedAdjustedStatsMap.entrySet()); // get the lowValue Collections.sort(list, new Comparator>() { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java index bb4a725d44..faf22dcd7c 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/DoubleColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -142,12 +142,12 @@ public ColumnStatisticsObj aggregate(String colName, List partNames, } else { // we need extrapolation LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); + Map indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); + Map adjustedIndexMap = new HashMap<>(); + Map adjustedStatsMap = new HashMap<>(); // while we scan the css, we also get the densityAvg, lowerbound and // higerbound when useDensityFunctionForNDVEstimation is true. 
double densityAvgSum = 0.0; @@ -245,7 +245,7 @@ public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, for (Map.Entry entry : adjustedStatsMap.entrySet()) { extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getDoubleStats()); } - List> list = new LinkedList>( + List> list = new LinkedList<>( extractedAdjustedStatsMap.entrySet()); // get the lowValue Collections.sort(list, new Comparator>() { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java similarity index 95% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java index acf679e1c3..98a121be38 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/IExtrapolatePartStatus.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ * the average of ndv density, which is useful when * useDensityFunctionForNDVEstimation is true. */ - public abstract void extrapolate(ColumnStatisticsData extrapolateData, int numParts, + void extrapolate(ColumnStatisticsData extrapolateData, int numParts, int numPartsWithStats, Map adjustedIndexMap, Map adjustedStatsMap, double densityAvg); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java index 5b1145e507..d12cdc08ea 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/LongColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -143,12 +143,12 @@ public ColumnStatisticsObj aggregate(String colName, List partNames, // we need extrapolation LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); + Map indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); + Map adjustedIndexMap = new HashMap<>(); + Map adjustedStatsMap = new HashMap<>(); // while we scan the css, we also get the densityAvg, lowerbound and // higerbound when useDensityFunctionForNDVEstimation is true. 
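
The same extrapolation bookkeeping recurs in each aggregator renamed above (Date, Decimal, Double, Long): partition names are mapped to their ordinal position so that partitions without stats can be extrapolated from the ones that have them. A minimal sketch of that mapping, with illustrative names only (the Long aggregator hunk resumes below):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class IndexMapSketch {
      // Map each partition name to its position in the ordered partition list;
      // the aggregators use this to know where a partition with stats sits
      // relative to the full range being extrapolated over.
      static Map<String, Integer> buildIndexMap(List<String> partNames) {
        Map<String, Integer> indexMap = new HashMap<>();
        for (int i = 0; i < partNames.size(); i++) {
          indexMap.put(partNames.get(i), i);
        }
        return indexMap;
      }
    }
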
double densityAvgSum = 0.0; @@ -246,7 +246,7 @@ public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, for (Map.Entry entry : adjustedStatsMap.entrySet()) { extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getLongStats()); } - List> list = new LinkedList>( + List> list = new LinkedList<>( extractedAdjustedStatsMap.entrySet()); // get the lowValue Collections.sort(list, new Comparator>() { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java similarity index 98% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java index 1b29f92d4a..4539e6b026 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/aggr/StringColumnStatsAggregator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -125,12 +125,12 @@ public ColumnStatisticsObj aggregate(String colName, List partNames, // we need extrapolation LOG.debug("start extrapolation for " + colName); - Map indexMap = new HashMap(); + Map indexMap = new HashMap<>(); for (int index = 0; index < partNames.size(); index++) { indexMap.put(partNames.get(index), index); } - Map adjustedIndexMap = new HashMap(); - Map adjustedStatsMap = new HashMap(); + Map adjustedIndexMap = new HashMap<>(); + Map adjustedStatsMap = new HashMap<>(); if (ndvEstimator == null) { // if not every partition uses bitvector for ndv, we just fall back to // the traditional extrapolation methods. 
@@ -217,7 +217,7 @@ public void extrapolate(ColumnStatisticsData extrapolateData, int numParts, for (Map.Entry entry : adjustedStatsMap.entrySet()) { extractedAdjustedStatsMap.put(entry.getKey(), entry.getValue().getStringStats()); } - List> list = new LinkedList>( + List> list = new LinkedList<>( extractedAdjustedStatsMap.entrySet()); // get the avgLen Collections.sort(list, new Comparator>() { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DateColumnStatsDataInspector.java diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DecimalColumnStatsDataInspector.java diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/DoubleColumnStatsDataInspector.java diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/LongColumnStatsDataInspector.java diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/cache/StringColumnStatsDataInspector.java diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java index 4c2d1bc602..1c2402f564 100644 --- 
metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BinaryColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java index 8e5015323f..fd6b87aa67 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/BooleanColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java index 474d4ddcd1..ce557565c4 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java index 66be524139..1a2d38e556 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/ColumnStatsMergerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java index e783d3c345..5baebbb47b 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DateColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java index 54099f64c2..01f3385d70 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DecimalColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java index 817a55dc5b..6a95751815 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/DoubleColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java index dc048e0194..ca1a912052 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/LongColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java similarity index 99% rename from metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java index e353b8f70c..d6b4478ec8 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/columnstats/merge/StringColumnStatsMerger.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index 5a6ef990ee..5933318e64 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -499,12 +499,6 @@ public static ConfVars getMetaConf(String name) { "A comma separated list of metrics reporters to start"), MULTITHREADED("javax.jdo.option.Multithreaded", "javax.jdo.option.Multithreaded", true, "Set this to true if multiple threads access metastore through JDO concurrently."), - ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("metastore.orm.retrieveMapNullsAsEmptyStrings", - "hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false, - "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " + - "either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " + - "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " + - "pruning is the correct behaviour"), MAX_OPEN_TXNS("metastore.max.open.txns", "hive.max.open.txns", 100000, "Maximum number of open transactions. 
If \n" + "current open transactions reach this limit, future open transaction requests will be \n" + @@ -512,6 +506,21 @@ public static ConfVars getMetaConf(String name) { NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", "javax.jdo.option.NonTransactionalRead", true, "Reads outside of transactions"), + NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES("metastore.notification.sequence.lock.max.retries", + "hive.notification.sequence.lock.max.retries", 5, + "Number of retries required to acquire a lock when getting the next notification sequential ID for entries " + + "in the NOTIFICATION_LOG table."), + NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL( + "metastore.notification.sequence.lock.retry.sleep.interval", + "hive.notification.sequence.lock.retry.sleep.interval", 500, TimeUnit.MILLISECONDS, + "Sleep interval between retries to acquire a notification lock as described part of property " + + NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.name()), + ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("metastore.orm.retrieveMapNullsAsEmptyStrings", + "hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false, + "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " + + "either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " + + "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " + + "pruning is the correct behaviour"), PARTITION_NAME_WHITELIST_PATTERN("metastore.partition.name.whitelist.pattern", "hive.metastore.partition.name.whitelist.pattern", "", "Partition names will be checked against this regex pattern and rejected if not matched."), @@ -591,6 +600,8 @@ public static ConfVars getMetaConf(String name) { "Metastore SSL certificate truststore password."), STATS_AUTO_GATHER("metastore.stats.autogather", "hive.stats.autogather", true, "A flag to gather statistics (only basic) automatically during the INSERT OVERWRITE command."), + STATS_FETCH_BITVECTOR("metastore.stats.fetch.bitvector", "hive.stats.fetch.bitvector", false, + "Whether we fetch bitvector when we compute ndv. Users can turn it off if they want to use old schema"), STATS_NDV_TUNER("metastore.stats.ndv.tuner", "hive.metastore.stats.ndv.tuner", 0.0, "Provides a tunable parameter between the lower bound and the higher bound of ndv for aggregate ndv across all the partitions. \n" + "The lower bound is equal to the maximum of ndv of all the partitions. The higher bound is equal to the sum of ndv of all the partitions.\n" + diff --git metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java similarity index 97% rename from metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java index 12773ac9e9..d608e50b0d 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
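
For the MetastoreConf hunk above, which adds NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES, NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL and STATS_FETCH_BITVECTOR and relocates ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS unchanged, here is a sketch of how a caller might read the new values. getBoolVar appears elsewhere in this patch; getIntVar and getTimeVar are assumed to be the usual typed getters on MetastoreConf (the ExpressionTree.java rename resumes below):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    class NotificationLockSettingsSketch {
      static void readSettings(Configuration conf) {
        // How many times to retry taking the NOTIFICATION_LOG sequence lock.
        int maxRetries = MetastoreConf.getIntVar(conf,
            ConfVars.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES);
        // Sleep between retries, resolved to milliseconds.
        long sleepMs = MetastoreConf.getTimeVar(conf,
            ConfVars.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL, TimeUnit.MILLISECONDS);
        // Whether NDV bit vectors are fetched when computing column stats.
        boolean fetchBitVector = MetastoreConf.getBoolVar(conf, ConfVars.STATS_FETCH_BITVECTOR);
        System.out.println(maxRetries + " retries, " + sleepMs + " ms sleep, bitvector=" + fetchBitVector);
      }
    }
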
See the NOTICE file * distributed with this work for additional information @@ -27,8 +27,6 @@ import org.antlr.runtime.CharStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.ColumnType; -import org.apache.hadoop.hive.metastore.HiveMetaStore; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; @@ -36,6 +34,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Sets; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; /** * The Class representing the filter as a binary tree. The tree has TreeNode's @@ -67,13 +67,13 @@ private final String sqlOp; // private constructor - private Operator(String op){ + Operator(String op){ this.op = op; this.jdoOp = op; this.sqlOp = op; } - private Operator(String op, String jdoOp, String sqlOp){ + Operator(String op, String jdoOp, String sqlOp){ this.op = op; this.jdoOp = jdoOp; this.sqlOp = sqlOp; @@ -351,7 +351,7 @@ private void generateJDOFilterOverPartitions(Configuration conf, Table table, if (filterBuilder.hasError()) return; boolean canPushDownIntegral = - HiveConf.getBoolVar(conf, HiveConf.ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN); + MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.INTEGER_JDO_PUSHDOWN); String valueAsString = getJdoFilterPushdownParam( table, partitionColumnIndex, filterBuilder, canPushDownIntegral); if (filterBuilder.hasError()) return; @@ -443,7 +443,7 @@ private String getJdoFilterPushdownParam(Table table, int partColIndex, // columns have been excluded above, so it will either compare w/string or fail. Object val = value; if (value instanceof Date) { - val = HiveMetaStore.PARTITION_DATE_FORMAT.get().format((Date)value); + val = MetaStoreUtils.PARTITION_DATE_FORMAT.get().format((Date)value); } boolean isStringValue = val instanceof String; if (!isStringValue && (!isIntegralSupported || !(val instanceof Long))) { @@ -487,7 +487,7 @@ public void accept(TreeVisitor treeVisitor) throws MetaException { private static void makeFilterForEquals(String keyName, String value, String paramName, Map params, int keyPos, int keyCount, boolean isEq, FilterBuilder fltr) throws MetaException { - Map partKeyToVal = new HashMap(); + Map partKeyToVal = new HashMap<>(); partKeyToVal.put(keyName, value); // If a partition has multiple partition keys, we make the assumption that // makePartName with one key will return a substring of the name made @@ -526,7 +526,7 @@ private static void makeFilterForEquals(String keyName, String value, String par /** * The node stack used to keep track of the tree nodes during parsing. 
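
The ExpressionTree changes above resolve the integral-pushdown flag through MetastoreConf and format Date partition values through MetaStoreUtils.PARTITION_DATE_FORMAT, which this patch defines later in the standalone utils class. A small sketch of that formatting; the wrapper class is illustrative (the nodeStack cleanup resumes below):

    import java.util.Date;
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

    class PartitionDateFormatSketch {
      static String toPartitionValue(Date date) {
        // Thread-local, non-lenient "yyyy-MM-dd" formatter, so an invalid
        // date such as 2020-20-20 is rejected rather than silently rolled over.
        return MetaStoreUtils.PARTITION_DATE_FORMAT.get().format(date);
      }
    }
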
*/ - private final Stack nodeStack = new Stack(); + private final Stack nodeStack = new Stack<>(); public TreeNode getRoot() { return this.root; diff --git metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/Filter.g similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/parser/Filter.g rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/Filter.g diff --git metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/package-info.java similarity index 100% rename from metastore/src/java/org/apache/hadoop/hive/metastore/parser/package-info.java rename to standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/parser/package-info.java diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java index 1223b522b2..e3f7eca553 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java @@ -31,13 +31,13 @@ private Configuration conf; private int isAliveCounter = 0; private long lastLogTime = 0; + private TxnStore txnHandler; @Override public void run() { try { long startTime = System.currentTimeMillis(); isAliveCounter++; - TxnStore txnHandler = TxnUtils.getTxnStore(conf); txnHandler.countOpenTxns(); if (System.currentTimeMillis() - lastLogTime > 60 * 1000) { LOG.info("AcidOpenTxnsCounterService ran for " + @@ -54,6 +54,7 @@ public void run() { @Override public void setConf(Configuration configuration) { conf = configuration; + txnHandler = TxnUtils.getTxnStore(conf); } @Override diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java index 40f739301e..593dee3996 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/JavaUtils.java @@ -21,6 +21,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.lang.reflect.Constructor; import java.net.InetAddress; import java.net.UnknownHostException; @@ -55,6 +56,38 @@ public static ClassLoader getClassLoader() { } /** + * Create an object of the given class. + * @param theClass + * @param parameterTypes + * an array of parameterTypes for the constructor + * @param initargs + * the list of arguments for the constructor + */ + public static T newInstance(Class theClass, Class[] parameterTypes, + Object[] initargs) { + // Perform some sanity checks on the arguments. 
+ if (parameterTypes.length != initargs.length) { + throw new IllegalArgumentException( + "Number of constructor parameter types doesn't match number of arguments"); + } + for (int i = 0; i < parameterTypes.length; i++) { + Class clazz = parameterTypes[i]; + if (initargs[i] != null && !(clazz.isInstance(initargs[i]))) { + throw new IllegalArgumentException("Object : " + initargs[i] + + " is not an instance of " + clazz); + } + } + + try { + Constructor meth = theClass.getDeclaredConstructor(parameterTypes); + meth.setAccessible(true); + return meth.newInstance(initargs); + } catch (Exception e) { + throw new RuntimeException("Unable to instantiate " + theClass.getName(), e); + } + } + + /** * @return name of current host */ public static String hostname() { diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 37fc56bda8..77790adfff 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -17,11 +17,63 @@ */ package org.apache.hadoop.hive.metastore.utils; +import com.google.common.base.Predicates; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.commons.lang.*; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Decimal; +import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; +import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nullable; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.regex.Pattern; + public class MetaStoreUtils { + /** A fixed date format to be used for hive partition column values. */ + public static final ThreadLocal PARTITION_DATE_FORMAT = + new ThreadLocal() { + @Override + protected DateFormat initialValue() { + DateFormat val = new SimpleDateFormat("yyyy-MM-dd"); + val.setLenient(false); // Without this, 2020-20-20 becomes 2021-08-20. 
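
The newInstance helper added to JavaUtils above wraps getDeclaredConstructor, setAccessible and Constructor.newInstance behind argument sanity checks. A usage sketch, assuming the generics elided in this rendering are the conventional <T> newInstance(Class<T>, Class<?>[], Object[]); the Greeter class is purely illustrative (the MetaStoreUtils hunk resumes below):

    import org.apache.hadoop.hive.metastore.utils.JavaUtils;

    class Greeter {
      private final String name;
      Greeter(String name) { this.name = name; }
      String greet() { return "hello " + name; }

      public static void main(String[] args) {
        // Reflectively invoke Greeter(String) through the new helper.
        Greeter g = JavaUtils.newInstance(Greeter.class,
            new Class<?>[] { String.class }, new Object[] { "metastore" });
        System.out.println(g.greet());
      }
    }
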
+ return val; + } + }; + private static final Charset ENCODING = StandardCharsets.UTF_8; private static final Logger LOG = LoggerFactory.getLogger(MetaStoreUtils.class); /** @@ -53,4 +105,258 @@ public static String encodeTableName(String name) { } return sb.toString(); } + + /** + * convert Exception to MetaException, which sets the cause to such exception + * @param e cause of the exception + * @return the MetaException with the specified exception as the cause + */ + public static MetaException newMetaException(Exception e) { + return newMetaException(e != null ? e.getMessage() : null, e); + } + + /** + * convert Exception to MetaException, which sets the cause to such exception + * @param errorMessage the error message for this MetaException + * @param e cause of the exception + * @return the MetaException with the specified exception as the cause + */ + public static MetaException newMetaException(String errorMessage, Exception e) { + MetaException metaException = new MetaException(errorMessage); + if (e != null) { + metaException.initCause(e); + } + return metaException; + } + + /** + * Helper function to transform nulls to empty strings. + */ + private static final com.google.common.base.Function transFormNullsToEmptyString + = new com.google.common.base.Function() { + @Override + public java.lang.String apply(@Nullable java.lang.String string) { + return org.apache.commons.lang.StringUtils.defaultString(string); + } + }; + /** + * We have a need to sanity-check the map before conversion from persisted objects to + * metadata thrift objects because null values in maps will cause an NPE if we send + * across thrift. Pruning is appropriate for most cases except for databases such as + * Oracle where empty strings are stored as nulls, in which case we need to handle that. + * See HIVE-8485 for motivations for this. + */ + public static Map trimMapNulls( + Map dnMap, boolean retrieveMapNullsAsEmptyStrings){ + if (dnMap == null){ + return null; + } + // Must be deterministic order map - see HIVE-8707 + // => we use Maps.newLinkedHashMap instead of Maps.newHashMap + if (retrieveMapNullsAsEmptyStrings) { + // convert any nulls present in map values to empty strings - this is done in the case + // of backing dbs like oracle which persist empty strings as nulls. + return Maps.newLinkedHashMap(Maps.transformValues(dnMap, transFormNullsToEmptyString)); + } else { + // prune any nulls present in map values - this is the typical case. + return Maps.newLinkedHashMap(Maps.filterValues(dnMap, Predicates.notNull())); + } + } + + + // given a list of partStats, this function will give you an aggr stats + public static List aggrPartitionStats(List partStats, + String dbName, String tableName, List partNames, List colNames, + boolean useDensityFunctionForNDVEstimation, double ndvTuner) + throws MetaException { + // 1.
group by the stats by colNames + // map the colName to List + Map> map = new HashMap<>(); + for (ColumnStatistics css : partStats) { + List objs = css.getStatsObj(); + for (ColumnStatisticsObj obj : objs) { + List singleObj = new ArrayList<>(); + singleObj.add(obj); + ColumnStatistics singleCS = new ColumnStatistics(css.getStatsDesc(), singleObj); + if (!map.containsKey(obj.getColName())) { + map.put(obj.getColName(), new ArrayList()); + } + map.get(obj.getColName()).add(singleCS); + } + } + return MetaStoreUtils.aggrPartitionStats(map,dbName,tableName,partNames,colNames,useDensityFunctionForNDVEstimation, ndvTuner); + } + + public static List aggrPartitionStats( + Map> map, String dbName, String tableName, + final List partNames, List colNames, + final boolean useDensityFunctionForNDVEstimation,final double ndvTuner) throws MetaException { + List colStats = new ArrayList<>(); + // 2. Aggregate stats for each column in a separate thread + if (map.size()< 1) { + //stats are absent in RDBMS + LOG.debug("No stats data found for: dbName=" +dbName +" tblName=" + tableName + + " partNames= " + partNames + " colNames=" + colNames ); + return colStats; + } + final ExecutorService pool = Executors.newFixedThreadPool(Math.min(map.size(), 16), + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("aggr-col-stats-%d").build()); + final List> futures = Lists.newLinkedList(); + + long start = System.currentTimeMillis(); + for (final Map.Entry> entry : map.entrySet()) { + futures.add(pool.submit(new Callable() { + @Override + public ColumnStatisticsObj call() throws Exception { + List css = entry.getValue(); + ColumnStatsAggregator aggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(css + .iterator().next().getStatsObj().iterator().next().getStatsData().getSetField(), + useDensityFunctionForNDVEstimation, ndvTuner); + ColumnStatisticsObj statsObj = aggregator.aggregate(entry.getKey(), partNames, css); + return statsObj; + }})); + } + pool.shutdown(); + for (Future future : futures) { + try { + colStats.add(future.get()); + } catch (InterruptedException | ExecutionException e) { + pool.shutdownNow(); + LOG.debug(e.toString()); + throw new MetaException(e.toString()); + } + } + LOG.debug("Time for aggr col stats in seconds: {} Threads used: {}", + ((System.currentTimeMillis() - (double)start))/1000, Math.min(map.size(), 16)); + return colStats; + } + + public static double decimalToDouble(Decimal decimal) { + return new BigDecimal(new BigInteger(decimal.getUnscaled()), decimal.getScale()).doubleValue(); + } + + public static String[] getQualifiedName(String defaultDbName, String tableName) { + String[] names = tableName.split("\\."); + if (names.length == 1) { + return new String[] { defaultDbName, tableName}; + } + return names; + } + + public static void validatePartitionNameCharacters(List partVals, + Pattern partitionValidationPattern) throws MetaException { + + String invalidPartitionVal = getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern); + if (invalidPartitionVal != null) { + throw new MetaException("Partition value '" + invalidPartitionVal + + "' contains a character " + "not matched by whitelist pattern '" + + partitionValidationPattern.toString() + "'. 
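
aggrPartitionStats above fans the per-column aggregation out over a bounded daemon thread pool of at most 16 threads. For the ndvTuner value it passes through, the STATS_NDV_TUNER description earlier in this patch fixes the bounds: the lower bound is the maximum per-partition NDV and the higher bound is the sum. Read purely as arithmetic, one plausible interpolation between those bounds looks like this; it is a sketch, not the aggregators' literal code (the validatePartitionNameCharacters hunk resumes below):

    class NdvTunerSketch {
      // ndvTuner in [0, 1]: 0 keeps the conservative lower bound (max of the
      // per-partition NDVs), 1 assumes fully disjoint partitions (sum of NDVs).
      static long aggregateNdv(long lowerBound, long higherBound, double ndvTuner) {
        return (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
      }

      public static void main(String[] args) {
        // Example: per-partition NDVs max out at 1000 and sum to 4000.
        System.out.println(aggregateNdv(1000, 4000, 0.25)); // prints 1750
      }
    }
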
" + "(configure with " + + MetastoreConf.ConfVars.PARTITION_NAME_WHITELIST_PATTERN.varname + ")"); + } + } + + public static String getPartitionValWithInvalidCharacter(List partVals, + Pattern partitionValidationPattern) { + if (partitionValidationPattern == null) { + return null; + } + + for (String partVal : partVals) { + if (!partitionValidationPattern.matcher(partVal).matches()) { + return partVal; + } + } + + return null; + } + + /** + * Produce a hash for the storage descriptor + * @param sd storage descriptor to hash + * @param md message descriptor to use to generate the hash + * @return the hash as a byte array + */ + public static byte[] hashStorageDescriptor(StorageDescriptor sd, MessageDigest md) { + // Note all maps and lists have to be absolutely sorted. Otherwise we'll produce different + // results for hashes based on the OS or JVM being used. + md.reset(); + for (FieldSchema fs : sd.getCols()) { + md.update(fs.getName().getBytes(ENCODING)); + md.update(fs.getType().getBytes(ENCODING)); + if (fs.getComment() != null) md.update(fs.getComment().getBytes(ENCODING)); + } + if (sd.getInputFormat() != null) { + md.update(sd.getInputFormat().getBytes(ENCODING)); + } + if (sd.getOutputFormat() != null) { + md.update(sd.getOutputFormat().getBytes(ENCODING)); + } + md.update(sd.isCompressed() ? "true".getBytes(ENCODING) : "false".getBytes(ENCODING)); + md.update(Integer.toString(sd.getNumBuckets()).getBytes(ENCODING)); + if (sd.getSerdeInfo() != null) { + SerDeInfo serde = sd.getSerdeInfo(); + if (serde.getName() != null) { + md.update(serde.getName().getBytes(ENCODING)); + } + if (serde.getSerializationLib() != null) { + md.update(serde.getSerializationLib().getBytes(ENCODING)); + } + if (serde.getParameters() != null) { + SortedMap params = new TreeMap<>(serde.getParameters()); + for (Map.Entry param : params.entrySet()) { + md.update(param.getKey().getBytes(ENCODING)); + md.update(param.getValue().getBytes(ENCODING)); + } + } + } + if (sd.getBucketCols() != null) { + List bucketCols = new ArrayList<>(sd.getBucketCols()); + for (String bucket : bucketCols) md.update(bucket.getBytes(ENCODING)); + } + if (sd.getSortCols() != null) { + SortedSet orders = new TreeSet<>(sd.getSortCols()); + for (Order order : orders) { + md.update(order.getCol().getBytes(ENCODING)); + md.update(Integer.toString(order.getOrder()).getBytes(ENCODING)); + } + } + if (sd.getSkewedInfo() != null) { + SkewedInfo skewed = sd.getSkewedInfo(); + if (skewed.getSkewedColNames() != null) { + SortedSet colnames = new TreeSet<>(skewed.getSkewedColNames()); + for (String colname : colnames) md.update(colname.getBytes(ENCODING)); + } + if (skewed.getSkewedColValues() != null) { + SortedSet sortedOuterList = new TreeSet<>(); + for (List innerList : skewed.getSkewedColValues()) { + SortedSet sortedInnerList = new TreeSet<>(innerList); + sortedOuterList.add(org.apache.commons.lang.StringUtils.join(sortedInnerList, ".")); + } + for (String colval : sortedOuterList) md.update(colval.getBytes(ENCODING)); + } + if (skewed.getSkewedColValueLocationMaps() != null) { + SortedMap sortedMap = new TreeMap<>(); + for (Map.Entry, String> smap : skewed.getSkewedColValueLocationMaps().entrySet()) { + SortedSet sortedKey = new TreeSet<>(smap.getKey()); + sortedMap.put(org.apache.commons.lang.StringUtils.join(sortedKey, "."), smap.getValue()); + } + for (Map.Entry e : sortedMap.entrySet()) { + md.update(e.getKey().getBytes(ENCODING)); + md.update(e.getValue().getBytes(ENCODING)); + } + } + md.update(sd.isStoredAsSubDirectories() ? 
"true".getBytes(ENCODING) : "false".getBytes(ENCODING)); + } + + return md.digest(); + } + + public static List getColumnNamesForTable(Table table) { + List colNames = new ArrayList<>(); + Iterator colsIterator = table.getSd().getColsIterator(); + while (colsIterator.hasNext()) { + colNames.add(colsIterator.next().getName()); + } + return colNames; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/ObjectPair.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/ObjectPair.java new file mode 100644 index 0000000000..5b49a251f8 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/ObjectPair.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.utils; + + + +public class ObjectPair { + private F first; + private S second; + + public ObjectPair() {} + + /** + * Creates a pair. Constructor doesn't infer template args but + * the method does, so the code becomes less ugly. + */ + public static ObjectPair create(T1 f, T2 s) { + return new ObjectPair<>(f, s); + } + + public ObjectPair(F first, S second) { + this.first = first; + this.second = second; + } + + public F getFirst() { + return first; + } + + public void setFirst(F first) { + this.first = first; + } + + public S getSecond() { + return second; + } + + public void setSecond(S second) { + this.second = second; + } + + @Override + public boolean equals(Object that) { + if (that == null) { + return false; + } + if (that instanceof ObjectPair) { + return this.equals((ObjectPair)that); + } + return false; + } + + public boolean equals(ObjectPair that) { + if (that == null) { + return false; + } + + return this.getFirst().equals(that.getFirst()) && + this.getSecond().equals(that.getSecond()); + } + + @Override + public int hashCode() { + return first.hashCode() * 31 + second.hashCode(); + } + + public String toString() { + return first + ":" + second; + } +} diff --git common/src/test/org/apache/hadoop/hive/common/TestStatsSetupConst.java standalone-metastore/src/test/java/org/apache/hadoop/hive/common/TestStatsSetupConst.java similarity index 99% rename from common/src/test/org/apache/hadoop/hive/common/TestStatsSetupConst.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/TestStatsSetupConst.java index 792b8626da..883e2bdcdb 100644 --- common/src/test/org/apache/hadoop/hive/common/TestStatsSetupConst.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/common/TestStatsSetupConst.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git common/src/test/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java similarity index 99% rename from common/src/test/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java index 377f9c7920..e3a6f140b9 100644 --- common/src/test/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/fm/TestFMSketchSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java similarity index 97% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java index 30f5ca3e61..f09cb8ce15 100644 --- common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLNoBias.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,7 +71,7 @@ public void testHLLAddHalfDistinct() { Random rand = new Random(size); HyperLogLog hll = HyperLogLog.builder().build(); int unique = size / 2; - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { long val = rand.nextInt(unique); hashset.add(val); @@ -100,7 +100,7 @@ public void testHLLNoBiasDisabledHalfDistinct() { Random rand = new Random(size); HyperLogLog hll = HyperLogLog.builder().enableNoBias(false).build(); int unique = size / 2; - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { long val = rand.nextInt(unique); hashset.add(val); diff --git common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java similarity index 98% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java index b4b8df1174..08955d73b6 100644 --- common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHLLSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
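
hashStorageDescriptor, moved above into the standalone MetaStoreUtils, sorts every map, list and skewed-value structure before updating the MessageDigest so the resulting hash is stable across JVMs and operating systems. A usage sketch; MD5 is only an illustrative choice, the method hashes with whatever digest the caller supplies (the test renames resume below):

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

    class SdHashSketch {
      static byte[] hash(StorageDescriptor sd) throws NoSuchAlgorithmException {
        // Any MessageDigest works; the method resets it before hashing.
        MessageDigest md = MessageDigest.getInstance("MD5");
        return MetaStoreUtils.hashStorageDescriptor(sd, md);
      }
    }
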
See the NOTICE file * distributed with this work for additional information @@ -97,7 +97,7 @@ public void testHLLSparseSerialization() throws IOException { public void testHLLSparseSerializationHalfDistinct() throws IOException { HyperLogLog hll = HyperLogLog.builder().setEncoding(EncodingType.SPARSE).build(); Random rand = new Random(SEED); - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { int val = rand.nextInt(size / 2); hll.addLong(val); @@ -146,7 +146,7 @@ public void testHLLSparseNoBitPackingHalfDistinct() throws IOException { HyperLogLog hll = HyperLogLog.builder().setEncoding(EncodingType.SPARSE) .enableBitPacking(false).build(); Random rand = new Random(SEED); - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { int val = rand.nextInt(size / 2); hll.addLong(val); @@ -193,7 +193,7 @@ public void testHLLDenseSerialization() throws IOException { public void testHLLDenseSerializationHalfDistinct() throws IOException { HyperLogLog hll = HyperLogLog.builder().setEncoding(EncodingType.DENSE).build(); Random rand = new Random(SEED); - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { int val = rand.nextInt(size / 2); hll.addLong(val); @@ -242,7 +242,7 @@ public void testHLLDenseNoBitPackingHalfDistinct() throws IOException { HyperLogLog hll = HyperLogLog.builder().setEncoding(EncodingType.DENSE).enableBitPacking(false) .build(); Random rand = new Random(SEED); - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { int val = rand.nextInt(size / 2); hll.addLong(val); diff --git common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java similarity index 99% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java index 635073fc26..b0eaad8012 100644 --- common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java similarity index 98% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java index 00fd785b6f..106a9ed1e5 100644 --- common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogDense.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
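
The ObjectPair class added earlier in this patch keeps a static create factory alongside the two-argument constructor so call sites get type inference. A usage sketch, assuming the generics elided in this rendering are ObjectPair<F, S> and <T1, T2> create(T1, T2) (the HyperLogLog test renames resume below):

    import org.apache.hadoop.hive.metastore.utils.ObjectPair;

    class ObjectPairSketch {
      public static void main(String[] args) {
        // Factory form infers <String, Long>; the constructor form is equivalent.
        ObjectPair<String, Long> rowCount = ObjectPair.create("rows", 42L);
        System.out.println(rowCount.getFirst() + " = " + rowCount.getSecond());
        rowCount.setSecond(43L);
        System.out.println(rowCount); // toString prints "rows:43"
      }
    }
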
See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,7 @@ public void testHLLAddHalfDistinct() { Random rand = new Random(size); HyperLogLog hll = HyperLogLog.builder().setEncoding(HyperLogLog.EncodingType.DENSE).build(); int unique = size / 2; - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { long val = rand.nextInt(unique); hashset.add(val); diff --git common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java similarity index 98% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java index cfa58868e5..50c7ea1355 100644 --- common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestHyperLogLogSparse.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,7 +68,7 @@ public void testHLLAddHalfDistinct() { Random rand = new Random(size); HyperLogLog hll = HyperLogLog.builder().build(); int unique = size / 2; - Set hashset = new HashSet(); + Set hashset = new HashSet<>(); for (int i = 0; i < size; i++) { long val = rand.nextInt(unique); hashset.add(val); diff --git common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java similarity index 99% rename from common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java index 2c7e89b5e6..bcabe9511c 100644 --- common/src/test/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/common/ndv/hll/TestSparseEncodeHash.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestDeadline.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestDeadline.java similarity index 99% rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestDeadline.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestDeadline.java index ea11ed56b4..3a8443a8c0 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestDeadline.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestDeadline.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java similarity index 60% rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index 67b798fa24..5c73d256c3 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -1,19 +1,19 @@ /* - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hive.metastore; @@ -21,9 +21,8 @@ Licensed to the Apache Software Foundation (ASF) under one import com.codahale.metrics.Counter; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; @@ -33,9 +32,6 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.Role; @@ -44,14 +40,12 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.messaging.EventMessage; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; -import org.apache.hadoop.hive.metastore.model.MNotificationLog; -import org.apache.hadoop.hive.metastore.model.MNotificationNextId; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; -import org.junit.After; import org.junit.Assert; +import org.junit.Assume; import org.junit.Before; import org.junit.Ignore; import org.junit.Test; @@ -65,15 +59,6 @@ Licensed to the Apache Software Foundation (ASF) under one import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; public class TestObjectStore { private ObjectStore objectStore = null; @@ -106,7 +91,8 @@ public String convertExprToFilter(byte[] expr) throws MetaException { @Override public boolean filterPartitionsByExpr(List partColumns, - byte[] expr, String defaultPartitionName, List partitionNames) + byte[] expr, String defaultPartitionName, + List partitionNames) throws MetaException { return false; } @@ -129,73 +115,21 @@ public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) { @Before public void setUp() throws Exception { - HiveConf conf = new HiveConf(); - conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, MockPartitionExpressionProxy.class.getName()); + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, + MockPartitionExpressionProxy.class.getName()); objectStore = new ObjectStore(); 
objectStore.setConf(conf); dropAllStoreObjects(objectStore); } - @After - public void tearDown() { - } - - /** - * Test notification operations - */ - @Test - public void testNotificationOps() throws InterruptedException { - final int NO_EVENT_ID = 0; - final int FIRST_EVENT_ID = 1; - final int SECOND_EVENT_ID = 2; - - NotificationEvent event = - new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), ""); - NotificationEventResponse eventResponse; - CurrentNotificationEventId eventId; - - // Verify that there is no notifications available yet - eventId = objectStore.getCurrentNotificationEventId(); - Assert.assertEquals(NO_EVENT_ID, eventId.getEventId()); - - // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID - objectStore.addNotificationEvent(event); - Assert.assertEquals(FIRST_EVENT_ID, event.getEventId()); - objectStore.addNotificationEvent(event); - Assert.assertEquals(SECOND_EVENT_ID, event.getEventId()); - - // Verify that objectStore fetches the latest notification event ID - eventId = objectStore.getCurrentNotificationEventId(); - Assert.assertEquals(SECOND_EVENT_ID, eventId.getEventId()); - - // Verify that getNextNotification() returns all events - eventResponse = objectStore.getNextNotification(new NotificationEventRequest()); - Assert.assertEquals(2, eventResponse.getEventsSize()); - Assert.assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); - Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId()); - - // Verify that getNextNotification(last) returns events after a specified event - eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID)); - Assert.assertEquals(1, eventResponse.getEventsSize()); - Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId()); - - // Verify that getNextNotification(last) returns zero events if there are no more notifications available - eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID)); - Assert.assertEquals(0, eventResponse.getEventsSize()); - - // Verify that cleanNotificationEvents() cleans up all old notifications - Thread.sleep(1); - objectStore.cleanNotificationEvents(1); - eventResponse = objectStore.getNextNotification(new NotificationEventRequest()); - Assert.assertEquals(0, eventResponse.getEventsSize()); - } - /** * Test database operations */ @Test - public void testDatabaseOps() throws MetaException, InvalidObjectException, NoSuchObjectException { + public void testDatabaseOps() throws MetaException, InvalidObjectException, + NoSuchObjectException { Database db1 = new Database(DB1, "description", "locationurl", null); Database db2 = new Database(DB2, "description", "locationurl", null); objectStore.createDatabase(db1); @@ -219,25 +153,30 @@ public void testDatabaseOps() throws MetaException, InvalidObjectException, NoSu * Test table operations */ @Test - public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { + public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException, + InvalidInputException { Database db1 = new Database(DB1, "description", "locationurl", null); objectStore.createDatabase(db1); - StorageDescriptor sd1 = new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)), + StorageDescriptor sd1 = + new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)), 
"location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); HashMap params = new HashMap<>(); params.put("EXTERNAL", "false"); - Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE"); + Table tbl1 = + new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE"); objectStore.createTable(tbl1); List tables = objectStore.getAllTables(DB1); Assert.assertEquals(1, tables.size()); Assert.assertEquals(TABLE1, tables.get(0)); - StorageDescriptor sd2 = new StorageDescriptor(ImmutableList.of(new FieldSchema("fk_col", "double", null)), + StorageDescriptor sd2 = + new StorageDescriptor(ImmutableList.of(new FieldSchema("fk_col", "double", null)), "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); - Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null, "MANAGED_TABLE"); + Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null, + "MANAGED_TABLE"); objectStore.alterTable(DB1, TABLE1, newTbl1); tables = objectStore.getTables(DB1, "new*"); Assert.assertEquals(1, tables.size()); @@ -251,11 +190,11 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO Assert.assertEquals(0, foreignKeys.size()); SQLPrimaryKey pk = new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1, - "pk_const_1", false, false, false); + "pk_const_1", false, false, false); objectStore.addPrimaryKeys(ImmutableList.of(pk)); SQLForeignKey fk = new SQLForeignKey(DB1, TABLE1, "pk_col", - DB1, "new" + TABLE1, "fk_col", 1, - 0, 0, "fk_const_1", "pk_const_1", false, false, false); + DB1, "new" + TABLE1, "fk_col", 1, + 0, 0, "fk_const_1", "pk_const_1", false, false, false); objectStore.addForeignKeys(ImmutableList.of(fk)); // Retrieve from PK side @@ -265,7 +204,8 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO List fks = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1); if (fks != null) { for (SQLForeignKey fkcol : fks) { - objectStore.dropConstraint(fkcol.getFktable_db(), fkcol.getFktable_name(), fkcol.getFk_name()); + objectStore.dropConstraint(fkcol.getFktable_db(), fkcol.getFktable_name(), + fkcol.getFk_name()); } } // Retrieve from FK side @@ -296,7 +236,8 @@ private StorageDescriptor createFakeSd(String location) { * Tests partition operations */ @Test - public void testPartitionOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { + public void testPartitionOps() throws MetaException, InvalidObjectException, + NoSuchObjectException, InvalidInputException { Database db1 = new Database(DB1, "description", "locationurl", null); objectStore.createDatabase(db1); StorageDescriptor sd = createFakeSd("location"); @@ -304,7 +245,9 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, NoS tableParams.put("EXTERNAL", "false"); FieldSchema partitionKey1 = new FieldSchema("Country", ColumnType.STRING_TYPE_NAME, ""); FieldSchema partitionKey2 = new FieldSchema("State", ColumnType.STRING_TYPE_NAME, ""); - Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, null, null, "MANAGED_TABLE"); + Table tbl1 = + new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), + tableParams, null, null, "MANAGED_TABLE"); objectStore.createTable(tbl1); HashMap partitionParams = new 
HashMap<>(); partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true"); @@ -321,10 +264,10 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, NoS Assert.assertEquals(111, partitions.get(0).getCreateTime()); Assert.assertEquals(222, partitions.get(1).getCreateTime()); - int numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, ""); + int numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, ""); Assert.assertEquals(partitions.size(), numPartitions); - numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "country = \"US\""); + numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "country = \"US\""); Assert.assertEquals(2, numPartitions); objectStore.dropPartition(DB1, TABLE1, value1); @@ -384,10 +327,10 @@ public void testRoleOps() throws InvalidObjectException, MetaException, NoSuchOb @Test public void testDirectSqlErrorMetrics() throws Exception { - HiveConf conf = new HiveConf(); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED, true); + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true); Metrics.initialize(conf); - conf.setVar(HiveConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES, + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES, "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " + "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter" ); @@ -491,93 +434,29 @@ public void testQueryCloseOnError() throws Exception { .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.anyObject()); } - @Ignore( - "This test is here to allow testing with other databases like mysql / postgres etc\n" - + " with user changes to the code. This cannot be run on apache derby because of\n" - + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html" - ) @Test - public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException { - - final int NUM_THREADS = 10; - CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS, - () -> LoggerFactory.getLogger("test") - .debug(NUM_THREADS + " threads going to add notification")); - - HiveConf conf = new HiveConf(); - conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, - MockPartitionExpressionProxy.class.getName()); - /* - Below are the properties that need to be set based on what database this test is going to be run - */ - -// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver"); -// conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, -// "jdbc:mysql://localhost:3306/metastore_db"); -// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, ""); -// conf.setVar(HiveConf.ConfVars.METASTOREPWD, ""); - - /* - we have to add this one manually as for tests the db is initialized via the metastoreDiretSQL - and we don't run the schema creation sql that includes the an insert for notification_sequence - which can be locked. the entry in notification_sequence happens via notification_event insertion. 
- */ - objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute(); - objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute(); - - objectStore.addNotificationEvent( - new NotificationEvent(0, 0, - EventMessage.EventType.CREATE_DATABASE.toString(), - "CREATE DATABASE DB initial")); - - ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS); - for (int i = 0; i < NUM_THREADS; i++) { - final int n = i; - - executorService.execute( - () -> { - ObjectStore store = new ObjectStore(); - store.setConf(conf); - - String eventType = EventMessage.EventType.CREATE_DATABASE.toString(); - NotificationEvent dbEvent = - new NotificationEvent(0, 0, eventType, - "CREATE DATABASE DB" + n); - System.out.println("ADDING NOTIFICATION"); - - try { - cyclicBarrier.await(); - } catch (InterruptedException | BrokenBarrierException e) { - throw new RuntimeException(e); - } - store.addNotificationEvent(dbEvent); - System.out.println("FINISH NOTIFICATION"); - }); - } - executorService.shutdown(); - assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS)); - - // we have to setup this again as the underlying PMF keeps getting reinitialized with original - // reference closed - ObjectStore store = new ObjectStore(); - store.setConf(conf); - - NotificationEventResponse eventResponse = store.getNextNotification( - new NotificationEventRequest()); - assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize()); - long previousId = 0; - for (NotificationEvent event : eventResponse.getEvents()) { - assertTrue("previous:" + previousId + " current:" + event.getEventId(), - previousId < event.getEventId()); - assertTrue(previousId + 1 == event.getEventId()); - previousId = event.getEventId(); - } + public void testRetryingExecutorSleep() throws Exception { + RetryingExecutor re = new ObjectStore.RetryingExecutor(MetastoreConf.newMetastoreConf(), null); + Assert.assertTrue("invalid sleep value", re.getSleepInterval() >= 0); } + @Ignore // See comment in ObjectStore.getDataSourceProps @Test - public void testRetryingExecutorSleep() throws Exception { - RetryingExecutor re = new ObjectStore.RetryingExecutor(new HiveConf(), null); - assertTrue("invalid sleep value", re.getSleepInterval() >= 0); + public void testNonConfDatanucleusValueSet() { + String key = "datanucleus.no.such.key"; + String value = "test_value"; + String key1 = "blabla.no.such.key"; + String value1 = "another_value"; + Assume.assumeTrue(System.getProperty(key) == null); + Configuration localConf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setVar(localConf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, + MockPartitionExpressionProxy.class.getName()); + localConf.set(key, value); + localConf.set(key1, value1); + objectStore = new ObjectStore(); + objectStore.setConf(localConf); + Assert.assertEquals(value, objectStore.getProp().getProperty(key)); + Assert.assertNull(objectStore.getProp().getProperty(key1)); } - } + diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java similarity index 87% rename from metastore/src/test/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java index 68d65a86a8..25a6ecf3a1 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java +++ 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRawStoreProxy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,8 +23,8 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.junit.Test; public class TestRawStoreProxy { @@ -47,10 +47,10 @@ public void exceptions() throws IllegalStateException, MetaException { @Test public void testExceptionDispatch() throws Throwable { - HiveConf hiveConf = new HiveConf(); - hiveConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 10, + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setTimeVar(conf, MetastoreConf.ConfVars.CLIENT_SOCKET_TIMEOUT, 10, TimeUnit.MILLISECONDS); - RawStoreProxy rsp = new RawStoreProxy(hiveConf, hiveConf, TestStore.class, 1); + RawStoreProxy rsp = new RawStoreProxy(conf, conf, TestStore.class, 1); try { rsp.invoke(null, TestStore.class.getMethod("exceptions"), new Object[] {}); fail("an exception is expected"); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java similarity index 87% rename from metastore/src/test/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index b6d2df59cd..d486f7c7a9 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,8 +23,8 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.TestObjectStore.MockPartitionExpressionProxy; @@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -56,9 +57,9 @@ @Before public void setUp() throws Exception { - HiveConf conf = new HiveConf(); - conf.setBoolean(HiveConf.ConfVars.HIVE_IN_TEST.varname, true); - conf.setVar(HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS, + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, MockPartitionExpressionProxy.class.getName()); objectStore = new ObjectStore(); objectStore.setConf(conf); @@ -88,7 +89,7 @@ public void testDatabaseOps() throws Exception { String dbName = "testDatabaseOps"; String dbDescription = "testDatabaseOps"; String dbLocation = "file:/tmp"; - Map dbParams = new HashMap(); + Map dbParams = new HashMap<>(); String dbOwner = "user1"; Database db = new Database(dbName, dbDescription, dbLocation, dbParams); db.setOwnerName(dbOwner); @@ -179,7 +180,7 @@ public void testTableOps() throws Exception { String dbName = "testTableOps"; String dbDescription = "testTableOps"; String dbLocation = "file:/tmp"; - Map dbParams = new HashMap(); + Map dbParams = new HashMap<>(); String dbOwner = "user1"; Database db = new Database(dbName, dbDescription, dbLocation, dbParams); db.setOwnerName(dbOwner); @@ -193,18 +194,18 @@ public void testTableOps() throws Exception { String serdeLocation = "file:/tmp"; FieldSchema col1 = new FieldSchema("col1", "int", "integer column"); FieldSchema col2 = new FieldSchema("col2", "string", "string column"); - List cols = new ArrayList(); + List cols = new ArrayList<>(); cols.add(col1); cols.add(col2); - Map serdeParams = new HashMap(); - Map tblParams = new HashMap(); - SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", new HashMap()); + Map serdeParams = new HashMap<>(); + Map tblParams = new HashMap<>(); + SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", new HashMap<>()); StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serdeInfo, null, null, serdeParams); sd.setStoredAsSubDirectories(false); Table tbl = - new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList(), tblParams, + new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList<>(), tblParams, null, null, TableType.MANAGED_TABLE.toString()); objectStore.createTable(tbl); tbl = objectStore.getTable(dbName, tblName); @@ -221,7 +222,7 @@ public void testTableOps() throws Exception { // Add a new table via CachedStore String tblName1 = "tbl1"; Table tbl1 = - new Table(tblName1, dbName, tblOwner, 0, 0, 0, sd, new ArrayList(), tblParams, + new Table(tblName1, dbName, tblOwner, 0, 0, 0, sd, new ArrayList<>(), tblParams, null, null, 
TableType.MANAGED_TABLE.toString()); cachedStore.createTable(tbl1); tbl1 = cachedStore.getTable(dbName, tblName1); @@ -233,7 +234,7 @@ public void testTableOps() throws Exception { // Add a new table via ObjectStore String tblName2 = "tbl2"; Table tbl2 = - new Table(tblName2, dbName, tblOwner, 0, 0, 0, sd, new ArrayList(), tblParams, + new Table(tblName2, dbName, tblOwner, 0, 0, 0, sd, new ArrayList<>(), tblParams, null, null, TableType.MANAGED_TABLE.toString()); objectStore.createTable(tbl2); tbl2 = objectStore.getTable(dbName, tblName2); @@ -241,7 +242,7 @@ public void testTableOps() throws Exception { // Alter table "tbl" via ObjectStore tblOwner = "user2"; tbl = - new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList(), tblParams, + new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList<>(), tblParams, null, null, TableType.MANAGED_TABLE.toString()); objectStore.alterTable(dbName, tblName, tbl); tbl = objectStore.getTable(dbName, tblName); @@ -283,7 +284,7 @@ public void testPartitionOps() throws Exception { String dbName = "testPartitionOps"; String dbDescription = "testPartitionOps"; String dbLocation = "file:/tmp"; - Map dbParams = new HashMap(); + Map dbParams = new HashMap<>(); String dbOwner = "user1"; Database db = new Database(dbName, dbDescription, dbLocation, dbParams); db.setOwnerName(dbOwner); @@ -297,17 +298,17 @@ public void testPartitionOps() throws Exception { String serdeLocation = "file:/tmp"; FieldSchema col1 = new FieldSchema("col1", "int", "integer column"); FieldSchema col2 = new FieldSchema("col2", "string", "string column"); - List cols = new ArrayList(); + List cols = new ArrayList<>(); cols.add(col1); cols.add(col2); - Map serdeParams = new HashMap(); - Map tblParams = new HashMap(); + Map serdeParams = new HashMap<>(); + Map tblParams = new HashMap<>(); SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", null); StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serdeInfo, null, null, serdeParams); FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column"); - List ptnCols = new ArrayList(); + List ptnCols = new ArrayList<>(); ptnCols.add(ptnCol1); Table tbl = new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, ptnCols, tblParams, null, null, @@ -315,7 +316,7 @@ public void testPartitionOps() throws Exception { objectStore.createTable(tbl); tbl = objectStore.getTable(dbName, tblName); final String ptnColVal1 = "aaa"; - Map partParams = new HashMap(); + Map partParams = new HashMap<>(); Partition ptn1 = new Partition(Arrays.asList(ptnColVal1), dbName, tblName, 0, 0, sd, partParams); objectStore.addPartition(ptn1); @@ -384,7 +385,7 @@ public void testTableColStatsOps() throws Exception { String dbName = "testTableColStatsOps"; String dbDescription = "testTableColStatsOps"; String dbLocation = "file:/tmp"; - Map dbParams = new HashMap(); + Map dbParams = new HashMap<>(); String dbOwner = "user1"; Database db = new Database(dbName, dbDescription, dbLocation, dbParams); db.setOwnerName(dbOwner); @@ -413,18 +414,18 @@ public void testTableColStatsOps() throws Exception { long col3NumTrues = 100; long col3NumFalses = 30; long col3Nulls = 10; - final List cols = new ArrayList(); + final List cols = new ArrayList<>(); cols.add(col1); cols.add(col2); cols.add(col3); - Map serdeParams = new HashMap(); - Map tblParams = new HashMap(); + Map serdeParams = new HashMap<>(); + Map tblParams = new HashMap<>(); final SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", null); 
StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0, serdeInfo, null, null, serdeParams); Table tbl = - new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList(), tblParams, + new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, new ArrayList<>(), tblParams, null, null, TableType.MANAGED_TABLE.toString()); objectStore.createTable(tbl); tbl = objectStore.getTable(dbName, tblName); @@ -432,7 +433,7 @@ public void testTableColStatsOps() throws Exception { // Add ColumnStatistics for tbl to metastore DB via ObjectStore ColumnStatistics stats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName); - List colStatObjs = new ArrayList(); + List colStatObjs = new ArrayList<>(); // Col1 ColumnStatisticsData data1 = new ColumnStatisticsData(); @@ -530,53 +531,53 @@ public void testSharedStoreDb() { public void testSharedStoreTable() { Table tbl1 = new Table(); StorageDescriptor sd1 = new StorageDescriptor(); - List cols1 = new ArrayList(); + List cols1 = new ArrayList<>(); cols1.add(new FieldSchema("col1", "int", "")); - Map params1 = new HashMap(); + Map params1 = new HashMap<>(); params1.put("key", "value"); sd1.setCols(cols1); sd1.setParameters(params1); sd1.setLocation("loc1"); tbl1.setSd(sd1); - tbl1.setPartitionKeys(new ArrayList()); + tbl1.setPartitionKeys(new ArrayList<>()); Table tbl2 = new Table(); StorageDescriptor sd2 = new StorageDescriptor(); - List cols2 = new ArrayList(); + List cols2 = new ArrayList<>(); cols2.add(new FieldSchema("col1", "int", "")); - Map params2 = new HashMap(); + Map params2 = new HashMap<>(); params2.put("key", "value"); sd2.setCols(cols2); sd2.setParameters(params2); sd2.setLocation("loc2"); tbl2.setSd(sd2); - tbl2.setPartitionKeys(new ArrayList()); + tbl2.setPartitionKeys(new ArrayList<>()); Table tbl3 = new Table(); StorageDescriptor sd3 = new StorageDescriptor(); - List cols3 = new ArrayList(); + List cols3 = new ArrayList<>(); cols3.add(new FieldSchema("col3", "int", "")); - Map params3 = new HashMap(); + Map params3 = new HashMap<>(); params3.put("key2", "value2"); sd3.setCols(cols3); sd3.setParameters(params3); sd3.setLocation("loc3"); tbl3.setSd(sd3); - tbl3.setPartitionKeys(new ArrayList()); + tbl3.setPartitionKeys(new ArrayList<>()); Table newTbl1 = new Table(); newTbl1.setDbName("db2"); newTbl1.setTableName("tbl1"); StorageDescriptor newSd1 = new StorageDescriptor(); - List newCols1 = new ArrayList(); + List newCols1 = new ArrayList<>(); newCols1.add(new FieldSchema("newcol1", "int", "")); - Map newParams1 = new HashMap(); + Map newParams1 = new HashMap<>(); newParams1.put("key", "value"); newSd1.setCols(newCols1); newSd1.setParameters(params1); newSd1.setLocation("loc1"); newTbl1.setSd(newSd1); - newTbl1.setPartitionKeys(new ArrayList()); + newTbl1.setPartitionKeys(new ArrayList<>()); sharedCache.addTableToCache("db1", "tbl1", tbl1); sharedCache.addTableToCache("db1", "tbl2", tbl2); @@ -607,9 +608,9 @@ public void testSharedStoreTable() { public void testSharedStorePartition() { Partition part1 = new Partition(); StorageDescriptor sd1 = new StorageDescriptor(); - List cols1 = new ArrayList(); + List cols1 = new ArrayList<>(); cols1.add(new FieldSchema("col1", "int", "")); - Map params1 = new HashMap(); + Map params1 = new HashMap<>(); params1.put("key", "value"); sd1.setCols(cols1); sd1.setParameters(params1); @@ -619,9 +620,9 @@ public void testSharedStorePartition() { Partition part2 = new Partition(); StorageDescriptor sd2 = new StorageDescriptor(); - 
List cols2 = new ArrayList(); + List cols2 = new ArrayList<>(); cols2.add(new FieldSchema("col1", "int", "")); - Map params2 = new HashMap(); + Map params2 = new HashMap<>(); params2.put("key", "value"); sd2.setCols(cols2); sd2.setParameters(params2); @@ -631,9 +632,9 @@ public void testSharedStorePartition() { Partition part3 = new Partition(); StorageDescriptor sd3 = new StorageDescriptor(); - List cols3 = new ArrayList(); + List cols3 = new ArrayList<>(); cols3.add(new FieldSchema("col3", "int", "")); - Map params3 = new HashMap(); + Map params3 = new HashMap<>(); params3.put("key2", "value2"); sd3.setCols(cols3); sd3.setParameters(params3); @@ -645,9 +646,9 @@ public void testSharedStorePartition() { newPart1.setDbName("db1"); newPart1.setTableName("tbl1"); StorageDescriptor newSd1 = new StorageDescriptor(); - List newCols1 = new ArrayList(); + List newCols1 = new ArrayList<>(); newCols1.add(new FieldSchema("newcol1", "int", "")); - Map newParams1 = new HashMap(); + Map newParams1 = new HashMap<>(); newParams1.put("key", "value"); newSd1.setCols(newCols1); newSd1.setParameters(params1); @@ -688,35 +689,35 @@ public void testAggrStatsRepeatedRead() throws Exception { Database db = new Database(dbName, null, "some_location", null); cachedStore.createDatabase(db); - List cols = new ArrayList(); + List cols = new ArrayList<>(); cols.add(new FieldSchema(colName, "int", null)); - List partCols = new ArrayList(); + List partCols = new ArrayList<>(); partCols.add(new FieldSchema("col", "int", null)); StorageDescriptor sd = - new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap()), + new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()), null, null, null); Table tbl = - new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap(), + new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); cachedStore.createTable(tbl); - List partVals1 = new ArrayList(); + List partVals1 = new ArrayList<>(); partVals1.add("1"); - List partVals2 = new ArrayList(); + List partVals2 = new ArrayList<>(); partVals2.add("2"); Partition ptn1 = - new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap()); + new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn1); Partition ptn2 = - new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap()); + new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName); statsDesc.setPartName("col"); - List colStatObjs = new ArrayList(); + List colStatObjs = new ArrayList<>(); ColumnStatisticsData data = new ColumnStatisticsData(); ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data); @@ -734,9 +735,9 @@ public void testAggrStatsRepeatedRead() throws Exception { cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1); cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2); - List colNames = new ArrayList(); + List colNames = new ArrayList<>(); colNames.add(colName); - List aggrPartVals = new ArrayList(); + List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); @@ -754,35 +755,35 @@ public 
void testPartitionAggrStats() throws Exception { Database db = new Database(dbName, null, "some_location", null); cachedStore.createDatabase(db); - List cols = new ArrayList(); + List cols = new ArrayList<>(); cols.add(new FieldSchema(colName, "int", null)); - List partCols = new ArrayList(); + List partCols = new ArrayList<>(); partCols.add(new FieldSchema("col", "int", null)); StorageDescriptor sd = - new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap()), + new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()), null, null, null); Table tbl = - new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap(), + new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); cachedStore.createTable(tbl); - List partVals1 = new ArrayList(); + List partVals1 = new ArrayList<>(); partVals1.add("1"); - List partVals2 = new ArrayList(); + List partVals2 = new ArrayList<>(); partVals2.add("2"); Partition ptn1 = - new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap()); + new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn1); Partition ptn2 = - new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap()); + new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName); statsDesc.setPartName("col"); - List colStatObjs = new ArrayList(); + List colStatObjs = new ArrayList<>(); ColumnStatisticsData data = new ColumnStatisticsData(); ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data); @@ -802,9 +803,9 @@ public void testPartitionAggrStats() throws Exception { longStats.setNumDVs(40); cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2); - List colNames = new ArrayList(); + List colNames = new ArrayList<>(); colNames.add(colName); - List aggrPartVals = new ArrayList(); + List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); @@ -824,35 +825,35 @@ public void testPartitionAggrStatsBitVector() throws Exception { Database db = new Database(dbName, null, "some_location", null); cachedStore.createDatabase(db); - List cols = new ArrayList(); + List cols = new ArrayList<>(); cols.add(new FieldSchema(colName, "int", null)); - List partCols = new ArrayList(); + List partCols = new ArrayList<>(); partCols.add(new FieldSchema("col", "int", null)); StorageDescriptor sd = - new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap()), + new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()), null, null, null); Table tbl = - new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap(), + new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); cachedStore.createTable(tbl); - List partVals1 = new ArrayList(); + List partVals1 = new ArrayList<>(); partVals1.add("1"); - List partVals2 = new ArrayList(); + List partVals2 = new ArrayList<>(); partVals2.add("2"); Partition ptn1 = - new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap()); + new 
Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn1); Partition ptn2 = - new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap()); + new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName); statsDesc.setPartName("col"); - List colStatObjs = new ArrayList(); + List colStatObjs = new ArrayList<>(); ColumnStatisticsData data = new ColumnStatisticsData(); ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data); @@ -886,9 +887,9 @@ public void testPartitionAggrStatsBitVector() throws Exception { cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2); - List colNames = new ArrayList(); + List colNames = new ArrayList<>(); colNames.add(colName); - List aggrPartVals = new ArrayList(); + List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames);
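
A change that recurs across the renamed tests above (TestObjectStore, TestCachedStore) is the setUp() migration from HiveConf to the metastore-local Configuration built by MetastoreConf. The snippet below is a minimal sketch of that pattern, assuming only the MetastoreConf and ObjectStore calls that appear verbatim in the hunks above; the test class name is hypothetical and used purely for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.ObjectStore;
import org.apache.hadoop.hive.metastore.TestObjectStore.MockPartitionExpressionProxy;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.junit.Before;

public class ExampleMigratedStoreTest {  // hypothetical class name, for illustration only

  private ObjectStore objectStore;

  @Before
  public void setUp() throws Exception {
    // Build a standalone-metastore Configuration instead of instantiating HiveConf.
    Configuration conf = MetastoreConf.newMetastoreConf();
    // EXPRESSION_PROXY_CLASS replaces HiveConf.ConfVars.METASTORE_EXPRESSION_PROXY_CLASS;
    // the mock proxy is the inner class defined in TestObjectStore, as imported by TestCachedStore.
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
        MockPartitionExpressionProxy.class.getName());
    objectStore = new ObjectStore();
    objectStore.setConf(conf);
  }
}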
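
Similarly, TestRawStoreProxy and TestObjectStore above swap the HiveConf typed setters for their MetastoreConf equivalents. A short sketch of those setters follows, again assuming only the calls visible in the hunks; the helper class and the particular values are illustrative, not a recommended configuration.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.metrics.Metrics;

public class ConfSetterSketch {  // hypothetical helper, for illustration only

  public static Configuration newTestConf() {
    Configuration conf = MetastoreConf.newMetastoreConf();
    // Boolean var: replaces HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED in testDirectSqlErrorMetrics.
    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true);
    // Time var: replaces HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT in testExceptionDispatch.
    MetastoreConf.setTimeVar(conf, MetastoreConf.ConfVars.CLIENT_SOCKET_TIMEOUT, 10,
        TimeUnit.MILLISECONDS);
    // Metrics are initialized from the same Configuration, as in testDirectSqlErrorMetrics.
    Metrics.initialize(conf);
    return conf;
  }
}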