diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java index cdfc60c994..285f30b008 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java @@ -53,6 +53,10 @@ public void setUp() throws Exception { rawStore = new ObjectStore(); rawStore.setConf(hmsHandler.getConf()); + + CachedStore cachedStore = new CachedStore(); + CachedStore.clearSharedCache(); + cachedStore.setConfForTest(conf); sharedCache = CachedStore.getSharedCache(); // Stop the CachedStore cache update service. We'll start it explicitly to control the test @@ -190,7 +194,7 @@ public void testDatabaseOpsForUpdateUsingEvents() throws Exception { hmsHandler.drop_database(dbName, true, true); hmsHandler.drop_database(dbName2, true, true); sharedCache.getDatabaseCache().clear(); - sharedCache.getTableCache().clear(); + sharedCache.clearTableCache(); sharedCache.getSdCache().clear(); } @@ -267,7 +271,7 @@ public void testTableOpsForUpdateUsingEvents() throws Exception { Assert.assertNull(tblRead); sharedCache.getDatabaseCache().clear(); - sharedCache.getTableCache().clear(); + sharedCache.clearTableCache(); sharedCache.getSdCache().clear(); } @@ -379,7 +383,7 @@ public void testPartitionOpsForUpdateUsingEvents() throws Exception { // Clean up rawStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName); sharedCache.getDatabaseCache().clear(); - sharedCache.getTableCache().clear(); + sharedCache.clearTableCache(); sharedCache.getSdCache().clear(); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java index d50fa137f0..bb673f428f 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java @@ -65,7 +65,7 @@ public static String buildTableColKey(String catName, String dbName, String tabl return buildKey(catName, dbName, tableName, colName); } - private static String buildKey(String... elements) { + public static String buildKey(String... elements) { return org.apache.commons.lang.StringUtils.join(elements, delimit); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index 1552ea0b8d..c08ae5fbe0 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hive.metastore.cache; - import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; @@ -52,7 +51,6 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.HiveAlterHandler; -import org.apache.hadoop.hive.metastore.HiveMetaException; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; @@ -120,14 +118,15 @@ private Configuration conf; private static boolean areTxnStatsSupported; private PartitionExpressionProxy expressionProxy = null; + private static String lock = "L"; + private static boolean 
sharedCacheInited = false; private static SharedCache sharedCache = new SharedCache(); - private static boolean canUseEvents = false; + private static boolean canUseEvents = false; private static long lastEventId; - static final private Logger LOG = LoggerFactory.getLogger(CachedStore.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(CachedStore.class.getName()); - @Override - public void setConf(Configuration conf) { + @Override public void setConf(Configuration conf) { setConfInternal(conf); initBlackListWhiteList(conf); initSharedCache(conf); @@ -140,12 +139,16 @@ public void setConf(Configuration conf) { * @param conf */ void setConfForTest(Configuration conf) { + setConfForTestExceptSharedCache(conf); + initSharedCache(conf); + } + + void setConfForTestExceptSharedCache(Configuration conf) { setConfInternal(conf); initBlackListWhiteList(conf); - initSharedCache(conf); } - synchronized private static void triggerUpdateUsingEvent(RawStore rawStore) { + private static synchronized void triggerUpdateUsingEvent(RawStore rawStore) { if (!isCachePrewarmed.get()) { LOG.error("cache update should be done only after prewarm"); throw new RuntimeException("cache update should be done only after prewarm"); @@ -159,12 +162,12 @@ synchronized private static void triggerUpdateUsingEvent(RawStore rawStore) { throw new RuntimeException(e.getMessage()); } finally { long endTime = System.nanoTime(); - LOG.info("Time taken in updateUsingNotificationEvents for num events : " + (lastEventId - preEventId) + " = " + - (endTime - startTime) / 1000000 + "ms"); + LOG.info("Time taken in updateUsingNotificationEvents for num events : " + (lastEventId - preEventId) + " = " + + (endTime - startTime) / 1000000 + "ms"); } } - synchronized private static void triggerPreWarm(RawStore rawStore) { + private static synchronized void triggerPreWarm(RawStore rawStore) { lastEventId = rawStore.getCurrentNotificationEventId().getEventId(); prewarm(rawStore); } @@ -177,8 
+180,7 @@ private void setConfInternal(Configuration conf) { } LOG.info("canUseEvents is set to " + canUseEvents + " in cached Store"); - String rawStoreClassName = - MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); + String rawStoreClassName = MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); if (rawStore == null) { try { rawStore = (JavaUtils.getClass(rawStoreClassName, RawStore.class)).newInstance(); @@ -198,38 +200,38 @@ private void setConfInternal(Configuration conf) { } private void initSharedCache(Configuration conf) { - long maxSharedCacheSizeInBytes = - MetastoreConf.getSizeVar(conf, ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY); - sharedCache.initialize(maxSharedCacheSizeInBytes); - if (maxSharedCacheSizeInBytes > 0) { - LOG.info("Maximum memory that the cache will use: {} KB", - maxSharedCacheSizeInBytes / (1024)); + synchronized (lock) { + if (!sharedCacheInited) { + sharedCacheInited = true; + SharedCache.Builder builder = new SharedCache.Builder(); + builder.configuration(conf).build(sharedCache); + } } } - @VisibleForTesting - public static SharedCache getSharedCache() { - return sharedCache; + @VisibleForTesting public static SharedCache getSharedCache() { + return sharedCache; } - static private ColumnStatistics updateStatsForAlterPart(RawStore rawStore, Table before, String catalogName, - String dbName, String tableName, Partition part) throws Exception { + private static ColumnStatistics updateStatsForAlterPart(RawStore rawStore, Table before, String catalogName, + String dbName, String tableName, Partition part) throws Exception { ColumnStatistics colStats; List deletedCols = new ArrayList<>(); - colStats = HiveAlterHandler.updateOrGetPartitionColumnStats(rawStore, catalogName, dbName, tableName, - part.getValues(), part.getSd().getCols(), before, part, null, deletedCols); + colStats = HiveAlterHandler + .updateOrGetPartitionColumnStats(rawStore, catalogName, dbName, 
tableName, part.getValues(), + part.getSd().getCols(), before, part, null, deletedCols); for (String column : deletedCols) { sharedCache.removePartitionColStatsFromCache(catalogName, dbName, tableName, part.getValues(), column); } if (colStats != null) { - sharedCache.alterPartitionAndStatsInCache(catalogName, dbName, tableName, part.getWriteId(), - part.getValues(), part.getParameters(), colStats.getStatsObj()); + sharedCache.alterPartitionAndStatsInCache(catalogName, dbName, tableName, part.getWriteId(), part.getValues(), + part.getParameters(), colStats.getStatsObj()); } return colStats; } - static private void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, Table tblAfter, String catalogName, - String dbName, String tableName) throws Exception { + private static void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, Table tblAfter, String catalogName, + String dbName, String tableName) throws Exception { ColumnStatistics colStats = null; List deletedCols = new ArrayList<>(); if (tblBefore.isSetPartitionKeys()) { @@ -239,19 +241,19 @@ static private void updateStatsForAlterTable(RawStore rawStore, Table tblBefore, } } - List statisticsObjs = HiveAlterHandler.alterTableUpdateTableColumnStats(rawStore, tblBefore, - tblAfter,null, null, rawStore.getConf(), deletedCols); + List statisticsObjs = HiveAlterHandler + .alterTableUpdateTableColumnStats(rawStore, tblBefore, tblAfter, null, null, rawStore.getConf(), deletedCols); if (colStats != null) { - sharedCache.alterTableAndStatsInCache(catalogName, dbName, tableName, tblAfter.getWriteId(), - statisticsObjs, tblAfter.getParameters()); + sharedCache.alterTableAndStatsInCache(catalogName, dbName, tableName, tblAfter.getWriteId(), statisticsObjs, + tblAfter.getParameters()); } for (String column : deletedCols) { sharedCache.removeTableColStatsFromCache(catalogName, dbName, tableName, column); } } - @VisibleForTesting - public static long updateUsingNotificationEvents(RawStore rawStore, long 
lastEventId) throws Exception { + @VisibleForTesting public static long updateUsingNotificationEvents(RawStore rawStore, long lastEventId) + throws Exception { LOG.debug("updating cache using notification events starting from event id " + lastEventId); NotificationEventRequest rqst = new NotificationEventRequest(lastEventId); @@ -305,96 +307,95 @@ public static long updateUsingNotificationEvents(RawStore rawStore, long lastEve continue; } switch (event.getEventType()) { - case MessageBuilder.ADD_PARTITION_EVENT: - AddPartitionMessage addPartMessage = deserializer.getAddPartitionMessage(message); - sharedCache.addPartitionsToCache(catalogName, - dbName, tableName, addPartMessage.getPartitionObjs()); - break; - case MessageBuilder.ALTER_PARTITION_EVENT: - AlterPartitionMessage alterPartitionMessage = deserializer.getAlterPartitionMessage(message); - sharedCache.alterPartitionInCache(catalogName, dbName, tableName, - alterPartitionMessage.getPtnObjBefore().getValues(), alterPartitionMessage.getPtnObjAfter()); - //TODO : Use the stat object stored in the alter table message to update the stats in cache. 
- updateStatsForAlterPart(rawStore, alterPartitionMessage.getTableObj(), - catalogName, dbName, tableName, alterPartitionMessage.getPtnObjAfter()); - break; - case MessageBuilder.DROP_PARTITION_EVENT: - DropPartitionMessage dropPartitionMessage = deserializer.getDropPartitionMessage(message); - for (Map partMap : dropPartitionMessage.getPartitions()) { - sharedCache.removePartitionFromCache(catalogName, dbName, tableName, new ArrayList<>(partMap.values())); - } - break; - case MessageBuilder.CREATE_TABLE_EVENT: - CreateTableMessage createTableMessage = deserializer.getCreateTableMessage(message); - sharedCache.addTableToCache(catalogName, dbName, - tableName, createTableMessage.getTableObj()); - break; - case MessageBuilder.ALTER_TABLE_EVENT: - AlterTableMessage alterTableMessage = deserializer.getAlterTableMessage(message); - sharedCache.alterTableInCache(catalogName, dbName, tableName, alterTableMessage.getTableObjAfter()); - //TODO : Use the stat object stored in the alter table message to update the stats in cache. 
- updateStatsForAlterTable(rawStore, alterTableMessage.getTableObjBefore(), alterTableMessage.getTableObjAfter(), - catalogName, dbName, tableName); - break; - case MessageBuilder.DROP_TABLE_EVENT: - DropTableMessage dropTableMessage = deserializer.getDropTableMessage(message); - int batchSize = MetastoreConf.getIntVar(rawStore.getConf(), ConfVars.BATCH_RETRIEVE_OBJECTS_MAX); - String tableDnsPath = null; - Path tablePath = new Path(dropTableMessage.getTableObj().getSd().getLocation()); - if (tablePath != null) { - tableDnsPath = new Warehouse(rawStore.getConf()).getDnsPath(tablePath).toString(); - } + case MessageBuilder.ADD_PARTITION_EVENT: + AddPartitionMessage addPartMessage = deserializer.getAddPartitionMessage(message); + sharedCache.addPartitionsToCache(catalogName, dbName, tableName, addPartMessage.getPartitionObjs()); + break; + case MessageBuilder.ALTER_PARTITION_EVENT: + AlterPartitionMessage alterPartitionMessage = deserializer.getAlterPartitionMessage(message); + sharedCache + .alterPartitionInCache(catalogName, dbName, tableName, alterPartitionMessage.getPtnObjBefore().getValues(), + alterPartitionMessage.getPtnObjAfter()); + //TODO : Use the stat object stored in the alter table message to update the stats in cache. 
+ updateStatsForAlterPart(rawStore, alterPartitionMessage.getTableObj(), catalogName, dbName, tableName, + alterPartitionMessage.getPtnObjAfter()); + break; + case MessageBuilder.DROP_PARTITION_EVENT: + DropPartitionMessage dropPartitionMessage = deserializer.getDropPartitionMessage(message); + for (Map partMap : dropPartitionMessage.getPartitions()) { + sharedCache.removePartitionFromCache(catalogName, dbName, tableName, new ArrayList<>(partMap.values())); + } + break; + case MessageBuilder.CREATE_TABLE_EVENT: + CreateTableMessage createTableMessage = deserializer.getCreateTableMessage(message); + sharedCache.addTableToCache(catalogName, dbName, tableName, createTableMessage.getTableObj()); + break; + case MessageBuilder.ALTER_TABLE_EVENT: + AlterTableMessage alterTableMessage = deserializer.getAlterTableMessage(message); + sharedCache.alterTableInCache(catalogName, dbName, tableName, alterTableMessage.getTableObjAfter()); + //TODO : Use the stat object stored in the alter table message to update the stats in cache. 
+ updateStatsForAlterTable(rawStore, alterTableMessage.getTableObjBefore(), alterTableMessage.getTableObjAfter(), + catalogName, dbName, tableName); + break; + case MessageBuilder.DROP_TABLE_EVENT: + DropTableMessage dropTableMessage = deserializer.getDropTableMessage(message); + int batchSize = MetastoreConf.getIntVar(rawStore.getConf(), ConfVars.BATCH_RETRIEVE_OBJECTS_MAX); + String tableDnsPath = null; + Path tablePath = new Path(dropTableMessage.getTableObj().getSd().getLocation()); + if (tablePath != null) { + tableDnsPath = new Warehouse(rawStore.getConf()).getDnsPath(tablePath).toString(); + } - while (true) { - Map partitionLocations = rawStore.getPartitionLocations(catalogName, dbName, tableName, - tableDnsPath, batchSize); - if (partitionLocations == null || partitionLocations.isEmpty()) { - break; - } - sharedCache.removePartitionFromCache(catalogName, dbName, tableName, - new ArrayList<>(partitionLocations.values())); + while (true) { + Map partitionLocations = + rawStore.getPartitionLocations(catalogName, dbName, tableName, tableDnsPath, batchSize); + if (partitionLocations == null || partitionLocations.isEmpty()) { + break; } - sharedCache.removeTableFromCache(catalogName, dbName, tableName); - break; - case MessageBuilder.CREATE_DATABASE_EVENT: - CreateDatabaseMessage createDatabaseMessage = deserializer.getCreateDatabaseMessage(message); - sharedCache.addDatabaseToCache(createDatabaseMessage.getDatabaseObject()); - break; - case MessageBuilder.ALTER_DATABASE_EVENT: - AlterDatabaseMessage alterDatabaseMessage = deserializer.getAlterDatabaseMessage(message); - sharedCache.alterDatabaseInCache(catalogName, dbName, alterDatabaseMessage.getDbObjAfter()); - break; - case MessageBuilder.DROP_DATABASE_EVENT: - sharedCache.removeDatabaseFromCache(catalogName, dbName); - break; - case MessageBuilder.CREATE_CATALOG_EVENT: - case MessageBuilder.DROP_CATALOG_EVENT: - case MessageBuilder.ALTER_CATALOG_EVENT: - // TODO : Need to add cache invalidation for catalog 
events - LOG.error("catalog Events are not supported for cache invalidation : " + event.getEventType()); - break; - case MessageBuilder.UPDATE_TBL_COL_STAT_EVENT: - UpdateTableColumnStatMessage msg = deserializer.getUpdateTableColumnStatMessage(message); - sharedCache.alterTableAndStatsInCache(catalogName, dbName, tableName, msg.getWriteId(), - msg.getColumnStatistics().getStatsObj(), msg.getParameters()); - break; - case MessageBuilder.DELETE_TBL_COL_STAT_EVENT: - DeleteTableColumnStatMessage msgDel = deserializer.getDeleteTableColumnStatMessage(message); - sharedCache.removeTableColStatsFromCache(catalogName, dbName, tableName, msgDel.getColName()); - break; - case MessageBuilder.UPDATE_PART_COL_STAT_EVENT: - UpdatePartitionColumnStatMessage msgPartUpdate = deserializer.getUpdatePartitionColumnStatMessage(message); - sharedCache.alterPartitionAndStatsInCache(catalogName, dbName, tableName, msgPartUpdate.getWriteId(), - msgPartUpdate.getPartVals(), msgPartUpdate.getParameters(), - msgPartUpdate.getColumnStatistics().getStatsObj()); - break; - case MessageBuilder.DELETE_PART_COL_STAT_EVENT: - DeletePartitionColumnStatMessage msgPart = deserializer.getDeletePartitionColumnStatMessage(message); - sharedCache.removePartitionColStatsFromCache(catalogName, dbName, tableName, - msgPart.getPartValues(), msgPart.getColName()); - break; - default: - LOG.error("Event is not supported for cache invalidation : " + event.getEventType()); + sharedCache + .removePartitionFromCache(catalogName, dbName, tableName, new ArrayList<>(partitionLocations.values())); + } + sharedCache.removeTableFromCache(catalogName, dbName, tableName); + break; + case MessageBuilder.CREATE_DATABASE_EVENT: + CreateDatabaseMessage createDatabaseMessage = deserializer.getCreateDatabaseMessage(message); + sharedCache.addDatabaseToCache(createDatabaseMessage.getDatabaseObject()); + break; + case MessageBuilder.ALTER_DATABASE_EVENT: + AlterDatabaseMessage alterDatabaseMessage = 
deserializer.getAlterDatabaseMessage(message); + sharedCache.alterDatabaseInCache(catalogName, dbName, alterDatabaseMessage.getDbObjAfter()); + break; + case MessageBuilder.DROP_DATABASE_EVENT: + sharedCache.removeDatabaseFromCache(catalogName, dbName); + break; + case MessageBuilder.CREATE_CATALOG_EVENT: + case MessageBuilder.DROP_CATALOG_EVENT: + case MessageBuilder.ALTER_CATALOG_EVENT: + // TODO : Need to add cache invalidation for catalog events + LOG.error("catalog Events are not supported for cache invalidation : " + event.getEventType()); + break; + case MessageBuilder.UPDATE_TBL_COL_STAT_EVENT: + UpdateTableColumnStatMessage msg = deserializer.getUpdateTableColumnStatMessage(message); + sharedCache.alterTableAndStatsInCache(catalogName, dbName, tableName, msg.getWriteId(), + msg.getColumnStatistics().getStatsObj(), msg.getParameters()); + break; + case MessageBuilder.DELETE_TBL_COL_STAT_EVENT: + DeleteTableColumnStatMessage msgDel = deserializer.getDeleteTableColumnStatMessage(message); + sharedCache.removeTableColStatsFromCache(catalogName, dbName, tableName, msgDel.getColName()); + break; + case MessageBuilder.UPDATE_PART_COL_STAT_EVENT: + UpdatePartitionColumnStatMessage msgPartUpdate = deserializer.getUpdatePartitionColumnStatMessage(message); + sharedCache.alterPartitionAndStatsInCache(catalogName, dbName, tableName, msgPartUpdate.getWriteId(), + msgPartUpdate.getPartVals(), msgPartUpdate.getParameters(), + msgPartUpdate.getColumnStatistics().getStatsObj()); + break; + case MessageBuilder.DELETE_PART_COL_STAT_EVENT: + DeletePartitionColumnStatMessage msgPart = deserializer.getDeletePartitionColumnStatMessage(message); + sharedCache.removePartitionColStatsFromCache(catalogName, dbName, tableName, msgPart.getPartValues(), + msgPart.getColName()); + break; + default: + LOG.error("Event is not supported for cache invalidation : " + event.getEventType()); } } return lastEventId; @@ -494,7 +495,7 @@ static void prewarm(RawStore rawStore) { AggrStats 
aggrStatsAllButDefaultPartition = null; if (!table.getPartitionKeys().isEmpty()) { Deadline.startTimer("getPartitions"); - partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE); + partitions = rawStore.getPartitions(catName, dbName, tblName, -1); Deadline.stopTimer(); List partNames = new ArrayList<>(partitions.size()); for (Partition p : partitions) { @@ -535,8 +536,9 @@ static void prewarm(RawStore rawStore) { Deadline.stopTimer(); } // If the table could not cached due to memory limit, stop prewarm - boolean isSuccess = sharedCache.populateTableInCache(table, tableColStats, partitions, partitionColStats, - aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); + boolean isSuccess = sharedCache + .populateTableInCache(table, tableColStats, partitions, partitionColStats, aggrStatsAllPartitions, + aggrStatsAllButDefaultPartition); if (isSuccess) { LOG.trace("Cached Database: {}'s Table: {}.", dbName, tblName); } else { @@ -565,8 +567,16 @@ static void prewarm(RawStore rawStore) { } } + /** + * This method is only used for testing. Test method will init a new cache and use the new handle to query the cache + * to get content in the cache. In production, no code would/should call this method, because SharedCache should be + * a singleton. 
+ */ @VisibleForTesting static void clearSharedCache() { + synchronized (lock) { + sharedCacheInited = false; + } sharedCache = new SharedCache(); } @@ -605,23 +615,20 @@ private synchronized void prioritizeTableForPrewarm(String tblName) { } } - @VisibleForTesting - static void setCachePrewarmedState(boolean state) { + @VisibleForTesting static void setCachePrewarmedState(boolean state) { isCachePrewarmed.set(state); } private static void initBlackListWhiteList(Configuration conf) { - whitelistPatterns = createPatterns(MetastoreConf.getAsString(conf, - MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_WHITELIST)); - blacklistPatterns = createPatterns(MetastoreConf.getAsString(conf, - MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST)); + whitelistPatterns = createPatterns( + MetastoreConf.getAsString(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_WHITELIST)); + blacklistPatterns = createPatterns( + MetastoreConf.getAsString(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST)); } private static Collection catalogsToCache(RawStore rs) throws MetaException { - Collection confValue = - MetastoreConf.getStringCollection(rs.getConf(), ConfVars.CATALOGS_TO_CACHE); - if (confValue == null || confValue.isEmpty() || - (confValue.size() == 1 && confValue.contains(""))) { + Collection confValue = MetastoreConf.getStringCollection(rs.getConf(), ConfVars.CATALOGS_TO_CACHE); + if (confValue == null || confValue.isEmpty() || (confValue.size() == 1 && confValue.contains(""))) { return rs.getCatalogs(); } else { return confValue; @@ -636,19 +643,17 @@ private static void initBlackListWhiteList(Configuration conf) { * @param conf * @param runOnlyOnce * @param shouldRunPrewarm - */ - static synchronized void startCacheUpdateService(Configuration conf, boolean runOnlyOnce, + */ static synchronized void startCacheUpdateService(Configuration conf, boolean runOnlyOnce, boolean shouldRunPrewarm) { if (cacheUpdateMaster == null) { 
initBlackListWhiteList(conf); if (!MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) { - cacheRefreshPeriodMS = MetastoreConf.getTimeVar(conf, - ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS); + cacheRefreshPeriodMS = + MetastoreConf.getTimeVar(conf, ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS); } LOG.info("CachedStore: starting cache update service (run every {} ms)", cacheRefreshPeriodMS); cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { + @Override public Thread newThread(Runnable r) { Thread t = Executors.defaultThreadFactory().newThread(r); t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId()); t.setDaemon(true); @@ -656,24 +661,23 @@ public Thread newThread(Runnable r) { } }); if (!runOnlyOnce) { - cacheUpdateMaster.scheduleAtFixedRate(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, - cacheRefreshPeriodMS, TimeUnit.MILLISECONDS); + cacheUpdateMaster + .scheduleAtFixedRate(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, cacheRefreshPeriodMS, + TimeUnit.MILLISECONDS); } - } + } if (runOnlyOnce) { // Some tests control the execution of the background update thread cacheUpdateMaster.schedule(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, TimeUnit.MILLISECONDS); } } - @VisibleForTesting - static synchronized boolean stopCacheUpdateService(long timeout) { + @VisibleForTesting static synchronized boolean stopCacheUpdateService(long timeout) { boolean tasksStoppedBeforeShutdown = false; if (cacheUpdateMaster != null) { LOG.info("CachedStore: shutting down cache update service"); try { - tasksStoppedBeforeShutdown = - cacheUpdateMaster.awaitTermination(timeout, TimeUnit.MILLISECONDS); + tasksStoppedBeforeShutdown = cacheUpdateMaster.awaitTermination(timeout, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { LOG.info("CachedStore: cache update service was interrupted while waiting for tasks to 
" + "complete before shutting down. Will make a hard stop now."); @@ -684,8 +688,7 @@ static synchronized boolean stopCacheUpdateService(long timeout) { return tasksStoppedBeforeShutdown; } - @VisibleForTesting - static void setCacheRefreshPeriod(long time) { + @VisibleForTesting static void setCacheRefreshPeriod(long time) { cacheRefreshPeriodMS = time; } @@ -693,7 +696,6 @@ static void setCacheRefreshPeriod(long time) { private boolean shouldRunPrewarm = true; private final RawStore rawStore; - CacheUpdateMasterWork(Configuration conf, boolean shouldRunPrewarm) { this.shouldRunPrewarm = shouldRunPrewarm; String rawStoreClassName = @@ -708,8 +710,7 @@ static void setCacheRefreshPeriod(long time) { } } - @Override - public void run() { + @Override public void run() { if (!shouldRunPrewarm) { if (canUseEvents) { try { @@ -814,8 +815,9 @@ private void updateTables(RawStore rawStore, String catName, String dbName) { if (!shouldCacheTable(catName, dbName, tblName)) { continue; } - Table table = rawStore.getTable(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName)); + Table table = rawStore + .getTable(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName)); tables.add(table); } success = sharedCache.refreshTablesInCache(catName, dbName, tables); @@ -862,10 +864,11 @@ private void updateTablePartitions(RawStore rawStore, String catName, String dbN dbName, tblName); try { Deadline.startTimer("getPartitions"); - List partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE); + List partitions = rawStore.getPartitions(catName, dbName, tblName, -1); Deadline.stopTimer(); - sharedCache.refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), partitions); + sharedCache + 
.refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName), + StringUtils.normalizeIdentifier(tblName), partitions); LOG.debug("CachedStore: updated cached partition objects for catalog: {}, database: {}, table: {}", catName, dbName, tblName); } catch (MetaException | NoSuchObjectException e) { @@ -886,7 +889,7 @@ private void updateTablePartitionColStats(RawStore rawStore, String catName, Str // Get partition column stats for this table Deadline.startTimer("getPartitionColumnStatistics"); List partitionColStats = - rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); + rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats); Deadline.startTimer("getPartitionsByNames"); @@ -914,7 +917,8 @@ private void updateTablePartitionColStats(RawStore rawStore, String catName, Str // but default partition private static void updateTableAggregatePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) { - LOG.debug("CachedStore: updating cached aggregate partition col stats objects for catalog: {}, database: {}, table: {}", + LOG.debug( + "CachedStore: updating cached aggregate partition col stats objects for catalog: {}, database: {}, table: {}", catName, dbName, tblName); try { Table table = rawStore.getTable(catName, dbName, tblName); @@ -943,10 +947,10 @@ private static void updateTableAggregatePartitionColStats(RawStore rawStore, Str rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), aggrStatsAllPartitions, + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), aggrStatsAllPartitions, 
aggrStatsAllButDefaultPartition, null); - LOG.debug("CachedStore: updated cached aggregate partition col stats objects for catalog: {}, database: {}, table: {}", - catName, dbName, tblName); + LOG.debug("CachedStore: updated cached aggregate partition col stats objects for catalog:" + + " {}, database: {}, table: {}", catName, dbName, tblName); } } catch (MetaException | NoSuchObjectException e) { LOG.info("Updating CachedStore: unable to read aggregate column stats of table: " + tblName, e); @@ -954,23 +958,19 @@ private static void updateTableAggregatePartitionColStats(RawStore rawStore, Str } } - @Override - public Configuration getConf() { + @Override public Configuration getConf() { return rawStore.getConf(); } - @Override - public void shutdown() { + @Override public void shutdown() { rawStore.shutdown(); } - @Override - public boolean openTransaction() { + @Override public boolean openTransaction() { return rawStore.openTransaction(); } - @Override - public boolean commitTransaction() { + @Override public boolean commitTransaction() { if (!rawStore.commitTransaction()) { return false; } @@ -994,18 +994,15 @@ public boolean commitTransaction() { return true; } - @Override - public boolean isActiveTransaction() { + @Override public boolean isActiveTransaction() { return rawStore.isActiveTransaction(); } - @Override - public void rollbackTransaction() { + @Override public void rollbackTransaction() { rawStore.rollbackTransaction(); } - @Override - public void createCatalog(Catalog cat) throws MetaException { + @Override public void createCatalog(Catalog cat) throws MetaException { rawStore.createCatalog(cat); // in case of event based cache update, cache will not be updated for catalog. 
if (!canUseEvents) { @@ -1013,9 +1010,7 @@ public void createCatalog(Catalog cat) throws MetaException { } } - @Override - public void alterCatalog(String catName, Catalog cat) throws MetaException, - InvalidOperationException { + @Override public void alterCatalog(String catName, Catalog cat) throws MetaException, InvalidOperationException { rawStore.alterCatalog(catName, cat); // in case of event based cache update, cache will not be updated for catalog. if (!canUseEvents) { @@ -1023,8 +1018,7 @@ public void alterCatalog(String catName, Catalog cat) throws MetaException, } } - @Override - public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { + @Override public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { // in case of event based cache update, cache will not be updated for catalog. if (!sharedCache.isCatalogCachePrewarmed() || canUseEvents) { return rawStore.getCatalog(catalogName); @@ -1036,8 +1030,7 @@ public Catalog getCatalog(String catalogName) throws NoSuchObjectException, Meta return cat; } - @Override - public List getCatalogs() throws MetaException { + @Override public List getCatalogs() throws MetaException { // in case of event based cache update, cache will not be updated for catalog. if (!sharedCache.isCatalogCachePrewarmed() || canUseEvents) { return rawStore.getCatalogs(); @@ -1045,8 +1038,7 @@ public Catalog getCatalog(String catalogName) throws NoSuchObjectException, Meta return sharedCache.listCachedCatalogs(); } - @Override - public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { + @Override public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { rawStore.dropCatalog(catalogName); // in case of event based cache update, cache will not be updated for catalog. 
@@ -1056,8 +1048,7 @@ public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaEx } } - @Override - public void createDatabase(Database db) throws InvalidObjectException, MetaException { + @Override public void createDatabase(Database db) throws InvalidObjectException, MetaException { rawStore.createDatabase(db); // in case of event based cache update, cache will be updated during commit. if (!canUseEvents) { @@ -1065,8 +1056,7 @@ public void createDatabase(Database db) throws InvalidObjectException, MetaExcep } } - @Override - public Database getDatabase(String catName, String dbName) throws NoSuchObjectException { + @Override public Database getDatabase(String catName, String dbName) throws NoSuchObjectException { // in case of event based cache update, cache will be updated during commit. So within active transaction, read // directly from rawStore to avoid reading stale data as the data updated during same transaction will not be // updated in the cache. @@ -1074,65 +1064,58 @@ public Database getDatabase(String catName, String dbName) throws NoSuchObjectEx return rawStore.getDatabase(catName, dbName); } dbName = dbName.toLowerCase(); - Database db = sharedCache.getDatabaseFromCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName)); + Database db = sharedCache + .getDatabaseFromCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName)); if (db == null) { throw new NoSuchObjectException(); } return db; } - @Override - public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException { + @Override public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException { boolean succ = rawStore.dropDatabase(catName, dbName); if (succ && !canUseEvents) { // in case of event based cache update, cache will be updated during commit. 
- sharedCache.removeDatabaseFromCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName)); + sharedCache + .removeDatabaseFromCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName)); } return succ; } - @Override - public boolean alterDatabase(String catName, String dbName, Database db) + @Override public boolean alterDatabase(String catName, String dbName, Database db) throws NoSuchObjectException, MetaException { boolean succ = rawStore.alterDatabase(catName, dbName, db); if (succ && !canUseEvents) { // in case of event based cache update, cache will be updated during commit. - sharedCache.alterDatabaseInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), db); + sharedCache + .alterDatabaseInCache(StringUtils.normalizeIdentifier(catName), StringUtils.normalizeIdentifier(dbName), db); } return succ; } - @Override - public List getDatabases(String catName, String pattern) throws MetaException { + @Override public List getDatabases(String catName, String pattern) throws MetaException { if (!sharedCache.isDatabaseCachePrewarmed() || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.getDatabases(catName, pattern); } return sharedCache.listCachedDatabases(catName, pattern); } - @Override - public List getAllDatabases(String catName) throws MetaException { + @Override public List getAllDatabases(String catName) throws MetaException { if (!sharedCache.isDatabaseCachePrewarmed() || (canUseEvents && rawStore.isActiveTransaction())) { return rawStore.getAllDatabases(catName); } return sharedCache.listCachedDatabases(catName); } - @Override - public boolean createType(Type type) { + @Override public boolean createType(Type type) { return rawStore.createType(type); } - @Override - public Type getType(String typeName) { + @Override public Type getType(String typeName) { return rawStore.getType(typeName); } - @Override - public boolean dropType(String 
typeName) { + @Override public boolean dropType(String typeName) { return rawStore.dropType(typeName); } @@ -1154,8 +1137,7 @@ private void validateTableType(Table tbl) { tbl.setTableType(tableType); } - @Override - public void createTable(Table tbl) throws InvalidObjectException, MetaException { + @Override public void createTable(Table tbl) throws InvalidObjectException, MetaException { rawStore.createTable(tbl); // in case of event based cache update, cache will be updated during commit. if (canUseEvents) { @@ -1171,8 +1153,7 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException sharedCache.addTableToCache(catName, dbName, tblName, tbl); } - @Override - public boolean dropTable(String catName, String dbName, String tblName) + @Override public boolean dropTable(String catName, String dbName, String tblName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { boolean succ = rawStore.dropTable(catName, dbName, tblName); // in case of event based cache update, cache will be updated during commit. 
@@ -1188,13 +1169,12 @@ public boolean dropTable(String catName, String dbName, String tblName) return succ; } - @Override - public Table getTable(String catName, String dbName, String tblName) throws MetaException { + @Override public Table getTable(String catName, String dbName, String tblName) throws MetaException { return getTable(catName, dbName, tblName, null); } - @Override - public Table getTable(String catName, String dbName, String tblName, String validWriteIds) throws MetaException { + @Override public Table getTable(String catName, String dbName, String tblName, String validWriteIds) + throws MetaException { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); @@ -1209,7 +1189,11 @@ public Table getTable(String catName, String dbName, String tblName, String vali // let's move this table to the top of tblNamesBeingPrewarmed stack, // so that it gets loaded to the cache faster and is available for subsequent requests tblsPendingPrewarm.prioritizeTableForPrewarm(tblName); - return rawStore.getTable(catName, dbName, tblName, validWriteIds); + Table t = rawStore.getTable(catName, dbName, tblName, validWriteIds); + if (t != null) { + sharedCache.addTableToCache(catName, dbName, tblName, t); + } + return t; } if (validWriteIds != null) { tbl.setParameters( @@ -1237,8 +1221,7 @@ public Table getTable(String catName, String dbName, String tblName, String vali return tbl; } - @Override - public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { + @Override public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { boolean succ = rawStore.addPartition(part); // in case of event based cache update, cache will be updated during commit. 
if (succ && !canUseEvents) { @@ -1253,8 +1236,7 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE return succ; } - @Override - public boolean addPartitions(String catName, String dbName, String tblName, List parts) + @Override public boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException { boolean succ = rawStore.addPartitions(catName, dbName, tblName, parts); // in case of event based cache update, cache will be updated during commit. @@ -1270,9 +1252,8 @@ public boolean addPartitions(String catName, String dbName, String tblName, List return succ; } - @Override - public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, - boolean ifNotExists) throws InvalidObjectException, MetaException { + @Override public boolean addPartitions(String catName, String dbName, String tblName, + PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { boolean succ = rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists); // in case of event based cache update, cache will be updated during commit. 
if (succ && !canUseEvents) { @@ -1291,65 +1272,56 @@ public boolean addPartitions(String catName, String dbName, String tblName, Part return succ; } - @Override - public Partition getPartition(String catName, String dbName, String tblName, List part_vals) + @Override public Partition getPartition(String catName, String dbName, String tblName, List partVals) throws MetaException, NoSuchObjectException { - return getPartition(catName, dbName, tblName, part_vals, null); + return getPartition(catName, dbName, tblName, partVals, null); } - @Override - public Partition getPartition(String catName, String dbName, String tblName, - List part_vals, String validWriteIds) - throws MetaException, NoSuchObjectException { + @Override public Partition getPartition(String catName, String dbName, String tblName, List partVals, + String validWriteIds) throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getPartition( - catName, dbName, tblName, part_vals, validWriteIds); + return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); } - Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals); + Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals); if (part == null) { // The table containing the partition is not yet loaded in cache - return rawStore.getPartition( - catName, dbName, tblName, part_vals, validWriteIds); + return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); } if (validWriteIds != null) { Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table containing the partition is not yet loaded in cache - return rawStore.getPartition( - catName, dbName, tblName, 
part_vals, validWriteIds); + return rawStore.getPartition(catName, dbName, tblName, partVals, validWriteIds); } - part.setParameters(adjustStatsParamsForGet(table.getParameters(), - part.getParameters(), part.getWriteId(), validWriteIds)); + part.setParameters( + adjustStatsParamsForGet(table.getParameters(), part.getParameters(), part.getWriteId(), validWriteIds)); } return part; } - @Override - public boolean doesPartitionExist(String catName, String dbName, String tblName, - List partKeys, List part_vals) - throws MetaException, NoSuchObjectException { + @Override public boolean doesPartitionExist(String catName, String dbName, String tblName, List partKeys, + List partVals) throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals); + return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals); } Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // The table containing the partition is not yet loaded in cache - return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals); + return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, partVals); } - return sharedCache.existPartitionFromCache(catName, dbName, tblName, part_vals); + return sharedCache.existPartitionFromCache(catName, dbName, tblName, partVals); } - @Override - public boolean dropPartition(String catName, String dbName, String tblName, List part_vals) + @Override public boolean dropPartition(String catName, String dbName, String tblName, List partVals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.dropPartition(catName, dbName, 
tblName, part_vals); + boolean succ = rawStore.dropPartition(catName, dbName, tblName, partVals); // in case of event based cache update, cache will be updated during commit. if (succ && !canUseEvents) { catName = normalizeIdentifier(catName); @@ -1358,13 +1330,12 @@ public boolean dropPartition(String catName, String dbName, String tblName, List if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.removePartitionFromCache(catName, dbName, tblName, part_vals); + sharedCache.removePartitionFromCache(catName, dbName, tblName, partVals); } return succ; } - @Override - public void dropPartitions(String catName, String dbName, String tblName, List partNames) + @Override public void dropPartitions(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { rawStore.dropPartitions(catName, dbName, tblName, partNames); // in case of event based cache update, cache will be updated during commit. @@ -1384,8 +1355,7 @@ public void dropPartitions(String catName, String dbName, String tblName, List getPartitions(String catName, String dbName, String tblName, int max) + @Override public List getPartitions(String catName, String dbName, String tblName, int max) throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); @@ -1402,26 +1372,23 @@ public void dropPartitions(String catName, String dbName, String tblName, List getPartitionLocations(String catName, String dbName, String tblName, + @Override public Map getPartitionLocations(String catName, String dbName, String tblName, String baseLocationToNotShow, int max) { return rawStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max); } - @Override - public Table alterTable(String catName, String dbName, String tblName, Table newTable, - String validWriteIds) throws InvalidObjectException, MetaException { + @Override public Table alterTable(String 
catName, String dbName, String tblName, Table newTable, String validWriteIds) + throws InvalidObjectException, MetaException { newTable = rawStore.alterTable(catName, dbName, tblName, newTable, validWriteIds); // in case of event based cache update, cache will be updated during commit. if (canUseEvents) { - return newTable; + return newTable; } catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tblName = normalizeIdentifier(tblName); String newTblName = normalizeIdentifier(newTable.getTableName()); - if (!shouldCacheTable(catName, dbName, tblName) && - !shouldCacheTable(catName, dbName, newTblName)) { + if (!shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) { return newTable; } Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); @@ -1442,60 +1409,36 @@ public Table alterTable(String catName, String dbName, String tblName, Table new return newTable; } - @Override - public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) + @Override public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) throws MetaException { rawStore.updateCreationMetadata(catName, dbname, tablename, cm); } - @Override - public List getTables(String catName, String dbName, String pattern) throws MetaException { - if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get() || !isCachedAllMetadata.get() || - (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getTables(catName, dbName, pattern); - } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), pattern, -1); + @Override public List getTables(String catName, String dbName, String pattern) throws MetaException { + return rawStore.getTables(catName, dbName, pattern); } - @Override - public List getTables(String catName, String dbName, String pattern, TableType 
tableType, int limit) + @Override public List getTables(String catName, String dbName, String pattern, TableType tableType, int limit) throws MetaException { - if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()|| !isCachedAllMetadata.get() - || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getTables(catName, dbName, pattern, tableType, limit); - } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), pattern, tableType, limit); + return rawStore.getTables(catName, dbName, pattern, tableType, limit); } - @Override - public List getAllMaterializedViewObjectsForRewriting(String catName) throws MetaException { + @Override public List
getAllMaterializedViewObjectsForRewriting(String catName) throws MetaException { // TODO fucntionCache return rawStore.getAllMaterializedViewObjectsForRewriting(catName); } - @Override - public List getMaterializedViewsForRewriting(String catName, String dbName) + @Override public List getMaterializedViewsForRewriting(String catName, String dbName) throws MetaException, NoSuchObjectException { return rawStore.getMaterializedViewsForRewriting(catName, dbName); } - @Override - public List getTableMeta(String catName, String dbNames, String tableNames, List tableTypes) - throws MetaException { - // TODO Check if all required tables are allowed, if so, get it from cache - if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get() || !isCachedAllMetadata.get() || - (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getTableMeta(catName, dbNames, tableNames, tableTypes); - } - return sharedCache.getTableMeta(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbNames), - StringUtils.normalizeIdentifier(tableNames), tableTypes); + @Override public List getTableMeta(String catName, String dbNames, String tableNames, + List tableTypes) throws MetaException { + return rawStore.getTableMeta(catName, dbNames, tableNames, tableTypes); } - @Override - public List
getTableObjectsByName(String catName, String dbName, List tblNames) + @Override public List
getTableObjectsByName(String catName, String dbName, List tblNames) throws MetaException, UnknownDBException { if (canUseEvents && rawStore.isActiveTransaction()) { return rawStore.getTableObjectsByName(catName, dbName, tblNames); @@ -1523,6 +1466,7 @@ public void updateCreationMetadata(String catName, String dbname, String tablena Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { tbl = rawStore.getTable(catName, dbName, tblName); + sharedCache.addTableToCache(catName, dbName, tblName, tbl); } if (tbl != null) { tables.add(tbl); @@ -1532,58 +1476,48 @@ public void updateCreationMetadata(String catName, String dbname, String tablena return tables; } - @Override - public List getAllTables(String catName, String dbName) throws MetaException { - if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get() || !isCachedAllMetadata.get() || - (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.getAllTables(catName, dbName); - } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName)); + @Override public List getAllTables(String catName, String dbName) throws MetaException { + return rawStore.getAllTables(catName, dbName); } @Override // TODO: implement using SharedCache - public List listTableNamesByFilter(String catName, String dbName, String filter, short max_tables) + public List listTableNamesByFilter(String catName, String dbName, String filter, short maxTables) throws MetaException, UnknownDBException { - return rawStore.listTableNamesByFilter(catName, dbName, filter, max_tables); + return rawStore.listTableNamesByFilter(catName, dbName, filter, maxTables); } - @Override - public List listPartitionNames(String catName, String dbName, String tblName, - short max_parts) throws MetaException { + @Override public List listPartitionNames(String catName, String dbName, String tblName, short maxParts) + throws MetaException { catName = 
StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && rawStore.isActiveTransaction())) { - return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); + return rawStore.listPartitionNames(catName, dbName, tblName, maxParts); } Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // The table is not yet loaded in cache - return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); + return rawStore.listPartitionNames(catName, dbName, tblName, maxParts); } List partitionNames = new ArrayList<>(); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, max_parts)) { - if (max_parts == -1 || count < max_parts) { + for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { + if (maxParts == -1 || count < maxParts) { partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues())); } } return partitionNames; } - @Override - public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, - List cols, boolean applyDistinct, String filter, boolean ascending, - List order, long maxParts) throws MetaException { + @Override public PartitionValuesResponse listPartitionValues(String catName, String dbName, String tblName, + List cols, boolean applyDistinct, String filter, boolean ascending, List order, + long maxParts) throws MetaException { throw new UnsupportedOperationException(); } - @Override - public Partition alterPartition(String catName, String dbName, String tblName, - List partVals, Partition newPart, String validWriteIds) - throws InvalidObjectException, MetaException { + @Override public Partition alterPartition(String catName, String dbName, String tblName, List partVals, + Partition newPart, String validWriteIds) throws 
InvalidObjectException, MetaException { newPart = rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, validWriteIds); // in case of event based cache update, cache will be updated during commit. if (canUseEvents) { @@ -1599,13 +1533,10 @@ public Partition alterPartition(String catName, String dbName, String tblName, return newPart; } - @Override - public List alterPartitions(String catName, String dbName, String tblName, - List> partValsList, List newParts, - long writeId, String validWriteIds) + @Override public List alterPartitions(String catName, String dbName, String tblName, + List> partValsList, List newParts, long writeId, String validWriteIds) throws InvalidObjectException, MetaException { - newParts = rawStore.alterPartitions( - catName, dbName, tblName, partValsList, newParts, writeId, validWriteIds); + newParts = rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, writeId, validWriteIds); // in case of event based cache update, cache will be updated during commit. 
if (canUseEvents) { return newParts; @@ -1620,43 +1551,37 @@ public Partition alterPartition(String catName, String dbName, String tblName, return newParts; } - private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, - String defaultPartName, short maxParts, List result, SharedCache sharedCache) - throws MetaException, NoSuchObjectException { - List parts = - sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(table.getCatName()), - StringUtils.normalizeIdentifier(table.getDbName()), - StringUtils.normalizeIdentifier(table.getTableName()), maxParts); + private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, String defaultPartName, short maxParts, + List result, SharedCache sharedCache) throws MetaException, NoSuchObjectException { + List parts = sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(table.getCatName()), + StringUtils.normalizeIdentifier(table.getDbName()), StringUtils.normalizeIdentifier(table.getTableName()), + maxParts); for (Partition part : parts) { result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); } if (defaultPartName == null || defaultPartName.isEmpty()) { defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); } - return expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), expr, defaultPartName, - result); + return expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), expr, defaultPartName, result); } @Override // TODO: implement using SharedCache - public List getPartitionsByFilter(String catName, String dbName, String tblName, - String filter, short maxParts) - throws MetaException, NoSuchObjectException { + public List getPartitionsByFilter(String catName, String dbName, String tblName, String filter, + short maxParts) throws MetaException, NoSuchObjectException { return rawStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts); } @Override /** * 
getPartitionSpecsByFilterAndProjection interface is currently non-cacheable. - */ - public List getPartitionSpecsByFilterAndProjection(Table table, + */ public List getPartitionSpecsByFilterAndProjection(Table table, GetPartitionsProjectionSpec projectionSpec, GetPartitionsFilterSpec filterSpec) throws MetaException, NoSuchObjectException { return rawStore.getPartitionSpecsByFilterAndProjection(table, projectionSpec, filterSpec); } - @Override - public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, + @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); @@ -1680,14 +1605,12 @@ public boolean getPartitionsByExpr(String catName, String dbName, String tblName return hasUnknownPartitions; } - @Override - public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) + @Override public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException { return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter); } - @Override - public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) + @Override public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); @@ -1702,13 +1625,11 @@ public int getNumPartitionsByExpr(String catName, String dbName, String tblName, // The table is not yet loaded in cache return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); } - getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames, - sharedCache); + 
getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames, sharedCache); return partNames.size(); } - @VisibleForTesting - public static List partNameToVals(String name) { + @VisibleForTesting public static List partNameToVals(String name) { if (name == null) { return null; } @@ -1720,8 +1641,7 @@ public int getNumPartitionsByExpr(String catName, String dbName, String tblName, return vals; } - @Override - public List getPartitionsByNames(String catName, String dbName, String tblName, + @Override public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); @@ -1737,179 +1657,144 @@ public int getNumPartitionsByExpr(String catName, String dbName, String tblName, List partitions = new ArrayList<>(); for (String partName : partNames) { Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName)); - if (part!=null) { + if (part != null) { partitions.add(part); } } return partitions; } - @Override - public Table markPartitionForEvent(String catName, String dbName, String tblName, + @Override public Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) - throws MetaException, UnknownTableException, InvalidPartitionException, - UnknownPartitionException { + throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { return rawStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType); } - @Override - public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, + @Override public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map partName, PartitionEventType evtType) - throws MetaException, UnknownTableException, InvalidPartitionException, - 
UnknownPartitionException { + throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { return rawStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType); } - @Override - public boolean addRole(String rowName, String ownerName) + @Override public boolean addRole(String rowName, String ownerName) throws InvalidObjectException, MetaException, NoSuchObjectException { return rawStore.addRole(rowName, ownerName); } - @Override - public boolean removeRole(String roleName) - throws MetaException, NoSuchObjectException { + @Override public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException { return rawStore.removeRole(roleName); } - @Override - public boolean grantRole(Role role, String userName, - PrincipalType principalType, String grantor, PrincipalType grantorType, - boolean grantOption) + @Override public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor, + PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException, InvalidObjectException { return rawStore.grantRole(role, userName, principalType, grantor, grantorType, grantOption); } - @Override - public boolean revokeRole(Role role, String userName, - PrincipalType principalType, boolean grantOption) + @Override public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption) throws MetaException, NoSuchObjectException { return rawStore.revokeRole(role, userName, principalType, grantOption); } - @Override - public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, - List groupNames) throws InvalidObjectException, MetaException { + @Override public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List groupNames) + throws InvalidObjectException, MetaException { return rawStore.getUserPrivilegeSet(userName, groupNames); } - @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, 
String dbName, String userName, + @Override public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException { return rawStore.getDBPrivilegeSet(catName, dbName, userName, groupNames); } - @Override - public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, - String tableName, String userName, List groupNames) - throws InvalidObjectException, MetaException { + @Override public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName, + String userName, List groupNames) throws InvalidObjectException, MetaException { return rawStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames); } - @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, - String tableName, String partition, String userName, - List groupNames) throws InvalidObjectException, MetaException { + @Override public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName, + String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { return rawStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition, userName, groupNames); } - @Override - public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, - String tableName, String partitionName, String columnName, - String userName, List groupNames) + @Override public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName, + String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { return rawStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName, columnName, userName, groupNames); } - @Override - public List listPrincipalGlobalGrants( - String principalName, PrincipalType principalType) { + @Override public List 
listPrincipalGlobalGrants(String principalName, + PrincipalType principalType) { return rawStore.listPrincipalGlobalGrants(principalName, principalType); } - @Override - public List listPrincipalDBGrants(String principalName, - PrincipalType principalType, String catName, String dbName) { + @Override public List listPrincipalDBGrants(String principalName, PrincipalType principalType, + String catName, String dbName) { return rawStore.listPrincipalDBGrants(principalName, principalType, catName, dbName); } - @Override - public List listAllTableGrants(String principalName, - PrincipalType principalType, String catName, String dbName, String tableName) { + @Override public List listAllTableGrants(String principalName, PrincipalType principalType, + String catName, String dbName, String tableName) { return rawStore.listAllTableGrants(principalName, principalType, catName, dbName, tableName); } - @Override - public List listPrincipalPartitionGrants( - String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, List partValues, String partName) { - return rawStore.listPrincipalPartitionGrants(principalName, principalType, catName, dbName, tableName, partValues, partName); + @Override public List listPrincipalPartitionGrants(String principalName, + PrincipalType principalType, String catName, String dbName, String tableName, List partValues, + String partName) { + return rawStore + .listPrincipalPartitionGrants(principalName, principalType, catName, dbName, tableName, partValues, partName); } - @Override - public List listPrincipalTableColumnGrants( - String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, String columnName) { - return rawStore.listPrincipalTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName); + @Override public List listPrincipalTableColumnGrants(String principalName, + PrincipalType principalType, String catName, String dbName, 
String tableName, String columnName) { + return rawStore + .listPrincipalTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName); } - @Override - public List listPrincipalPartitionColumnGrants( - String principalName, PrincipalType principalType, String catName, String dbName, - String tableName, List partValues, String partName, - String columnName) { - return rawStore.listPrincipalPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partValues, partName, columnName); + @Override public List listPrincipalPartitionColumnGrants(String principalName, + PrincipalType principalType, String catName, String dbName, String tableName, List partValues, + String partName, String columnName) { + return rawStore + .listPrincipalPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partValues, + partName, columnName); } - @Override - public boolean grantPrivileges(PrivilegeBag privileges) + @Override public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException, MetaException, NoSuchObjectException { return rawStore.grantPrivileges(privileges); } - @Override - public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) + @Override public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) throws InvalidObjectException, MetaException, NoSuchObjectException { return rawStore.revokePrivileges(privileges, grantOption); } - @Override - public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges) - throws InvalidObjectException, MetaException, NoSuchObjectException { + @Override public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, + PrivilegeBag grantPrivileges) throws InvalidObjectException, MetaException, NoSuchObjectException { return rawStore.refreshPrivileges(objToRefresh, authorizer, grantPrivileges); } - @Override - public Role getRole(String roleName) throws 
NoSuchObjectException { + @Override public Role getRole(String roleName) throws NoSuchObjectException { return rawStore.getRole(roleName); } - @Override - public List listRoleNames() { + @Override public List listRoleNames() { return rawStore.listRoleNames(); } - @Override - public List listRoles(String principalName, - PrincipalType principalType) { + @Override public List listRoles(String principalName, PrincipalType principalType) { return rawStore.listRoles(principalName, principalType); } - @Override - public List listRolesWithGrants(String principalName, - PrincipalType principalType) { + @Override public List listRolesWithGrants(String principalName, PrincipalType principalType) { return rawStore.listRolesWithGrants(principalName, principalType); } - @Override - public List listRoleMembers(String roleName) { + @Override public List listRoleMembers(String roleName) { return rawStore.listRoleMembers(roleName); } - @Override - public Partition getPartitionWithAuth(String catName, String dbName, String tblName, - List partVals, String userName, List groupNames) - throws MetaException, NoSuchObjectException, InvalidObjectException { + @Override public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, + String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); @@ -1924,8 +1809,7 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN Partition p = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals); if (p != null) { String partName = Warehouse.makePartName(table.getPartitionKeys(), partVals); - PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName, - userName, groupNames); + PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, 
tblName, partName, userName, groupNames); p.setPrivileges(privs); } else { throw new NoSuchObjectException("partition values=" + partVals.toString()); @@ -1933,10 +1817,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN return p; } - @Override - public List getPartitionsWithAuth(String catName, String dbName, String tblName, - short maxParts, String userName, List groupNames) - throws MetaException, NoSuchObjectException, InvalidObjectException { + @Override public List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, + String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); @@ -1953,8 +1835,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { if (maxParts == -1 || count < maxParts) { String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); - PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName, - userName, groupNames); + PrincipalPrivilegeSet privs = + getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); part.setPrivileges(privs); partitions.add(part); count++; @@ -1963,9 +1845,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN return partitions; } - @Override - public List listPartitionNamesPs(String catName, String dbName, String tblName, List partSpecs, - short maxParts) throws MetaException, NoSuchObjectException { + @Override public List listPartitionNamesPs(String catName, String dbName, String tblName, + List partSpecs, short maxParts) throws MetaException, NoSuchObjectException { catName = StringUtils.normalizeIdentifier(catName); 
dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); @@ -1991,9 +1872,8 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN return partitionNames; } - @Override - public List listPartitionsPsWithAuth(String catName, String dbName, String tblName, List partSpecs, - short maxParts, String userName, List groupNames) + @Override public List listPartitionsPsWithAuth(String catName, String dbName, String tblName, + List partSpecs, short maxParts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); @@ -2045,11 +1925,13 @@ private String getPartNameMatcher(Table table, List partSpecs) throws Me } // Note: ideally this should be above both CachedStore and ObjectStore. - private Map adjustStatsParamsForGet(Map tableParams, - Map params, long statsWriteId, String validWriteIds) throws MetaException { - if (!TxnUtils.isTransactionalTable(tableParams)) return params; // Not a txn table. - if (areTxnStatsSupported && ((validWriteIds == null) - || ObjectStore.isCurrentStatsValidForTheQuery(params, statsWriteId, validWriteIds, false))) { + private Map adjustStatsParamsForGet(Map tableParams, Map params, + long statsWriteId, String validWriteIds) throws MetaException { + if (!TxnUtils.isTransactionalTable(tableParams)) { + return params; // Not a txn table. + } + if (areTxnStatsSupported && ((validWriteIds == null) || ObjectStore + .isCurrentStatsValidForTheQuery(params, statsWriteId, validWriteIds, false))) { // Valid stats are supported for txn tables, and either no verification was requested by the // caller, or the verification has succeeded. return params; @@ -2060,16 +1942,15 @@ private String getPartNameMatcher(Table table, List partSpecs) throws Me return params; } - // Note: ideally this should be above both CachedStore and ObjectStore. 
- public static ColumnStatistics adjustColStatForGet(Map tableParams, - ColumnStatistics colStat, long statsWriteId, - String validWriteIds, boolean areTxnStatsSupported) throws MetaException { + public static ColumnStatistics adjustColStatForGet(Map tableParams, ColumnStatistics colStat, + long statsWriteId, String validWriteIds, boolean areTxnStatsSupported) throws MetaException { colStat.setIsStatsCompliant(true); - if (!TxnUtils.isTransactionalTable(tableParams)) return colStat; // Not a txn table. - if (areTxnStatsSupported && ((validWriteIds == null) - || ObjectStore.isCurrentStatsValidForTheQuery( - tableParams, statsWriteId, validWriteIds, false))) { + if (!TxnUtils.isTransactionalTable(tableParams)) { + return colStat; // Not a txn table. + } + if (areTxnStatsSupported && ((validWriteIds == null) || ObjectStore + .isCurrentStatsValidForTheQuery(tableParams, statsWriteId, validWriteIds, false))) { // Valid stats are supported for txn tables, and either no verification was requested by the // caller, or the verification has succeeded. return colStat; @@ -2080,11 +1961,9 @@ public static ColumnStatistics adjustColStatForGet(Map tablePara } private static void updateTableColumnsStatsInternal(Configuration conf, ColumnStatistics colStats, - Map newParams, String validWriteIds, - long writeId) throws MetaException { - String catName = colStats.getStatsDesc().isSetCatName() ? - normalizeIdentifier(colStats.getStatsDesc().getCatName()) : - getDefaultCatalog(conf); + Map newParams, String validWriteIds, long writeId) throws MetaException { + String catName = colStats.getStatsDesc().isSetCatName() ? 
normalizeIdentifier( + colStats.getStatsDesc().getCatName()) : getDefaultCatalog(conf); String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName()); String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName()); if (!shouldCacheTable(catName, dbName, tblName)) { @@ -2101,17 +1980,17 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt if (!areTxnStatsSupported) { StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); } else { - String errorMsg = ObjectStore.verifyStatsChangeCtx(TableName.getDbTable(dbName, tblName), - table.getParameters(), newParams, writeId, validWriteIds, true); + String errorMsg = ObjectStore + .verifyStatsChangeCtx(TableName.getDbTable(dbName, tblName), table.getParameters(), newParams, writeId, + validWriteIds, true); if (errorMsg != null) { throw new MetaException(errorMsg); } - if (!ObjectStore.isCurrentStatsValidForTheQuery(newParams, table.getWriteId(), - validWriteIds, true)) { + if (!ObjectStore.isCurrentStatsValidForTheQuery(newParams, table.getWriteId(), validWriteIds, true)) { // Make sure we set the flag to invalid regardless of the current value. StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE); - LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " - + table.getDbName() + "." + table.getTableName()); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " + table.getDbName() + "." 
+ table + .getTableName()); } } } @@ -2122,12 +2001,9 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt sharedCache.updateTableColStatsInCache(catName, dbName, tblName, colStats.getStatsObj()); } - @Override - public Map updateTableColumnStatistics(ColumnStatistics colStats, - String validWriteIds, long writeId) - throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - Map newParams = rawStore.updateTableColumnStatistics( - colStats, validWriteIds, writeId); + @Override public Map updateTableColumnStatistics(ColumnStatistics colStats, String validWriteIds, + long writeId) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + Map newParams = rawStore.updateTableColumnStatistics(colStats, validWriteIds, writeId); // in case of event based cache update, cache will be updated during commit. if (newParams != null && !canUseEvents) { updateTableColumnsStatsInternal(conf, colStats, newParams, null, writeId); @@ -2135,43 +2011,35 @@ private static void updateTableColumnsStatsInternal(Configuration conf, ColumnSt return newParams; } - @Override - public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, + @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, List colNames) throws MetaException, NoSuchObjectException { return getTableColumnStatistics(catName, dbName, tblName, colNames, null); } - @Override - public ColumnStatistics getTableColumnStatistics( - String catName, String dbName, String tblName, List colNames, - String validWriteIds) - throws MetaException, NoSuchObjectException { + @Override public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, + List colNames, String validWriteIds) throws MetaException, NoSuchObjectException { catName = StringUtils.normalizeIdentifier(catName); dbName = 
StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.getTableColumnStatistics( - catName, dbName, tblName, colNames, validWriteIds); + return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds); } Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getTableColumnStatistics( - catName, dbName, tblName, colNames, validWriteIds); + return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds); } ColumnStatistics columnStatistics = sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames, validWriteIds, areTxnStatsSupported); if (columnStatistics == null) { - LOG.info("Stat of Table {}.{} for column {} is not present in cache." + - "Getting from raw store", dbName, tblName, colNames); + LOG.info("Stat of Table {}.{} for column {} is not present in cache." 
+ "Getting from raw store", dbName, tblName, + colNames); return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames, validWriteIds); } return columnStatistics; } - @Override - public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName, - String colName) + @Override public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName); // in case of event based cache update, cache is updated during commit txn @@ -2187,16 +2055,15 @@ public boolean deleteTableColumnStatistics(String catName, String dbName, String return succ; } - @Override - public Map updatePartitionColumnStatistics(ColumnStatistics colStats, - List partVals, String validWriteIds, long writeId) + @Override public Map updatePartitionColumnStatistics(ColumnStatistics colStats, List partVals, + String validWriteIds, long writeId) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - Map newParams = rawStore.updatePartitionColumnStatistics( - colStats, partVals, validWriteIds, writeId); + Map newParams = + rawStore.updatePartitionColumnStatistics(colStats, partVals, validWriteIds, writeId); // in case of event based cache update, cache is updated during commit txn if (newParams != null && !canUseEvents) { - String catName = colStats.getStatsDesc().isSetCatName() ? - normalizeIdentifier(colStats.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME; + String catName = colStats.getStatsDesc().isSetCatName() ? 
normalizeIdentifier( + colStats.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME; String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName()); String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName()); if (!shouldCacheTable(catName, dbName, tblName)) { @@ -2210,36 +2077,31 @@ public boolean deleteTableColumnStatistics(String catName, String dbName, String return newParams; } - @Override - public List getPartitionColumnStatistics(String catName, String dbName, String tblName, + @Override public List getPartitionColumnStatistics(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException { return getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, null); } - @Override - public List getPartitionColumnStatistics( - String catName, String dbName, String tblName, List partNames, - List colNames, String writeIdList) - throws MetaException, NoSuchObjectException { + @Override public List getPartitionColumnStatistics(String catName, String dbName, String tblName, + List partNames, List colNames, String writeIdList) throws MetaException, NoSuchObjectException { // If writeIdList is not null, that means stats are requested within a txn context. So set stats compliant to false, // if areTxnStatsSupported is false or the write id which has updated the stats in not compatible with writeIdList. // This is done within table lock as the number of partitions may be more than one and we need a consistent view // for all the partitions. 
- List columnStatistics = sharedCache.getPartitionColStatsListFromCache(catName, dbName, tblName, - partNames, colNames, writeIdList, areTxnStatsSupported); + List columnStatistics = sharedCache + .getPartitionColStatsListFromCache(catName, dbName, tblName, partNames, colNames, writeIdList, + areTxnStatsSupported); if (columnStatistics == null) { return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, writeIdList); } return columnStatistics; } - @Override - public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName, - List partVals, String colName) + @Override public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, + String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean succ = - rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName); + boolean succ = rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName); // in case of event based cache update, cache is updated during commit txn. 
if (succ && !canUseEvents) { catName = normalizeIdentifier(catName); @@ -2253,17 +2115,13 @@ public boolean deletePartitionColumnStatistics(String catName, String dbName, St return succ; } - @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, + @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException { return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, null); } - @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, - List partNames, List colNames, - String writeIdList) - throws MetaException, NoSuchObjectException { + @Override public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, + List colNames, String writeIdList) throws MetaException, NoSuchObjectException { List colStats; catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); @@ -2272,14 +2130,12 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam // (incl. due to lack of sync w.r.t. the below rawStore call). // In case the cache is updated using events, aggregate is calculated locally and thus can be read from cache. 
if (!shouldCacheTable(catName, dbName, tblName) || (writeIdList != null && !canUseEvents)) { - return rawStore.get_aggr_stats_for( - catName, dbName, tblName, partNames, colNames, writeIdList); + return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList); } Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.get_aggr_stats_for( - catName, dbName, tblName, partNames, colNames, writeIdList); + return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList); } List allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); @@ -2301,21 +2157,21 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam } } - LOG.debug("Didn't find aggr stats in cache. Merging them. tblName= {}, parts= {}, cols= {}", - tblName, partNames, colNames); - MergedColumnStatsForPartitions mergedColStats = mergeColStatsForPartitions(catName, dbName, tblName, - partNames, colNames, sharedCache, type, writeIdList); + LOG.debug("Didn't find aggr stats in cache. Merging them. tblName= {}, parts= {}, cols= {}", tblName, partNames, + colNames); + MergedColumnStatsForPartitions mergedColStats = + mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames, sharedCache, type, writeIdList); if (mergedColStats == null) { - LOG.info("Aggregate stats of partition " + TableName.getQualified(catName, dbName, tblName) + "." + - partNames + " for columns " + colNames + " is not present in cache. Getting it from raw store"); + LOG.info("Aggregate stats of partition " + TableName.getQualified(catName, dbName, tblName) + "." + partNames + + " for columns " + colNames + " is not present in cache. 
Getting it from raw store"); return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, writeIdList); } return new AggrStats(mergedColStats.getColStats(), mergedColStats.getPartsFound()); } - private MergedColumnStatsForPartitions mergeColStatsForPartitions( - String catName, String dbName, String tblName, List partNames, List colNames, - SharedCache sharedCache, StatsType type, String writeIdList) throws MetaException { + private MergedColumnStatsForPartitions mergeColStatsForPartitions(String catName, String dbName, String tblName, + List partNames, List colNames, SharedCache sharedCache, StatsType type, String writeIdList) + throws MetaException { final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); @@ -2335,8 +2191,8 @@ private MergedColumnStatsForPartitions mergeColStatsForPartitions( // the behavior same as object store. // 3. Partition is missing or its stat is updated by live(not yet committed) or aborted txn. In this case, // colStatsWriteId is null. Thus null is returned to keep the behavior same as object store. 
- SharedCache.ColumStatsWithWriteId colStatsWriteId = sharedCache.getPartitionColStatsFromCache(catName, dbName, - tblName, partValue, colName, writeIdList); + SharedCache.ColumStatsWithWriteId colStatsWriteId = + sharedCache.getPartitionColStatsFromCache(catName, dbName, tblName, partValue, colName, writeIdList); if (colStatsWriteId == null) { return null; } @@ -2349,15 +2205,14 @@ private MergedColumnStatsForPartitions mergeColStatsForPartitions( new ColStatsObjWithSourceInfo(colStatsForPart, catName, dbName, tblName, partName); colStatsWithPartInfoList.add(colStatsWithPartInfo); if (colStatsAggregator == null) { - colStatsAggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator( - colStatsForPart.getStatsData().getSetField(), useDensityFunctionForNDVEstimation, - ndvTuner); + colStatsAggregator = ColumnStatsAggregatorFactory + .getColumnStatsAggregator(colStatsForPart.getStatsData().getSetField(), + useDensityFunctionForNDVEstimation, ndvTuner); } partsFoundForColumn++; } else { - LOG.debug( - "Stats not found in CachedStore for: dbName={} tblName={} partName={} colName={}", - dbName, tblName, partName, colName); + LOG.debug("Stats not found in CachedStore for: dbName={} tblName={} partName={} colName={}", dbName, tblName, + partName, colName); } } if (colStatsWithPartInfoList.size() > 0) { @@ -2369,27 +2224,26 @@ private MergedColumnStatsForPartitions mergeColStatsForPartitions( partsFound = partsFoundForColumn; } if (colStatsMap.size() < 1) { - LOG.debug("No stats data found for: dbName={} tblName= {} partNames= {} colNames= ", dbName, - tblName, partNames, colNames); + LOG.debug("No stats data found for: dbName={} tblName= {} partNames= {} colNames= ", dbName, tblName, partNames, + colNames); return new MergedColumnStatsForPartitions(new ArrayList(), 0); } } // Note that enableBitVector does not apply here because ColumnStatisticsObj // itself will tell whether bitvector is null or not and aggr logic can automatically apply. 
- List colAggrStats = MetaStoreServerUtils.aggrPartitionStats(colStatsMap, - partNames, partsFound == partNames.size(), useDensityFunctionForNDVEstimation, ndvTuner); + List colAggrStats = MetaStoreServerUtils + .aggrPartitionStats(colStatsMap, partNames, partsFound == partNames.size(), useDensityFunctionForNDVEstimation, + ndvTuner); if (canUseEvents) { if (type == StatsType.ALL) { sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), - StringUtils.normalizeIdentifier(tblName), new AggrStats(colAggrStats, partsFound), - null, partNameToWriteId); + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), + new AggrStats(colAggrStats, partsFound), null, partNameToWriteId); } else if (type == StatsType.ALLBUTDEFAULT) { sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), - StringUtils.normalizeIdentifier(tblName), null, - new AggrStats(colAggrStats, partsFound), partNameToWriteId); + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), null, + new AggrStats(colAggrStats, partsFound), partNameToWriteId); } } return new MergedColumnStatsForPartitions(colAggrStats, partsFound); @@ -2413,440 +2267,355 @@ long getPartsFound() { } } - @Override - public long cleanupEvents() { + @Override public long cleanupEvents() { return rawStore.cleanupEvents(); } - @Override - public boolean addToken(String tokenIdentifier, String delegationToken) { + @Override public boolean addToken(String tokenIdentifier, String delegationToken) { return rawStore.addToken(tokenIdentifier, delegationToken); } - @Override - public boolean removeToken(String tokenIdentifier) { + @Override public boolean removeToken(String tokenIdentifier) { return rawStore.removeToken(tokenIdentifier); } - @Override - public String getToken(String tokenIdentifier) { + @Override public String getToken(String 
tokenIdentifier) { return rawStore.getToken(tokenIdentifier); } - @Override - public List getAllTokenIdentifiers() { + @Override public List getAllTokenIdentifiers() { return rawStore.getAllTokenIdentifiers(); } - @Override - public int addMasterKey(String key) throws MetaException { + @Override public int addMasterKey(String key) throws MetaException { return rawStore.addMasterKey(key); } - @Override - public void updateMasterKey(Integer seqNo, String key) - throws NoSuchObjectException, MetaException { + @Override public void updateMasterKey(Integer seqNo, String key) throws NoSuchObjectException, MetaException { rawStore.updateMasterKey(seqNo, key); } - @Override - public boolean removeMasterKey(Integer keySeq) { + @Override public boolean removeMasterKey(Integer keySeq) { return rawStore.removeMasterKey(keySeq); } - @Override - public String[] getMasterKeys() { + @Override public String[] getMasterKeys() { return rawStore.getMasterKeys(); } - @Override - public void verifySchema() throws MetaException { + @Override public void verifySchema() throws MetaException { rawStore.verifySchema(); } - @Override - public String getMetaStoreSchemaVersion() throws MetaException { + @Override public String getMetaStoreSchemaVersion() throws MetaException { return rawStore.getMetaStoreSchemaVersion(); } - @Override - public void setMetaStoreSchemaVersion(String version, String comment) - throws MetaException { + @Override public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException { rawStore.setMetaStoreSchemaVersion(version, comment); } - @Override - public List listPrincipalDBGrantsAll( - String principalName, PrincipalType principalType) { + @Override public List listPrincipalDBGrantsAll(String principalName, + PrincipalType principalType) { return rawStore.listPrincipalDBGrantsAll(principalName, principalType); } - @Override - public List listPrincipalTableGrantsAll( - String principalName, PrincipalType principalType) { + @Override public 
List listPrincipalTableGrantsAll(String principalName, + PrincipalType principalType) { return rawStore.listPrincipalTableGrantsAll(principalName, principalType); } - @Override - public List listPrincipalPartitionGrantsAll( - String principalName, PrincipalType principalType) { + @Override public List listPrincipalPartitionGrantsAll(String principalName, + PrincipalType principalType) { return rawStore.listPrincipalPartitionGrantsAll(principalName, principalType); } - @Override - public List listPrincipalTableColumnGrantsAll( - String principalName, PrincipalType principalType) { + @Override public List listPrincipalTableColumnGrantsAll(String principalName, + PrincipalType principalType) { return rawStore.listPrincipalTableColumnGrantsAll(principalName, principalType); } - @Override - public List listPrincipalPartitionColumnGrantsAll( - String principalName, PrincipalType principalType) { + @Override public List listPrincipalPartitionColumnGrantsAll(String principalName, + PrincipalType principalType) { return rawStore.listPrincipalPartitionColumnGrantsAll(principalName, principalType); } - @Override - public List listGlobalGrantsAll() { + @Override public List listGlobalGrantsAll() { return rawStore.listGlobalGrantsAll(); } - @Override - public List listDBGrantsAll(String catName, String dbName) { + @Override public List listDBGrantsAll(String catName, String dbName) { return rawStore.listDBGrantsAll(catName, dbName); } - @Override - public List listPartitionColumnGrantsAll(String catName, String dbName, + @Override public List listPartitionColumnGrantsAll(String catName, String dbName, String tableName, String partitionName, String columnName) { return rawStore.listPartitionColumnGrantsAll(catName, dbName, tableName, partitionName, columnName); } - @Override - public List listTableGrantsAll(String catName, String dbName, - String tableName) { + @Override public List listTableGrantsAll(String catName, String dbName, String tableName) { return 
rawStore.listTableGrantsAll(catName, dbName, tableName); } - @Override - public List listPartitionGrantsAll(String catName, String dbName, - String tableName, String partitionName) { + @Override public List listPartitionGrantsAll(String catName, String dbName, String tableName, + String partitionName) { return rawStore.listPartitionGrantsAll(catName, dbName, tableName, partitionName); } - @Override - public List listTableColumnGrantsAll(String catName, String dbName, - String tableName, String columnName) { + @Override public List listTableColumnGrantsAll(String catName, String dbName, String tableName, + String columnName) { return rawStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName); } - @Override - public void createFunction(Function func) - throws InvalidObjectException, MetaException { + @Override public void createFunction(Function func) throws InvalidObjectException, MetaException { // TODO fucntionCache rawStore.createFunction(func); } - @Override - public void alterFunction(String catName, String dbName, String funcName, - Function newFunction) throws InvalidObjectException, MetaException { + @Override public void alterFunction(String catName, String dbName, String funcName, Function newFunction) + throws InvalidObjectException, MetaException { // TODO fucntionCache rawStore.alterFunction(catName, dbName, funcName, newFunction); } - @Override - public void dropFunction(String catName, String dbName, String funcName) throws MetaException, - NoSuchObjectException, InvalidObjectException, InvalidInputException { + @Override public void dropFunction(String catName, String dbName, String funcName) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { // TODO fucntionCache rawStore.dropFunction(catName, dbName, funcName); } - @Override - public Function getFunction(String catName, String dbName, String funcName) - throws MetaException { + @Override public Function getFunction(String catName, String 
dbName, String funcName) throws MetaException { // TODO fucntionCache return rawStore.getFunction(catName, dbName, funcName); } - @Override - public List getAllFunctions(String catName) throws MetaException { + @Override public List getAllFunctions(String catName) throws MetaException { // TODO fucntionCache return rawStore.getAllFunctions(catName); } - @Override - public List getFunctions(String catName, String dbName, String pattern) - throws MetaException { + @Override public List getFunctions(String catName, String dbName, String pattern) throws MetaException { // TODO fucntionCache return rawStore.getFunctions(catName, dbName, pattern); } - @Override - public NotificationEventResponse getNextNotification( - NotificationEventRequest rqst) { + @Override public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { return rawStore.getNextNotification(rqst); } - @Override - public void addNotificationEvent(NotificationEvent event) throws MetaException { + @Override public void addNotificationEvent(NotificationEvent event) throws MetaException { rawStore.addNotificationEvent(event); } - @Override - public void cleanNotificationEvents(int olderThan) { + @Override public void cleanNotificationEvents(int olderThan) { rawStore.cleanNotificationEvents(olderThan); } - @Override - public CurrentNotificationEventId getCurrentNotificationEventId() { + @Override public CurrentNotificationEventId getCurrentNotificationEventId() { return rawStore.getCurrentNotificationEventId(); } - @Override - public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) { + @Override public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) { return rawStore.getNotificationEventsCount(rqst); } - @Override - public void flushCache() { + @Override public void flushCache() { rawStore.flushCache(); } - @Override - public ByteBuffer[] getFileMetadata(List fileIds) throws MetaException { + 
@Override public ByteBuffer[] getFileMetadata(List fileIds) throws MetaException { return rawStore.getFileMetadata(fileIds); } - @Override - public void putFileMetadata(List fileIds, List metadata, - FileMetadataExprType type) throws MetaException { + @Override public void putFileMetadata(List fileIds, List metadata, FileMetadataExprType type) + throws MetaException { rawStore.putFileMetadata(fileIds, metadata, type); } - @Override - public boolean isFileMetadataSupported() { + @Override public boolean isFileMetadataSupported() { return rawStore.isFileMetadataSupported(); } - @Override - public void getFileMetadataByExpr(List fileIds, - FileMetadataExprType type, byte[] expr, ByteBuffer[] metadatas, - ByteBuffer[] exprResults, boolean[] eliminated) throws MetaException { + @Override public void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] expr, + ByteBuffer[] metadatas, ByteBuffer[] exprResults, boolean[] eliminated) throws MetaException { rawStore.getFileMetadataByExpr(fileIds, type, expr, metadatas, exprResults, eliminated); } - @Override - public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { + @Override public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { return rawStore.getFileMetadataHandler(type); } - @Override - public int getTableCount() throws MetaException { + @Override public int getTableCount() throws MetaException { return rawStore.getTableCount(); } - @Override - public int getPartitionCount() throws MetaException { + @Override public int getPartitionCount() throws MetaException { return rawStore.getPartitionCount(); } - @Override - public int getDatabaseCount() throws MetaException { + @Override public int getDatabaseCount() throws MetaException { return rawStore.getDatabaseCount(); } - @Override - public List getPrimaryKeys(String catName, String db_name, String tbl_name) + @Override public List getPrimaryKeys(String catName, String dbName, String tblName) throws 
MetaException { // TODO constraintCache - return rawStore.getPrimaryKeys(catName, db_name, tbl_name); + return rawStore.getPrimaryKeys(catName, dbName, tblName); } - @Override - public List getForeignKeys(String catName, String parent_db_name, - String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) - throws MetaException { + @Override public List getForeignKeys(String catName, String parentDbName, String parentTblName, + String foreignDbName, String foreignTblName) throws MetaException { // TODO constraintCache - return rawStore.getForeignKeys(catName, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); + return rawStore.getForeignKeys(catName, parentDbName, parentTblName, foreignDbName, foreignTblName); } - @Override - public List getUniqueConstraints(String catName, String db_name, String tbl_name) + @Override public List getUniqueConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO constraintCache - return rawStore.getUniqueConstraints(catName, db_name, tbl_name); + return rawStore.getUniqueConstraints(catName, dbName, tblName); } - @Override - public List getNotNullConstraints(String catName, String db_name, String tbl_name) + @Override public List getNotNullConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO constraintCache - return rawStore.getNotNullConstraints(catName, db_name, tbl_name); + return rawStore.getNotNullConstraints(catName, dbName, tblName); } - @Override - public List getDefaultConstraints(String catName, String db_name, String tbl_name) + @Override public List getDefaultConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO constraintCache - return rawStore.getDefaultConstraints(catName, db_name, tbl_name); + return rawStore.getDefaultConstraints(catName, dbName, tblName); } - @Override - public List getCheckConstraints(String catName, String db_name, String tbl_name) + @Override public List 
getCheckConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO constraintCache - return rawStore.getCheckConstraints(catName, db_name, tbl_name); + return rawStore.getCheckConstraints(catName, dbName, tblName); } - @Override - public List createTableWithConstraints(Table tbl, List primaryKeys, + @Override public List createTableWithConstraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, - List notNullConstraints, - List defaultConstraints, + List notNullConstraints, List defaultConstraints, List checkConstraints) throws InvalidObjectException, MetaException { // TODO constraintCache - List constraintNames = rawStore.createTableWithConstraints(tbl, primaryKeys, - foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); + List constraintNames = rawStore + .createTableWithConstraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, + defaultConstraints, checkConstraints); // in case of event based cache update, cache is updated during commit. if (canUseEvents) { return constraintNames; } String dbName = normalizeIdentifier(tbl.getDbName()); String tblName = normalizeIdentifier(tbl.getTableName()); - String catName = tbl.isSetCatName() ? normalizeIdentifier(tbl.getCatName()) : - DEFAULT_CATALOG_NAME; + String catName = tbl.isSetCatName() ? 
normalizeIdentifier(tbl.getCatName()) : DEFAULT_CATALOG_NAME; if (!shouldCacheTable(catName, dbName, tblName)) { return constraintNames; } sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getCatName()), - StringUtils.normalizeIdentifier(tbl.getDbName()), - StringUtils.normalizeIdentifier(tbl.getTableName()), tbl); + StringUtils.normalizeIdentifier(tbl.getDbName()), StringUtils.normalizeIdentifier(tbl.getTableName()), tbl); return constraintNames; } - @Override - public void dropConstraint(String catName, String dbName, String tableName, - String constraintName, boolean missingOk) throws NoSuchObjectException { + @Override public void dropConstraint(String catName, String dbName, String tableName, String constraintName, + boolean missingOk) throws NoSuchObjectException { // TODO constraintCache rawStore.dropConstraint(catName, dbName, tableName, constraintName, missingOk); } - @Override - public List addPrimaryKeys(List pks) - throws InvalidObjectException, MetaException { + @Override public List addPrimaryKeys(List pks) throws InvalidObjectException, MetaException { // TODO constraintCache return rawStore.addPrimaryKeys(pks); } - @Override - public List addForeignKeys(List fks) - throws InvalidObjectException, MetaException { + @Override public List addForeignKeys(List fks) throws InvalidObjectException, MetaException { // TODO constraintCache return rawStore.addForeignKeys(fks); } - @Override - public List addUniqueConstraints(List uks) + @Override public List addUniqueConstraints(List uks) throws InvalidObjectException, MetaException { // TODO constraintCache return rawStore.addUniqueConstraints(uks); } - @Override - public List addNotNullConstraints(List nns) + @Override public List addNotNullConstraints(List nns) throws InvalidObjectException, MetaException { // TODO constraintCache return rawStore.addNotNullConstraints(nns); } - @Override - public List addDefaultConstraints(List nns) + @Override public List addDefaultConstraints(List nns) throws 
InvalidObjectException, MetaException { // TODO constraintCache return rawStore.addDefaultConstraints(nns); } - @Override - public List addCheckConstraints(List nns) + @Override public List addCheckConstraints(List nns) throws InvalidObjectException, MetaException { // TODO constraintCache return rawStore.addCheckConstraints(nns); } // TODO - not clear if we should cache these or not. For now, don't bother - @Override - public void createISchema(ISchema schema) + @Override public void createISchema(ISchema schema) throws AlreadyExistsException, NoSuchObjectException, MetaException { rawStore.createISchema(schema); } - @Override - public List getPartitionColStatsForDatabase(String catName, String dbName) + @Override public List getPartitionColStatsForDatabase(String catName, String dbName) throws MetaException, NoSuchObjectException { return rawStore.getPartitionColStatsForDatabase(catName, dbName); } - @Override - public void alterISchema(ISchemaName schemaName, ISchema newSchema) + @Override public void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException, MetaException { rawStore.alterISchema(schemaName, newSchema); } - @Override - public ISchema getISchema(ISchemaName schemaName) throws MetaException { + @Override public ISchema getISchema(ISchemaName schemaName) throws MetaException { return rawStore.getISchema(schemaName); } - @Override - public void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, MetaException { + @Override public void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, MetaException { rawStore.dropISchema(schemaName); } - @Override - public void addSchemaVersion(SchemaVersion schemaVersion) throws - AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException { + @Override public void addSchemaVersion(SchemaVersion schemaVersion) + throws AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException { 
rawStore.addSchemaVersion(schemaVersion); } - @Override - public void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersion) throws - NoSuchObjectException, MetaException { + @Override public void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersion) + throws NoSuchObjectException, MetaException { rawStore.alterSchemaVersion(version, newVersion); } - @Override - public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws MetaException { + @Override public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws MetaException { return rawStore.getSchemaVersion(version); } - @Override - public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException { + @Override public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException { return rawStore.getLatestSchemaVersion(schemaName); } - @Override - public List getAllSchemaVersion(ISchemaName schemaName) throws MetaException { + @Override public List getAllSchemaVersion(ISchemaName schemaName) throws MetaException { return rawStore.getAllSchemaVersion(schemaName); } - @Override - public List getSchemaVersionsByColumns(String colName, String colNamespace, - String type) throws MetaException { + @Override public List getSchemaVersionsByColumns(String colName, String colNamespace, String type) + throws MetaException { return rawStore.getSchemaVersionsByColumns(colName, colNamespace, type); } - @Override - public void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException, - MetaException { + @Override public void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException, MetaException { rawStore.dropSchemaVersion(version); } - @Override - public SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException { + @Override public SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException { return 
rawStore.getSerDeInfo(serDeName); } - @Override - public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException { + @Override public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException { rawStore.addSerde(serde); } @@ -2854,124 +2623,99 @@ public RawStore getRawStore() { return rawStore; } - @VisibleForTesting - public void setRawStore(RawStore rawStore) { + @VisibleForTesting public void setRawStore(RawStore rawStore) { this.rawStore = rawStore; } - @Override - public String getMetastoreDbUuid() throws MetaException { + @Override public String getMetastoreDbUuid() throws MetaException { return rawStore.getMetastoreDbUuid(); } - @Override - public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize) + @Override public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException { rawStore.createResourcePlan(resourcePlan, copyFrom, defaultPoolSize); } - @Override - public WMFullResourcePlan getResourcePlan(String name, String ns) + @Override public WMFullResourcePlan getResourcePlan(String name, String ns) throws NoSuchObjectException, MetaException { return rawStore.getResourcePlan(name, ns); } - @Override - public List getAllResourcePlans(String ns) throws MetaException { + @Override public List getAllResourcePlans(String ns) throws MetaException { return rawStore.getAllResourcePlans(ns); } - @Override - public WMFullResourcePlan alterResourcePlan(String name, String ns, WMNullableResourcePlan resourcePlan, - boolean canActivateDisabled, boolean canDeactivate, boolean isReplace) - throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, - MetaException { - return rawStore.alterResourcePlan( - name, ns, resourcePlan, canActivateDisabled, canDeactivate, isReplace); + @Override public WMFullResourcePlan alterResourcePlan(String name, String 
ns, WMNullableResourcePlan resourcePlan, + boolean canActivateDisabled, boolean canDeactivate, boolean isReplace) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException { + return rawStore.alterResourcePlan(name, ns, resourcePlan, canActivateDisabled, canDeactivate, isReplace); } - @Override - public WMFullResourcePlan getActiveResourcePlan(String ns) throws MetaException { + @Override public WMFullResourcePlan getActiveResourcePlan(String ns) throws MetaException { return rawStore.getActiveResourcePlan(ns); } - @Override - public WMValidateResourcePlanResponse validateResourcePlan(String name, String ns) + @Override public WMValidateResourcePlanResponse validateResourcePlan(String name, String ns) throws NoSuchObjectException, InvalidObjectException, MetaException { return rawStore.validateResourcePlan(name, ns); } - @Override - public void dropResourcePlan(String name, String ns) throws NoSuchObjectException, MetaException { + @Override public void dropResourcePlan(String name, String ns) throws NoSuchObjectException, MetaException { rawStore.dropResourcePlan(name, ns); } - @Override - public void createWMTrigger(WMTrigger trigger) - throws AlreadyExistsException, MetaException, NoSuchObjectException, - InvalidOperationException { + @Override public void createWMTrigger(WMTrigger trigger) + throws AlreadyExistsException, MetaException, NoSuchObjectException, InvalidOperationException { rawStore.createWMTrigger(trigger); } - @Override - public void alterWMTrigger(WMTrigger trigger) + @Override public void alterWMTrigger(WMTrigger trigger) throws NoSuchObjectException, InvalidOperationException, MetaException { rawStore.alterWMTrigger(trigger); } - @Override - public void dropWMTrigger(String resourcePlanName, String triggerName, String ns) + @Override public void dropWMTrigger(String resourcePlanName, String triggerName, String ns) throws NoSuchObjectException, InvalidOperationException, MetaException { 
rawStore.dropWMTrigger(resourcePlanName, triggerName, ns); } - @Override - public List getTriggersForResourcePlan(String resourcePlanName, String ns) + @Override public List getTriggersForResourcePlan(String resourcePlanName, String ns) throws NoSuchObjectException, MetaException { return rawStore.getTriggersForResourcePlan(resourcePlanName, ns); } - @Override - public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException, - InvalidOperationException, MetaException { + @Override public void createPool(WMPool pool) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException { rawStore.createPool(pool); } - @Override - public void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException, - NoSuchObjectException, InvalidOperationException, MetaException { + @Override public void alterPool(WMNullablePool pool, String poolPath) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException { rawStore.alterPool(pool, poolPath); } - @Override - public void dropWMPool(String resourcePlanName, String poolPath, String ns) + @Override public void dropWMPool(String resourcePlanName, String poolPath, String ns) throws NoSuchObjectException, InvalidOperationException, MetaException { rawStore.dropWMPool(resourcePlanName, poolPath, ns); } - @Override - public void createOrUpdateWMMapping(WMMapping mapping, boolean update) - throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, - MetaException { + @Override public void createOrUpdateWMMapping(WMMapping mapping, boolean update) + throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException { rawStore.createOrUpdateWMMapping(mapping, update); } - @Override - public void dropWMMapping(WMMapping mapping) + @Override public void dropWMMapping(WMMapping mapping) throws NoSuchObjectException, InvalidOperationException, MetaException { 
rawStore.dropWMMapping(mapping); } - @Override - public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, - String poolPath, String ns) throws AlreadyExistsException, NoSuchObjectException, - InvalidOperationException, MetaException { + @Override public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath, + String ns) throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException { rawStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath, ns); } - @Override - public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, - String poolPath, String ns) throws NoSuchObjectException, InvalidOperationException, MetaException { + @Override public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath, + String ns) throws NoSuchObjectException, InvalidOperationException, MetaException { rawStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath, ns); } @@ -2979,14 +2723,12 @@ public long getCacheUpdateCount() { return sharedCache.getUpdateCount(); } - @Override - public void cleanWriteNotificationEvents(int olderThan) { + @Override public void cleanWriteNotificationEvents(int olderThan) { rawStore.cleanWriteNotificationEvents(olderThan); } - - @Override - public List getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException { + @Override public List getAllWriteEventInfo(long txnId, String dbName, String tableName) + throws MetaException { return rawStore.getAllWriteEventInfo(txnId, dbName, tableName); } @@ -2996,8 +2738,8 @@ static boolean isNotInBlackList(String catName, String dbName, String tblName) { LOG.debug("Trying to match: {} against blacklist pattern: {}", str, pattern); Matcher matcher = pattern.matcher(str); if (matcher.matches()) { - LOG.debug("Found matcher group: {} at start index: {} and end index: {}", matcher.group(), - 
matcher.start(), matcher.end()); + LOG.debug("Found matcher group: {} at start index: {} and end index: {}", matcher.group(), matcher.start(), + matcher.end()); return false; } } @@ -3010,8 +2752,8 @@ private static boolean isInWhitelist(String catName, String dbName, String tblNa LOG.debug("Trying to match: {} against whitelist pattern: {}", str, pattern); Matcher matcher = pattern.matcher(str); if (matcher.matches()) { - LOG.debug("Found matcher group: {} at start index: {} and end index: {}", matcher.group(), - matcher.start(), matcher.end()); + LOG.debug("Found matcher group: {} at start index: {} and end index: {}", matcher.group(), matcher.start(), + matcher.end()); return true; } } @@ -3052,45 +2794,39 @@ static boolean shouldCacheTable(String catName, String dbName, String tblName) { } static boolean isBlacklistWhitelistEmpty(Configuration conf) { - return MetastoreConf.getAsString(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_WHITELIST) - .equals(".*") - && MetastoreConf.getAsString(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST).isEmpty(); + return + MetastoreConf.getAsString(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_WHITELIST).equals(".*") + && MetastoreConf.getAsString(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST) + .isEmpty(); } - @VisibleForTesting - void resetCatalogCache() { + @VisibleForTesting void resetCatalogCache() { sharedCache.resetCatalogCache(); setCachePrewarmedState(false); } - @Override - public void addRuntimeStat(RuntimeStat stat) throws MetaException { + @Override public void addRuntimeStat(RuntimeStat stat) throws MetaException { rawStore.addRuntimeStat(stat); } - @Override - public List getRuntimeStats(int maxEntries, int maxCreateTime) throws MetaException { + @Override public List getRuntimeStats(int maxEntries, int maxCreateTime) throws MetaException { return rawStore.getRuntimeStats(maxEntries, maxCreateTime); } - @Override - public int 
deleteRuntimeStats(int maxRetainSecs) throws MetaException { + @Override public int deleteRuntimeStats(int maxRetainSecs) throws MetaException { return rawStore.deleteRuntimeStats(maxRetainSecs); } - @Override - public List getTableNamesWithStats() throws MetaException, NoSuchObjectException { + @Override public List getTableNamesWithStats() throws MetaException, NoSuchObjectException { return rawStore.getTableNamesWithStats(); } - @Override - public List getAllTableNamesForStats() throws MetaException, NoSuchObjectException { + @Override public List getAllTableNamesForStats() throws MetaException, NoSuchObjectException { return rawStore.getAllTableNamesForStats(); } - @Override - public Map> getPartitionColsWithStats(String catName, - String dbName, String tableName) throws MetaException, NoSuchObjectException { + @Override public Map> getPartitionColsWithStats(String catName, String dbName, String tableName) + throws MetaException, NoSuchObjectException { return rawStore.getPartitionColsWithStats(catName, dbName, tableName); } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index 2c7354a881..ea36eefdc7 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.metastore.cache; +import java.lang.reflect.Field; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; @@ -27,19 +28,30 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executors; +import 
java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.TreeMap; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheStats; +import com.google.common.cache.RemovalListener; +import com.google.common.cache.RemovalNotification; +import com.google.common.cache.Weigher; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.HiveMetaException; import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.StatObjectConverter; +import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.Catalog; @@ -52,21 +64,23 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator.ObjectEstimator; +import org.eclipse.jetty.util.ConcurrentHashSet; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import 
com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.hive.metastore.cache.CachedStore.partNameToVals; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; public class SharedCache { private static ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock(true); + private static final long MAX_DEFAULT_CACHE_SIZE = 1024 * 1024; private boolean isCatalogCachePrewarmed = false; private Map catalogCache = new TreeMap<>(); private HashSet catalogsDeletedDuringPrewarm = new HashSet<>(); @@ -79,17 +93,21 @@ private AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false); // For caching TableWrapper objects. Key is aggregate of database name and table name - private Map tableCache = new TreeMap<>(); + private ReentrantReadWriteLock tableCacheRWLock = new ReentrantReadWriteLock(true); + private Cache tableCache = null; + private boolean isTableCachePrewarmed = false; private HashSet tablesDeletedDuringPrewarm = new HashSet<>(); private AtomicBoolean isTableCacheDirty = new AtomicBoolean(false); private Map sdCache = new HashMap<>(); private static MessageDigest md; - static final private Logger LOG = LoggerFactory.getLogger(SharedCache.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(SharedCache.class.getName()); private AtomicLong cacheUpdateCount = new AtomicLong(0); - private static long maxCacheSizeInBytes = -1; - private static long currentCacheSizeInBytes = 0; - private static HashMap, ObjectEstimator> sizeEstimators = null; + private long maxCacheSizeInBytes = -1; + private HashMap, ObjectEstimator> sizeEstimators = null; + private Set tableToUpdateSize = new ConcurrentHashSet<>(); + private ScheduledExecutorService executor = null; + private Map tableSizeMap = null; enum StatsType { ALL(0), ALLBUTDEFAULT(1), PARTIAL(2); @@ -105,6 +123,10 @@ public int getPosition() { } } + private enum MemberName { + TABLE_COL_STATS_CACHE, PARTITION_CACHE, PARTITION_COL_STATS_CACHE, 
AGGR_COL_STATS_CACHE + } + static { try { md = MessageDigest.getInstance("MD5"); @@ -113,16 +135,120 @@ public int getPosition() { } } + static class TableWrapperSizeUpdater implements Runnable { + private Set setToUpdate; + private ReentrantReadWriteLock lock; + private Cache cache; + + TableWrapperSizeUpdater(Set set, ReentrantReadWriteLock lock1, Cache cache1) { + setToUpdate = set; + lock = lock1; + cache = cache1; + } + + @Override + public void run() { + for (String s : setToUpdate) { + refreshTableWrapperInCache(s); + } + setToUpdate.clear(); + } + + void refreshTableWrapperInCache(String tblKey) { + try { + lock.writeLock().lock(); + TableWrapper tw = cache.getIfPresent(tblKey); + if (tw != null) { + //cache will re-weigh the TableWrapper and record new weight. + cache.put(tblKey, tw); + } + } finally { + lock.writeLock().unlock(); + } + } + } + + /** + * Builder class for changing parameter of shared cache + */ + public static class Builder { + private Map tableSizeMap = null; + private int concurrencyLevel = -1; + private int refreshInterval = 10000; + private Configuration conf; + + Builder tableSizeMap(Map mp) { + this.tableSizeMap = mp; + return this; + } + + Builder configuration(Configuration c) { + this.conf = c; + return this; + } + + Builder concurrencyLevel(int cl) { + this.concurrencyLevel = cl; + return this; + } + + Builder refreshInterval(int numMillis) { + this.refreshInterval = numMillis; + return this; + } + + public SharedCache build(SharedCache sc) { + sc.tableSizeMap = this.tableSizeMap; + sc.initialize(conf, refreshInterval, concurrencyLevel); + return sc; + } + } + + public void initialize(Configuration conf, int refreshInterval, int concurrencyLevel) { + maxCacheSizeInBytes = MetastoreConf.getSizeVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY); - public void initialize(long maxSharedCacheSizeInBytes) { - maxCacheSizeInBytes = maxSharedCacheSizeInBytes; // Create estimators if ((maxCacheSizeInBytes > 0) && 
(sizeEstimators == null)) { sizeEstimators = IncrementalObjectSizeEstimator.createEstimators(SharedCache.class); } + + if (tableCache == null) { + CacheBuilder b = CacheBuilder.newBuilder() + .maximumWeight(maxCacheSizeInBytes > 0 ? maxCacheSizeInBytes : MAX_DEFAULT_CACHE_SIZE) + .weigher(new Weigher() { + @Override + public int weigh(String key, TableWrapper value) { + return value.getSize(); + } + }).removalListener(new RemovalListener() { + @Override + public void onRemoval(RemovalNotification notification) { + LOG.debug("Evication happened for table " + notification.getKey()); + LOG.debug("current table cache contains " + tableCache.size() + "entries"); + } + }); + + if (concurrencyLevel > 0) { + b.concurrencyLevel(concurrencyLevel); + } + tableCache = b.recordStats().build(); + } + + executor = Executors.newScheduledThreadPool(1, new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + Thread t = Executors.defaultThreadFactory().newThread(r); + t.setName("SharedCache table size updater: Thread-" + t.getId()); + t.setDaemon(true); + return t; + } + }); + executor.scheduleAtFixedRate(new TableWrapperSizeUpdater(tableToUpdateSize, tableCacheRWLock, tableCache), 0, + refreshInterval, TimeUnit.MILLISECONDS); + } - private static ObjectEstimator getMemorySizeEstimator(Class clazz) { + private ObjectEstimator getMemorySizeEstimator(Class clazz) { ObjectEstimator estimator = sizeEstimators.get(clazz); if (estimator == null) { IncrementalObjectSizeEstimator.createEstimators(clazz, sizeEstimators); @@ -131,21 +257,43 @@ private static ObjectEstimator getMemorySizeEstimator(Class clazz) { return estimator; } - static class TableWrapper { - Table t; - String location; - Map parameters; - byte[] sdHash; - ReentrantReadWriteLock tableLock = new ReentrantReadWriteLock(true); + public int getObjectSize(Class clazz, Object obj) { + if (sizeEstimators == null) { + return 0; + } + + try { + ObjectEstimator oe = getMemorySizeEstimator(clazz); + return 
oe.estimate(obj, sizeEstimators); + } catch (Exception e) { + LOG.error("Error while getting object size.", e); + } + return 0; + } + + enum SizeMode { + Delta, Snapshot + } + + class TableWrapper { + private Table t; + private String location; + private Map parameters; + private byte[] sdHash; + private int otherSize; + private int tableColStatsCacheSize; + private int partitionCacheSize; + private int partitionColStatsCacheSize; + private int aggrColStatsCacheSize; + + private ReentrantReadWriteLock tableLock = new ReentrantReadWriteLock(true); // For caching column stats for an unpartitioned table // Key is column name and the value is the col stat object - private Map tableColStatsCache = - new ConcurrentHashMap(); + private Map tableColStatsCache = new ConcurrentHashMap(); private AtomicBoolean isTableColStatsCacheDirty = new AtomicBoolean(false); // For caching partition objects // Ket is partition values and the value is a wrapper around the partition object - private Map partitionCache = - new ConcurrentHashMap(); + private Map partitionCache = new ConcurrentHashMap(); private AtomicBoolean isPartitionCacheDirty = new AtomicBoolean(false); // For caching column stats for a partitioned table // Key is aggregate of partition values, column name and the value is the col stat object @@ -164,6 +312,52 @@ private static ObjectEstimator getMemorySizeEstimator(Class clazz) { this.sdHash = sdHash; this.location = location; this.parameters = parameters; + this.tableColStatsCacheSize = 0; + this.partitionCacheSize = 0; + this.partitionColStatsCacheSize = 0; + this.aggrColStatsCacheSize = 0; + this.otherSize = getTableWrapperSizeWithoutMaps(); + } + + private int getTableWrapperSizeWithoutMaps() { + Class clazz = TableWrapper.class; + Field[] fields = clazz.getDeclaredFields(); + int size = 0; + for (Field field : fields) { + if (field.getType().equals(ConcurrentHashMap.class)) { + continue; + } + if (field.getType().equals(SharedCache.class)) { + continue; + } + try { 
+ field.setAccessible(true); + Object val = field.get(this); + ObjectEstimator oe = getMemorySizeEstimator(field.getType()); + if (oe != null) { + size += oe.estimate(val, sizeEstimators); + } + } catch (Exception ex) { + LOG.error("Not able to estimate size.", ex); + } + } + + return size; + } + + public int getSize() { + //facilitate testing only. In production we won't use tableSizeMap at all. + if (tableSizeMap != null) { + String tblKey = CacheUtils.buildTableKey(this.t.getCatName(), this.t.getDbName(), this.t.getTableName()); + if (tableSizeMap.containsKey(tblKey)) { + return tableSizeMap.get(tblKey); + } + } + if (sizeEstimators == null) { + return 0; + } + return otherSize + tableColStatsCacheSize + partitionCacheSize + partitionColStatsCacheSize + + aggrColStatsCacheSize; } public Table getTable() { @@ -202,15 +396,69 @@ boolean sameDatabase(String catName, String dbName) { return catName.equals(t.getCatName()) && dbName.equals(t.getDbName()); } + private void updateMemberSize(MemberName mn, Integer size, SizeMode mode) { + if (sizeEstimators == null) { + return; + } + + switch (mn) { + case TABLE_COL_STATS_CACHE: + if (mode == SizeMode.Delta) { + tableColStatsCacheSize += size; + } else { + tableColStatsCacheSize = size; + } + break; + case PARTITION_CACHE: + if (mode == SizeMode.Delta) { + partitionCacheSize += size; + } else { + partitionCacheSize = size; + } + break; + case PARTITION_COL_STATS_CACHE: + if (mode == SizeMode.Delta) { + partitionColStatsCacheSize += size; + } else { + partitionColStatsCacheSize = size; + } + break; + case AGGR_COL_STATS_CACHE: + if (mode == SizeMode.Delta) { + aggrColStatsCacheSize += size; + } else { + aggrColStatsCacheSize = size; + } + break; + default: + break; + } + + String tblKey = getTblKey(); + tableToUpdateSize.add(tblKey); + } + + String getTblKey() { + Table tbl = this.t; + String catName = tbl.getCatName(); + String dbName = tbl.getDbName(); + String tblName = tbl.getTableName(); + return 
CacheUtils.buildTableKey(catName, dbName, tblName); + } + void cachePartition(Partition part, SharedCache sharedCache) { try { tableLock.writeLock().lock(); PartitionWrapper wrapper = makePartitionWrapper(part, sharedCache); partitionCache.put(CacheUtils.buildPartitionCacheKey(part.getValues()), wrapper); + int size = getObjectSize(PartitionWrapper.class, wrapper); + updateMemberSize(MemberName.PARTITION_CACHE, size, SizeMode.Delta); isPartitionCacheDirty.set(true); + // Invalidate cached aggregate stats if (!aggrColStatsCache.isEmpty()) { aggrColStatsCache.clear(); + updateMemberSize(MemberName.AGGR_COL_STATS_CACHE, 0, SizeMode.Snapshot); } } finally { tableLock.writeLock().unlock(); @@ -220,31 +468,21 @@ void cachePartition(Partition part, SharedCache sharedCache) { boolean cachePartitions(Iterable parts, SharedCache sharedCache, boolean fromPrewarm) { try { tableLock.writeLock().lock(); + int size = 0; for (Partition part : parts) { - PartitionWrapper ptnWrapper = makePartitionWrapper(part, sharedCache); - if (maxCacheSizeInBytes > 0) { - ObjectEstimator ptnWrapperSizeEstimator = - getMemorySizeEstimator(PartitionWrapper.class); - long estimatedMemUsage = ptnWrapperSizeEstimator.estimate(ptnWrapper, sizeEstimators); - LOG.trace("Memory needed to cache Partition: {} is {} bytes", part, estimatedMemUsage); - if (isCacheMemoryFull(estimatedMemUsage)) { - LOG.debug( - "Cannot cache Partition: {}. 
Memory needed is {} bytes, whereas the memory remaining is: {} bytes.", - part, estimatedMemUsage, (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes)); - return false; - } else { - currentCacheSizeInBytes += estimatedMemUsage; - } - LOG.trace("Current cache size: {} bytes", currentCacheSizeInBytes); - } - partitionCache.put(CacheUtils.buildPartitionCacheKey(part.getValues()), ptnWrapper); + PartitionWrapper wrapper = makePartitionWrapper(part, sharedCache); + partitionCache.put(CacheUtils.buildPartitionCacheKey(part.getValues()), wrapper); + size += getObjectSize(PartitionWrapper.class, wrapper); + if (!fromPrewarm) { isPartitionCacheDirty.set(true); } } + updateMemberSize(MemberName.PARTITION_CACHE, size, SizeMode.Delta); // Invalidate cached aggregate stats if (!aggrColStatsCache.isEmpty()) { aggrColStatsCache.clear(); + updateMemberSize(MemberName.AGGR_COL_STATS_CACHE, 0, SizeMode.Snapshot); } return true; } finally { @@ -300,30 +538,36 @@ public Partition removePartition(List partVal, SharedCache sharedCache) Partition part = null; try { tableLock.writeLock().lock(); - PartitionWrapper wrapper = - partitionCache.remove(CacheUtils.buildPartitionCacheKey(partVal)); + PartitionWrapper wrapper = partitionCache.remove(CacheUtils.buildPartitionCacheKey(partVal)); if (wrapper == null) { return null; } isPartitionCacheDirty.set(true); + + int size = getObjectSize(PartitionWrapper.class, wrapper); + updateMemberSize(MemberName.PARTITION_CACHE, -1 * size, SizeMode.Delta); + part = CacheUtils.assemble(wrapper, sharedCache); if (wrapper.getSdHash() != null) { sharedCache.decrSd(wrapper.getSdHash()); } // Remove col stats String partialKey = CacheUtils.buildPartitionCacheKey(partVal); - Iterator> iterator = - partitionColStatsCache.entrySet().iterator(); + Iterator> iterator = partitionColStatsCache.entrySet().iterator(); while (iterator.hasNext()) { Entry entry = iterator.next(); String key = entry.getKey(); if (key.toLowerCase().startsWith(partialKey.toLowerCase())) { 
+ int statsSize = getObjectSize(ColumnStatisticsObj.class, entry.getValue()); + updateMemberSize(MemberName.PARTITION_COL_STATS_CACHE, -1 * statsSize, SizeMode.Delta); iterator.remove(); } } + // Invalidate cached aggregate stats if (!aggrColStatsCache.isEmpty()) { aggrColStatsCache.clear(); + updateMemberSize(MemberName.PARTITION_COL_STATS_CACHE, 0, SizeMode.Snapshot); } } finally { tableLock.writeLock().unlock(); @@ -353,7 +597,7 @@ public void alterPartition(List partVals, Partition newPart, SharedCache } public void alterPartitionAndStats(List partVals, SharedCache sharedCache, long writeId, - Map parameters, List colStatsObjs) { + Map parameters, List colStatsObjs) { try { tableLock.writeLock().lock(); PartitionWrapper partitionWrapper = partitionCache.get(CacheUtils.buildPartitionCacheKey(partVals)); @@ -372,8 +616,7 @@ public void alterPartitionAndStats(List partVals, SharedCache sharedCach } } - public void alterPartitions(List> partValsList, List newParts, - SharedCache sharedCache) { + public void alterPartitions(List> partValsList, List newParts, SharedCache sharedCache) { try { tableLock.writeLock().lock(); for (int i = 0; i < partValsList.size(); i++) { @@ -390,6 +633,7 @@ public void refreshPartitions(List partitions, SharedCache sharedCach Map newPartitionCache = new HashMap(); try { tableLock.writeLock().lock(); + int size = 0; for (Partition part : partitions) { if (isPartitionCacheDirty.compareAndSet(true, false)) { LOG.debug("Skipping partition cache update for table: " + getTable().getTableName() @@ -405,8 +649,10 @@ public void refreshPartitions(List partitions, SharedCache sharedCach } wrapper = makePartitionWrapper(part, sharedCache); newPartitionCache.put(key, wrapper); + size += getObjectSize(PartitionWrapper.class, wrapper); } partitionCache = newPartitionCache; + updateMemberSize(MemberName.PARTITION_CACHE, size, SizeMode.Snapshot); } finally { tableLock.writeLock().unlock(); } @@ -415,6 +661,7 @@ public void refreshPartitions(List 
partitions, SharedCache sharedCach public boolean updateTableColStats(List colStatsForTable) { try { tableLock.writeLock().lock(); + int statsSize = 0; for (ColumnStatisticsObj colStatObj : colStatsForTable) { // Get old stats object if present String key = colStatObj.getColName(); @@ -425,28 +672,11 @@ public boolean updateTableColStats(List colStatsForTable) { } else { // No stats exist for this key; add a new object to the cache // TODO: get rid of deepCopy after making sure callers don't use references - if (maxCacheSizeInBytes > 0) { - ObjectEstimator tblColStatsSizeEstimator = - getMemorySizeEstimator(ColumnStatisticsObj.class); - long estimatedMemUsage = - tblColStatsSizeEstimator.estimate(colStatObj, sizeEstimators); - LOG.trace("Memory needed to cache Table Column Statistics Object: {} is {} bytes", - colStatObj, estimatedMemUsage); - if (isCacheMemoryFull(estimatedMemUsage)) { - LOG.debug( - "Cannot cache Table Column Statistics Object: {}. Memory needed is {} bytes, " - + "whereas the memory remaining is: {} bytes.", - colStatObj, estimatedMemUsage, - (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes)); - return false; - } else { - currentCacheSizeInBytes += estimatedMemUsage; - } - LOG.trace("Current cache size: {} bytes", currentCacheSizeInBytes); - } tableColStatsCache.put(key, colStatObj.deepCopy()); + statsSize += getObjectSize(ColumnStatisticsObj.class, colStatObj); } } + updateMemberSize(MemberName.TABLE_COL_STATS_CACHE, statsSize, SizeMode.Delta); isTableColStatsCacheDirty.set(true); return true; } finally { @@ -455,29 +685,30 @@ public boolean updateTableColStats(List colStatsForTable) { } public void refreshTableColStats(List colStatsForTable) { - Map newTableColStatsCache = - new HashMap(); + Map newTableColStatsCache = new HashMap(); try { tableLock.writeLock().lock(); + int statsSize = 0; for (ColumnStatisticsObj colStatObj : colStatsForTable) { if (isTableColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping table col 
stats cache update for table: " - + getTable().getTableName() + "; the table col stats list we have is dirty."); + LOG.debug("Skipping table col stats cache update for table: " + getTable().getTableName() + + "; the table col stats list we have is dirty."); return; } String key = colStatObj.getColName(); // TODO: get rid of deepCopy after making sure callers don't use references newTableColStatsCache.put(key, colStatObj.deepCopy()); + statsSize += getObjectSize(ColumnStatisticsObj.class, colStatObj); } tableColStatsCache = newTableColStatsCache; + updateMemberSize(MemberName.TABLE_COL_STATS_CACHE, statsSize, SizeMode.Snapshot); } finally { tableLock.writeLock().unlock(); } } public ColumnStatistics getCachedTableColStats(ColumnStatisticsDesc csd, List colNames, - String validWriteIds, boolean areTxnStatsSupported) - throws MetaException { + String validWriteIds, boolean areTxnStatsSupported) throws MetaException { List colStatObjs = new ArrayList(); try { tableLock.readLock().lock(); @@ -488,7 +719,7 @@ public ColumnStatistics getCachedTableColStats(ColumnStatisticsDesc csd, List partVal, String c try { tableLock.readLock().lock(); ColumnStatisticsObj statisticsObj = - partitionColStatsCache.get(CacheUtils.buildPartitonColStatsCacheKey(partVal, colName)); + partitionColStatsCache.get(CacheUtils.buildPartitonColStatsCacheKey(partVal, colName)); if (statisticsObj == null || writeIdList == null) { return new ColumStatsWithWriteId(-1, statisticsObj); } @@ -546,23 +780,22 @@ public ColumStatsWithWriteId getPartitionColStats(List partVal, String c } } - public List getPartColStatsList(List partNames, List colNames, - String writeIdList, boolean txnStatSupported) throws MetaException { + public List getPartColStatsList(List partNames, List colNames, String writeIdList, + boolean txnStatSupported) throws MetaException { List colStatObjs = new ArrayList<>(); try { tableLock.readLock().lock(); Table tbl = getTable(); for (String partName : partNames) { - ColumnStatisticsDesc 
csd = new ColumnStatisticsDesc(false, - tbl.getDbName(), tbl.getTableName()); + ColumnStatisticsDesc csd = new ColumnStatisticsDesc(false, tbl.getDbName(), tbl.getTableName()); csd.setCatName(tbl.getCatName()); csd.setPartName(partName); csd.setLastAnalyzed(0); //TODO : Need to get last analysed. This is not being used by anybody now. List statObject = new ArrayList<>(); - List partVal = Warehouse.getPartValuesFromPartName(partName); + List partVal = Warehouse.getPartValuesFromPartName(partName); for (String colName : colNames) { ColumnStatisticsObj statisticsObj = - partitionColStatsCache.get(CacheUtils.buildPartitonColStatsCacheKey(partVal, colName)); + partitionColStatsCache.get(CacheUtils.buildPartitonColStatsCacheKey(partVal, colName)); if (statisticsObj != null) { statObject.add(statisticsObj); } else { @@ -576,17 +809,17 @@ public ColumStatsWithWriteId getPartitionColStats(List partVal, String c if (!txnStatSupported) { columnStatistics.setIsStatsCompliant(false); } else { - PartitionWrapper wrapper = - partitionCache.get(CacheUtils.buildPartitionCacheKey(partVal)); + PartitionWrapper wrapper = partitionCache.get(CacheUtils.buildPartitionCacheKey(partVal)); if (wrapper == null) { columnStatistics.setIsStatsCompliant(false); } else { Partition partition = wrapper.getPartition(); - if (!ObjectStore.isCurrentStatsValidForTheQuery(partition.getParameters(), - partition.getWriteId(), writeIdList, false)) { + if (!ObjectStore + .isCurrentStatsValidForTheQuery(partition.getParameters(), partition.getWriteId(), writeIdList, + false)) { LOG.debug("The current cached store transactional partition column statistics for {}.{}.{} " - + "(write ID {}) are not valid for current query ({})", tbl.getDbName(), - tbl.getTableName(), partName, partition.getWriteId(), writeIdList); + + "(write ID {}) are not valid for current query ({})", tbl.getDbName(), tbl.getTableName(), + partName, partition.getWriteId(), writeIdList); columnStatistics.setIsStatsCompliant(false); } } @@ 
-600,10 +833,10 @@ public ColumStatsWithWriteId getPartitionColStats(List partVal, String c return colStatObjs; } - public boolean updatePartitionColStats(List partVal, - List colStatsObjs) { + public boolean updatePartitionColStats(List partVal, List colStatsObjs) { try { tableLock.writeLock().lock(); + int statsSize = 0; for (ColumnStatisticsObj colStatObj : colStatsObjs) { // Get old stats object if present String key = CacheUtils.buildPartitonColStatsCacheKey(partVal, colStatObj.getColName()); @@ -614,32 +847,16 @@ public boolean updatePartitionColStats(List partVal, } else { // No stats exist for this key; add a new object to the cache // TODO: get rid of deepCopy after making sure callers don't use references - if (maxCacheSizeInBytes > 0) { - ObjectEstimator ptnColStatsSizeEstimator = - getMemorySizeEstimator(ColumnStatisticsObj.class); - long estimatedMemUsage = - ptnColStatsSizeEstimator.estimate(colStatObj, sizeEstimators); - LOG.trace("Memory needed to cache Partition Column Statistics Object: {} is {} bytes", - colStatObj, estimatedMemUsage); - if (isCacheMemoryFull(estimatedMemUsage)) { - LOG.debug( - "Cannot cache Partition Column Statistics Object: {}. 
Memory needed is {} bytes, " - + "whereas the memory remaining is: {} bytes.", - colStatObj, estimatedMemUsage, - (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes)); - return false; - } else { - currentCacheSizeInBytes += estimatedMemUsage; - } - LOG.trace("Current cache size: {} bytes", currentCacheSizeInBytes); - } partitionColStatsCache.put(key, colStatObj.deepCopy()); + statsSize += getObjectSize(ColumnStatisticsObj.class, colStatObj); } } + updateMemberSize(MemberName.PARTITION_COL_STATS_CACHE, statsSize, SizeMode.Delta); isPartitionColStatsCacheDirty.set(true); // Invalidate cached aggregate stats if (!aggrColStatsCache.isEmpty()) { aggrColStatsCache.clear(); + updateMemberSize(MemberName.AGGR_COL_STATS_CACHE, 0, SizeMode.Snapshot); } } finally { tableLock.writeLock().unlock(); @@ -650,11 +867,17 @@ public boolean updatePartitionColStats(List partVal, public void removePartitionColStats(List partVals, String colName) { try { tableLock.writeLock().lock(); - partitionColStatsCache.remove(CacheUtils.buildPartitonColStatsCacheKey(partVals, colName)); + ColumnStatisticsObj statsObj = + partitionColStatsCache.remove(CacheUtils.buildPartitonColStatsCacheKey(partVals, colName)); + if (statsObj != null) { + int statsSize = getObjectSize(ColumnStatisticsObj.class, statsObj); + updateMemberSize(MemberName.PARTITION_COL_STATS_CACHE, -1 * statsSize, SizeMode.Delta); + } isPartitionColStatsCacheDirty.set(true); // Invalidate cached aggregate stats if (!aggrColStatsCache.isEmpty()) { aggrColStatsCache.clear(); + updateMemberSize(MemberName.AGGR_COL_STATS_CACHE, 0, SizeMode.Snapshot); } } finally { tableLock.writeLock().unlock(); @@ -665,10 +888,12 @@ public void removeAllPartitionColStats() { try { tableLock.writeLock().lock(); partitionColStatsCache.clear(); + updateMemberSize(MemberName.PARTITION_COL_STATS_CACHE, 0, SizeMode.Snapshot); isPartitionColStatsCacheDirty.set(true); // Invalidate cached aggregate stats if (!aggrColStatsCache.isEmpty()) { 
aggrColStatsCache.clear(); + updateMemberSize(MemberName.AGGR_COL_STATS_CACHE, 0, SizeMode.Snapshot); } } finally { tableLock.writeLock().unlock(); @@ -676,15 +901,15 @@ public void removeAllPartitionColStats() { } public void refreshPartitionColStats(List partitionColStats) { - Map newPartitionColStatsCache = - new HashMap(); + Map newPartitionColStatsCache = new HashMap(); try { tableLock.writeLock().lock(); String tableName = StringUtils.normalizeIdentifier(getTable().getTableName()); + int statsSize = 0; for (ColumnStatistics cs : partitionColStats) { if (isPartitionColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping partition column stats cache update for table: " - + getTable().getTableName() + "; the partition column stats list we have is dirty"); + LOG.debug("Skipping partition column stats cache update for table: " + getTable().getTableName() + + "; the partition column stats list we have is dirty"); return; } List partVal; @@ -693,26 +918,26 @@ public void refreshPartitionColStats(List partitionColStats) { List colStatsObjs = cs.getStatsObj(); for (ColumnStatisticsObj colStatObj : colStatsObjs) { if (isPartitionColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping partition column stats cache update for table: " - + getTable().getTableName() + "; the partition column list we have is dirty"); + LOG.debug("Skipping partition column stats cache update for table: " + getTable().getTableName() + + "; the partition column list we have is dirty"); return; } - String key = - CacheUtils.buildPartitonColStatsCacheKey(partVal, colStatObj.getColName()); + String key = CacheUtils.buildPartitonColStatsCacheKey(partVal, colStatObj.getColName()); newPartitionColStatsCache.put(key, colStatObj.deepCopy()); + statsSize += getObjectSize(ColumnStatisticsObj.class, colStatObj); } } catch (MetaException e) { LOG.debug("Unable to cache partition column stats for table: " + tableName, e); } } partitionColStatsCache = newPartitionColStatsCache; 
+ updateMemberSize(MemberName.PARTITION_COL_STATS_CACHE, statsSize, SizeMode.Snapshot); } finally { tableLock.writeLock().unlock(); } } - public List getAggrPartitionColStats(List colNames, - StatsType statsType) { + public List getAggrPartitionColStats(List colNames, StatsType statsType) { List colStats = new ArrayList(); try { tableLock.readLock().lock(); @@ -739,12 +964,14 @@ public void cacheAggrPartitionColStats(AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { try { tableLock.writeLock().lock(); + int statsSize = 0; if (aggrStatsAllPartitions != null) { for (ColumnStatisticsObj statObj : aggrStatsAllPartitions.getColStats()) { if (statObj != null) { List aggrStats = new ArrayList(); aggrStats.add(StatsType.ALL.ordinal(), statObj.deepCopy()); aggrColStatsCache.put(statObj.getColName(), aggrStats); + statsSize += getObjectSize(ColumnStatisticsObj.class, statObj); } } } @@ -756,9 +983,11 @@ public void cacheAggrPartitionColStats(AggrStats aggrStatsAllPartitions, aggrStats = new ArrayList(); } aggrStats.add(StatsType.ALLBUTDEFAULT.ordinal(), statObj.deepCopy()); + statsSize += getObjectSize(ColumnStatisticsObj.class, statObj); } } } + updateMemberSize(MemberName.AGGR_COL_STATS_CACHE, statsSize, SizeMode.Snapshot); isAggrPartitionColStatsCacheDirty.set(true); } finally { tableLock.writeLock().unlock(); @@ -767,10 +996,10 @@ public void cacheAggrPartitionColStats(AggrStats aggrStatsAllPartitions, public void refreshAggrPartitionColStats(AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition, SharedCache sharedCache, Map, Long> partNameToWriteId) { - Map> newAggrColStatsCache = - new HashMap>(); + Map> newAggrColStatsCache = new HashMap>(); try { tableLock.writeLock().lock(); + int statsSize = 0; if (partNameToWriteId != null) { for (Entry, Long> partValuesWriteIdSet : partNameToWriteId.entrySet()) { List partValues = partValuesWriteIdSet.getKey(); @@ -784,8 +1013,8 @@ public void 
refreshAggrPartitionColStats(AggrStats aggrStatsAllPartitions, // skip updating the aggregate stats in the cache. long writeId = partition.getWriteId(); if (writeId != partValuesWriteIdSet.getValue()) { - LOG.info("Could not refresh the aggregate stat as partition " + partValues + " has write id " + - partValuesWriteIdSet.getValue() + " instead of " + writeId); + LOG.info("Could not refresh the aggregate stat as partition " + partValues + " has write id " + + partValuesWriteIdSet.getValue() + " instead of " + writeId); return; } } @@ -793,22 +1022,23 @@ public void refreshAggrPartitionColStats(AggrStats aggrStatsAllPartitions, if (aggrStatsAllPartitions != null) { for (ColumnStatisticsObj statObj : aggrStatsAllPartitions.getColStats()) { if (isAggrPartitionColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping aggregate stats cache update for table: " - + getTable().getTableName() + "; the aggregate stats list we have is dirty"); + LOG.debug("Skipping aggregate stats cache update for table: " + getTable().getTableName() + + "; the aggregate stats list we have is dirty"); return; } if (statObj != null) { List aggrStats = new ArrayList(); aggrStats.add(StatsType.ALL.ordinal(), statObj.deepCopy()); newAggrColStatsCache.put(statObj.getColName(), aggrStats); + statsSize += getObjectSize(ColumnStatisticsObj.class, statObj); } } } if (aggrStatsAllButDefaultPartition != null) { for (ColumnStatisticsObj statObj : aggrStatsAllButDefaultPartition.getColStats()) { if (isAggrPartitionColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping aggregate stats cache update for table: " - + getTable().getTableName() + "; the aggregate stats list we have is dirty"); + LOG.debug("Skipping aggregate stats cache update for table: " + getTable().getTableName() + + "; the aggregate stats list we have is dirty"); return; } if (statObj != null) { @@ -817,10 +1047,12 @@ public void refreshAggrPartitionColStats(AggrStats aggrStatsAllPartitions, aggrStats = new 
ArrayList(); } aggrStats.add(StatsType.ALLBUTDEFAULT.ordinal(), statObj.deepCopy()); + statsSize += getObjectSize(ColumnStatisticsObj.class, statObj); } } } aggrColStatsCache = newAggrColStatsCache; + updateMemberSize(MemberName.AGGR_COL_STATS_CACHE, statsSize, SizeMode.Snapshot); } finally { tableLock.writeLock().unlock(); } @@ -871,10 +1103,10 @@ private PartitionWrapper makePartitionWrapper(Partition part, SharedCache shared } static class PartitionWrapper { - Partition p; - String location; - Map parameters; - byte[] sdHash; + private Partition p; + private String location; + private Map parameters; + private byte[] sdHash; PartitionWrapper(Partition p, byte[] sdHash, String location, Map parameters) { this.p = p; @@ -901,8 +1133,8 @@ public String getLocation() { } static class StorageDescriptorWrapper { - StorageDescriptor sd; - int refCount = 0; + private StorageDescriptor sd; + private int refCount = 0; StorageDescriptorWrapper(StorageDescriptor sd, int refCount) { this.sd = sd; @@ -921,6 +1153,7 @@ public int getRefCount() { public static class ColumStatsWithWriteId { private long writeId; private ColumnStatisticsObj columnStatisticsObj; + public ColumStatsWithWriteId(long writeId, ColumnStatisticsObj columnStatisticsObj) { this.writeId = writeId; this.columnStatisticsObj = columnStatisticsObj; @@ -1049,8 +1282,7 @@ public void populateDatabasesInCache(List databases) { // 1. Don't add databases that were deleted while we were preparing list for prewarm // 2. 
Skip overwriting exisiting db object // (which is present because it was added after prewarm started) - String key = CacheUtils.buildDbKey(dbCopy.getCatalogName().toLowerCase(), - dbCopy.getName().toLowerCase()); + String key = CacheUtils.buildDbKey(dbCopy.getCatalogName().toLowerCase(), dbCopy.getName().toLowerCase()); if (databasesDeletedDuringPrewarm.contains(key)) { continue; } @@ -1104,8 +1336,9 @@ public void removeDatabaseFromCache(String catName, String dbName) { cacheLock.readLock().lock(); for (String pair : databaseCache.keySet()) { String[] n = CacheUtils.splitDbName(pair); - if (catName.equals(n[0])) + if (catName.equals(n[0])) { results.add(n[1]); + } } } finally { cacheLock.readLock().unlock(); @@ -1173,9 +1406,9 @@ public int getCachedDatabaseCount() { } } - public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, - List partitions, List partitionColStats, - AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, List partitions, + List partitionColStats, AggrStats aggrStatsAllPartitions, + AggrStats aggrStatsAllButDefaultPartition) { String catName = StringUtils.normalizeIdentifier(table.getCatName()); String dbName = StringUtils.normalizeIdentifier(table.getDbName()); String tableName = StringUtils.normalizeIdentifier(table.getTableName()); @@ -1185,23 +1418,6 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, return false; } TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table); - if (maxCacheSizeInBytes > 0) { - ObjectEstimator tblWrapperSizeEstimator = getMemorySizeEstimator(TableWrapper.class); - long estimatedMemUsage = tblWrapperSizeEstimator.estimate(tblWrapper, sizeEstimators); - LOG.debug("Memory needed to cache Database: {}'s Table: {}, is {} bytes", dbName, tableName, - estimatedMemUsage); - if (isCacheMemoryFull(estimatedMemUsage)) { - LOG.debug( - 
"Cannot cache Database: {}'s Table: {}. Memory needed is {} bytes, " - + "whereas the memory we have remaining is: {} bytes.", - dbName, tableName, estimatedMemUsage, - (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes)); - return false; - } else { - currentCacheSizeInBytes += estimatedMemUsage; - } - LOG.debug("Current cache size: {} bytes", currentCacheSizeInBytes); - } if (!table.isSetPartitionKeys() && (tableColStats != null)) { if (table.getPartitionKeys().isEmpty() && (tableColStats != null)) { return false; @@ -1227,8 +1443,7 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, } } } - tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions, - aggrStatsAllButDefaultPartition); + tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); } tblWrapper.isPartitionCacheDirty.set(false); tblWrapper.isTableColStatsCacheDirty.set(false); @@ -1238,17 +1453,13 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, cacheLock.writeLock().lock(); // 2. 
Skip overwriting exisiting table object // (which is present because it was added after prewarm started) - tableCache.putIfAbsent(CacheUtils.buildTableKey(catName, dbName, tableName), tblWrapper); + tableCache.put(CacheUtils.buildTableKey(catName, dbName, tableName), tblWrapper); return true; } finally { cacheLock.writeLock().unlock(); } } - private static boolean isCacheMemoryFull(long estimatedMemUsage) { - return (0.8*maxCacheSizeInBytes) < (currentCacheSizeInBytes + estimatedMemUsage); - } - public void completeTableCachePrewarm() { try { cacheLock.writeLock().lock(); @@ -1263,12 +1474,13 @@ public Table getTableFromCache(String catName, String dbName, String tableName) Table t = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = - tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); + tableCacheRWLock.readLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { t = CacheUtils.assemble(tblWrapper, this); } } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return t; @@ -1286,8 +1498,7 @@ public TableWrapper addTableToCache(String catName, String dbName, String tblNam } } - private TableWrapper createTableWrapper(String catName, String dbName, String tblName, - Table tbl) { + private TableWrapper createTableWrapper(String catName, String dbName, String tblName, Table tbl) { TableWrapper wrapper; Table tblCopy = tbl.deepCopy(); tblCopy.setCatName(normalizeIdentifier(catName)); @@ -1318,19 +1529,19 @@ public void removeTableFromCache(String catName, String dbName, String tblName) if (!isTableCachePrewarmed) { tablesDeletedDuringPrewarm.add(CacheUtils.buildTableKey(catName, dbName, tblName)); } - TableWrapper tblWrapper = - tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName)); + String tblKey = CacheUtils.buildTableKey(catName, dbName, tblName); + TableWrapper tblWrapper = 
tableCache.getIfPresent(tblKey); if (tblWrapper == null) { //in case of retry, ignore second try. return; } - if (tblWrapper != null) { - byte[] sdHash = tblWrapper.getSdHash(); - if (sdHash != null) { - decrSd(sdHash); - } - isTableCacheDirty.set(true); + + byte[] sdHash = tblWrapper.getSdHash(); + if (sdHash != null) { + decrSd(sdHash); } + tableCache.invalidate(tblKey); + isTableCacheDirty.set(true); } finally { cacheLock.writeLock().unlock(); } @@ -1339,8 +1550,7 @@ public void removeTableFromCache(String catName, String dbName, String tblName) public void alterTableInCache(String catName, String dbName, String tblName, Table newTable) { try { cacheLock.writeLock().lock(); - TableWrapper tblWrapper = - tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName)); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.updateTableObj(newTable, this); String newDbName = StringUtils.normalizeIdentifier(newTable.getDbName()); @@ -1354,11 +1564,10 @@ public void alterTableInCache(String catName, String dbName, String tblName, Tab } public void alterTableAndStatsInCache(String catName, String dbName, String tblName, long writeId, - List colStatsObjs, Map newParams) { + List colStatsObjs, Map newParams) { try { cacheLock.writeLock().lock(); - TableWrapper tblWrapper = - tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName)); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper == null) { LOG.info("Table " + tblName + " is missing from cache. 
Cannot update table stats in cache"); return; @@ -1369,8 +1578,8 @@ public void alterTableAndStatsInCache(String catName, String dbName, String tblN //tblWrapper.updateTableObj(newTable, this); String newDbName = StringUtils.normalizeIdentifier(newTable.getDbName()); String newTblName = StringUtils.normalizeIdentifier(newTable.getTableName()); - tableCache.put(CacheUtils.buildTableKey(catName, newDbName, newTblName), tblWrapper); tblWrapper.updateTableColStats(colStatsObjs); + tableCache.put(CacheUtils.buildTableKey(catName, newDbName, newTblName), tblWrapper); isTableCacheDirty.set(true); } finally { cacheLock.writeLock().unlock(); @@ -1381,12 +1590,14 @@ public void alterTableAndStatsInCache(String catName, String dbName, String tblN List
tables = new ArrayList<>(); try { cacheLock.readLock().lock(); - for (TableWrapper wrapper : tableCache.values()) { + tableCacheRWLock.readLock().lock(); + for (TableWrapper wrapper : tableCache.asMap().values()) { if (wrapper.sameDatabase(catName, dbName)) { tables.add(CacheUtils.assemble(wrapper, this)); } } } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return tables; @@ -1396,53 +1607,55 @@ public void alterTableAndStatsInCache(String catName, String dbName, String tblN List tableNames = new ArrayList<>(); try { cacheLock.readLock().lock(); - for (TableWrapper wrapper : tableCache.values()) { + tableCacheRWLock.readLock().lock(); + for (TableWrapper wrapper : tableCache.asMap().values()) { if (wrapper.sameDatabase(catName, dbName)) { tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); } } } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return tableNames; } - public List listCachedTableNames(String catName, String dbName, String pattern, - int maxTables) { + public List listCachedTableNames(String catName, String dbName, String pattern, int maxTables) { List tableNames = new ArrayList<>(); try { cacheLock.readLock().lock(); + tableCacheRWLock.readLock().lock(); int count = 0; - for (TableWrapper wrapper : tableCache.values()) { - if (wrapper.sameDatabase(catName, dbName) - && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) - && (maxTables == -1 || count < maxTables)) { + for (TableWrapper wrapper : tableCache.asMap().values()) { + if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) && ( + maxTables == -1 || count < maxTables)) { tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); count++; } } } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return tableNames; } - public List listCachedTableNames(String catName, String 
dbName, String pattern, - TableType tableType, int limit) { + public List listCachedTableNames(String catName, String dbName, String pattern, TableType tableType, + int limit) { List tableNames = new ArrayList<>(); try { cacheLock.readLock().lock(); + tableCacheRWLock.readLock().lock(); int count = 0; - for (TableWrapper wrapper : tableCache.values()) { - if (wrapper.sameDatabase(catName, dbName) - && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) - && wrapper.getTable().getTableType().equals(tableType.toString()) - && (limit == -1 || count < limit)) { + for (TableWrapper wrapper : tableCache.asMap().values()) { + if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) + && wrapper.getTable().getTableType().equals(tableType.toString()) && (limit == -1 || count < limit)) { tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); count++; } } } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return tableNames; @@ -1456,7 +1669,7 @@ public boolean refreshTablesInCache(String catName, String dbName, List
t Map newCacheForDB = new TreeMap<>(); for (Table tbl : tables) { String tblName = StringUtils.normalizeIdentifier(tbl.getTableName()); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.updateTableObj(tbl, this); } else { @@ -1466,7 +1679,7 @@ public boolean refreshTablesInCache(String catName, String dbName, List
t } try { cacheLock.writeLock().lock(); - Iterator> entryIterator = tableCache.entrySet().iterator(); + Iterator> entryIterator = tableCache.asMap().entrySet().iterator(); while (entryIterator.hasNext()) { String key = entryIterator.next().getKey(); if (key.startsWith(CacheUtils.buildDbKeyWithDelimiterSuffix(catName, dbName))) { @@ -1480,11 +1693,12 @@ public boolean refreshTablesInCache(String catName, String dbName, List
t } } - public ColumnStatistics getTableColStatsFromCache(String catName, String dbName, - String tblName, List colNames, String validWriteIds, boolean areTxnStatsSupported) throws MetaException { + public ColumnStatistics getTableColStatsFromCache(String catName, String dbName, String tblName, + List colNames, String validWriteIds, boolean areTxnStatsSupported) throws MetaException { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.readLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper == null) { LOG.info("Table " + tblName + " is missing from cache."); return null; @@ -1492,21 +1706,23 @@ public ColumnStatistics getTableColStatsFromCache(String catName, String dbName, ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName); return tblWrapper.getCachedTableColStats(csd, colNames, validWriteIds, areTxnStatsSupported); } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } } - public void removeTableColStatsFromCache(String catName, String dbName, String tblName, - String colName) { + public void removeTableColStatsFromCache(String catName, String dbName, String tblName, String colName) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removeTableColStats(colName); } else { LOG.info("Table " + tblName + " is missing from cache."); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } @@ -1514,13 +1730,15 @@ public void removeTableColStatsFromCache(String catName, String dbName, String t public void removeAllTableColStatsFromCache(String 
catName, String dbName, String tblName) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removeAllTableColStats(); } else { LOG.info("Table " + tblName + " is missing from cache."); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } @@ -1529,14 +1747,15 @@ public void updateTableColStatsInCache(String catName, String dbName, String tab List colStatsForTable) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = - tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.updateTableColStats(colStatsForTable); } else { LOG.info("Table " + tableName + " is missing from cache."); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } @@ -1545,14 +1764,15 @@ public void refreshTableColStatsInCache(String catName, String dbName, String ta List colStatsForTable) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = - tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.refreshTableColStats(colStatsForTable); } else { LOG.info("Table " + tableName + " is missing from cache."); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } @@ -1560,24 +1780,25 @@ public void refreshTableColStatsInCache(String catName, String dbName, String ta public int getCachedTableCount() { try { 
cacheLock.readLock().lock(); - return tableCache.size(); + tableCacheRWLock.readLock().lock(); + return tableCache.asMap().size(); } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } } - public List getTableMeta(String catName, String dbNames, String tableNames, - List tableTypes) { + public List getTableMeta(String catName, String dbNames, String tableNames, List tableTypes) { List tableMetas = new ArrayList<>(); try { cacheLock.readLock().lock(); + tableCacheRWLock.readLock().lock(); for (String dbName : listCachedDatabases(catName)) { if (CacheUtils.matches(dbName, dbNames)) { for (Table table : listCachedTables(catName, dbName)) { if (CacheUtils.matches(table.getTableName(), tableNames)) { if (tableTypes == null || tableTypes.contains(table.getTableType())) { - TableMeta metaData = - new TableMeta(dbName, table.getTableName(), table.getTableType()); + TableMeta metaData = new TableMeta(dbName, table.getTableName(), table.getTableType()); metaData.setCatName(catName); metaData.setComments(table.getParameters().get("comment")); tableMetas.add(metaData); @@ -1587,6 +1808,7 @@ public int getCachedTableCount() { } } } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return tableMetas; @@ -1595,163 +1817,182 @@ public int getCachedTableCount() { public void addPartitionToCache(String catName, String dbName, String tblName, Partition part) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + String tblKey = CacheUtils.buildTableKey(catName, dbName, tblName); + TableWrapper tblWrapper = tableCache.getIfPresent(tblKey); if (tblWrapper != null) { tblWrapper.cachePartition(part, this); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } - public void addPartitionsToCache(String catName, String dbName, String tblName, - Iterable parts) { + public void 
addPartitionsToCache(String catName, String dbName, String tblName, Iterable parts) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.cachePartitions(parts, this, false); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } - public Partition getPartitionFromCache(String catName, String dbName, String tblName, - List partVals) { + public Partition getPartitionFromCache(String catName, String dbName, String tblName, List partVals) { Partition part = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.readLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { part = tblWrapper.getPartition(partVals, this); } } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return part; } - public boolean existPartitionFromCache(String catName, String dbName, String tblName, - List partVals) { + public boolean existPartitionFromCache(String catName, String dbName, String tblName, List partVals) { boolean existsPart = false; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.readLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { existsPart = tblWrapper.containsPartition(partVals); } } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return existsPart; } - public Partition removePartitionFromCache(String catName, String dbName, String 
tblName, - List partVals) { + public Partition removePartitionFromCache(String catName, String dbName, String tblName, List partVals) { Partition part = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { part = tblWrapper.removePartition(partVals, this); + } else { + LOG.warn("This is abnormal"); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } return part; } - public void removePartitionsFromCache(String catName, String dbName, String tblName, - List> partVals) { + public void removePartitionsFromCache(String catName, String dbName, String tblName, List> partVals) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removePartitions(partVals, this); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } - public List listCachedPartitions(String catName, String dbName, String tblName, - int max) { + public List listCachedPartitions(String catName, String dbName, String tblName, int max) { List parts = new ArrayList(); try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.readLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { parts = tblWrapper.listPartitions(max, this); } } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return parts; } - public void 
alterPartitionInCache(String catName, String dbName, String tblName, - List partVals, Partition newPart) { + public void alterPartitionInCache(String catName, String dbName, String tblName, List partVals, + Partition newPart) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.alterPartition(partVals, newPart, this); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } public void alterPartitionAndStatsInCache(String catName, String dbName, String tblName, long writeId, - List partVals, Map parameters, - List colStatsObjs) { + List partVals, Map parameters, List colStatsObjs) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.alterPartitionAndStats(partVals, this, writeId, parameters, colStatsObjs); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } - public void alterPartitionsInCache(String catName, String dbName, String tblName, - List> partValsList, List newParts) { + public void alterPartitionsInCache(String catName, String dbName, String tblName, List> partValsList, + List newParts) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.alterPartitions(partValsList, newParts, this); } } finally { + 
tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } - public void refreshPartitionsInCache(String catName, String dbName, String tblName, - List partitions) { + public void refreshPartitionsInCache(String catName, String dbName, String tblName, List partitions) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.refreshPartitions(partitions, this); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } - public void removePartitionColStatsFromCache(String catName, String dbName, String tblName, - List partVals, String colName) { + public void removePartitionColStatsFromCache(String catName, String dbName, String tblName, List partVals, + String colName) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removePartitionColStats(partVals, colName); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } @@ -1759,57 +2000,63 @@ public void removePartitionColStatsFromCache(String catName, String dbName, Stri public void removeAllPartitionColStatsFromCache(String catName, String dbName, String tblName) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removeAllPartitionColStats(); } } finally { + 
tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } - public void updatePartitionColStatsInCache(String catName, String dbName, String tableName, - List partVals, List colStatsObjs) { + public void updatePartitionColStatsInCache(String catName, String dbName, String tableName, List partVals, + List colStatsObjs) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = - tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.updatePartitionColStats(partVals, colStatsObjs); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } - public ColumStatsWithWriteId getPartitionColStatsFromCache(String catName, String dbName, - String tblName, List partVal, String colName, String writeIdList) { + public ColumStatsWithWriteId getPartitionColStatsFromCache(String catName, String dbName, String tblName, + List partVal, String colName, String writeIdList) { ColumStatsWithWriteId colStatObj = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.readLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { colStatObj = tblWrapper.getPartitionColStats(partVal, colName, writeIdList); } } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return colStatObj; } public List getPartitionColStatsListFromCache(String catName, String dbName, String tblName, - List partNames, List colNames, - String writeIdList, boolean txnStatSupported) { + List partNames, List colNames, String writeIdList, boolean txnStatSupported) { List colStatObjs = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = 
tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.readLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { colStatObjs = tblWrapper.getPartColStatsList(partNames, colNames, writeIdList, txnStatSupported); } } catch (MetaException e) { LOG.warn("Failed to get partition column statistics"); } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return colStatObjs; @@ -1819,54 +2066,61 @@ public void refreshPartitionColStatsInCache(String catName, String dbName, Strin List partitionColStats) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.refreshPartitionColStats(partitionColStats); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } - public List getAggrStatsFromCache(String catName, String dbName, - String tblName, List colNames, StatsType statsType) { + public List getAggrStatsFromCache(String catName, String dbName, String tblName, + List colNames, StatsType statsType) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.readLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { return tblWrapper.getAggrPartitionColStats(colNames, statsType); } } finally { + tableCacheRWLock.readLock().unlock(); cacheLock.readLock().unlock(); } return null; } - public void addAggregateStatsToCache(String catName, String dbName, String tblName, - AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + public void 
addAggregateStatsToCache(String catName, String dbName, String tblName, AggrStats aggrStatsAllPartitions, + AggrStats aggrStatsAllButDefaultPartition) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { - tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions, - aggrStatsAllButDefaultPartition); + tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } public void refreshAggregateStatsInCache(String catName, String dbName, String tblName, AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition, - Map, Long> partNameToWriteId) { + Map, Long> partNameToWriteId) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); + tableCacheRWLock.writeLock().lock(); + TableWrapper tblWrapper = tableCache.getIfPresent(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { - tblWrapper.refreshAggrPartitionColStats(aggrStatsAllPartitions, - aggrStatsAllButDefaultPartition, this, partNameToWriteId); + tblWrapper.refreshAggrPartitionColStats(aggrStatsAllPartitions, aggrStatsAllButDefaultPartition, this, + partNameToWriteId); } } finally { + tableCacheRWLock.writeLock().unlock(); cacheLock.readLock().unlock(); } } @@ -1903,8 +2157,8 @@ public synchronized StorageDescriptor getSdFromCache(byte[] sdHash) { } @VisibleForTesting - Map getTableCache() { - return tableCache; + void clearTableCache() { + tableCache.invalidateAll(); } @VisibleForTesting @@ -1928,6 +2182,11 @@ void clearDirtyFlags() { isTableCacheDirty.set(false); } + public void printCacheStats() { + CacheStats cs = 
tableCache.stats(); + LOG.info(cs.toString()); + } + public long getUpdateCount() { return cacheUpdateCount.get(); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index e30d4a8d1f..1e4bc5064e 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -26,6 +26,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; import org.apache.hadoop.hive.metastore.Deadline; @@ -61,7 +62,6 @@ import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.junit.After; import org.junit.Assert; @@ -69,12 +69,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -import jline.internal.Log; - import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; -@Category(MetastoreCheckinTest.class) -public class TestCachedStore { +/** + * Unit tests for CachedStore + */ +@Category(MetastoreCheckinTest.class) public class TestCachedStore { // cs_db1 Database db1; // cs_db2 @@ -94,8 +94,7 @@ List db2Ptbl1Ptns; List db2Ptbl1PtnNames; - @Before - public void setUp() throws Exception { + @Before public void setUp() throws Exception { Deadline.registerIfNot(10000000); Deadline.startTimer(""); 
Configuration conf = MetastoreConf.newMetastoreConf(); @@ -130,8 +129,7 @@ public void setUp() throws Exception { objectStore.shutdown(); } - @After - public void teardown() throws Exception { + @After public void teardown() throws Exception { Deadline.startTimer(""); Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); @@ -155,8 +153,7 @@ public void teardown() throws Exception { * Methods that test CachedStore *********************************************************************************************/ - @Test - public void testPrewarm() throws Exception { + @Test public void testPrewarm() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -199,8 +196,7 @@ public void testPrewarm() throws Exception { cachedStore.shutdown(); } - @Test - public void testPrewarmBlackList() throws Exception { + @Test public void testPrewarmBlackList() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -223,8 +219,7 @@ public void testPrewarmBlackList() throws Exception { cachedStore.shutdown(); } - @Test - public void testPrewarmWhiteList() throws Exception { + @Test public void testPrewarmWhiteList() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -270,8 +265,7 @@ public void testPrewarmMemoryEstimation() throws Exception { cachedStore.shutdown(); } - @Test - public void testCacheUpdate() throws Exception { + @Test public void 
testCacheUpdate() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -353,8 +347,7 @@ public void testCacheUpdate() throws Exception { cachedStore.shutdown(); } - @Test - public void testCreateAndGetDatabase() throws Exception { + @Test public void testCreateAndGetDatabase() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -379,12 +372,12 @@ public void testCreateAndGetDatabase() throws Exception { Assert.assertEquals(3, allDatabases.size()); // Add another db via CachedStore String dbName1 = "testCreateAndGetDatabase1"; - Database db1 = createDatabaseObject(dbName1, dbOwner); - cachedStore.createDatabase(db1); - db1 = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); + Database localDb1 = createDatabaseObject(dbName1, dbOwner); + cachedStore.createDatabase(localDb1); + localDb1 = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); // Read db via ObjectStore dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); - Assert.assertEquals(db1, dbRead); + Assert.assertEquals(localDb1, dbRead); allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(4, allDatabases.size()); // Clean up @@ -393,8 +386,7 @@ public void testCreateAndGetDatabase() throws Exception { cachedStore.shutdown(); } - @Test - public void testDropDatabase() throws Exception { + @Test public void testDropDatabase() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -424,12 
+416,12 @@ public void testDropDatabase() throws Exception { Assert.assertEquals(2, allDatabases.size()); // Create another db via CachedStore and drop via ObjectStore String dbName1 = "testDropDatabase1"; - Database db1 = createDatabaseObject(dbName1, dbOwner); - cachedStore.createDatabase(db1); - db1 = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); + Database localDb1 = createDatabaseObject(dbName1, dbOwner); + cachedStore.createDatabase(localDb1); + localDb1 = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); // Read db via ObjectStore dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); - Assert.assertEquals(db1, dbRead); + Assert.assertEquals(localDb1, dbRead); allDatabases = cachedStore.getAllDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(3, allDatabases.size()); objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName1); @@ -440,8 +432,7 @@ public void testDropDatabase() throws Exception { cachedStore.shutdown(); } - @Test - public void testAlterDatabase() throws Exception { + @Test public void testAlterDatabase() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -480,8 +471,7 @@ public void testAlterDatabase() throws Exception { cachedStore.shutdown(); } - @Test - public void testCreateAndGetTable() throws Exception { + @Test public void testCreateAndGetTable() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -527,7 +517,6 @@ public void testCreateAndGetTable() throws Exception { cachedStore.shutdown(); } - @Test // Note: the 44Kb approximation has been determined based on trial/error. 
// If this starts failing on different env, might need another look. - public void testGetAllTablesPrewarmMemoryLimit() throws Exception { + @Test public void testGetAllTablesPrewarmMemoryLimit() throws Exception { @@ -553,8 +542,7 @@ public void testGetAllTablesPrewarmMemoryLimit() throws Exception { cachedStore.shutdown(); } - @Test - public void testGetAllTablesBlacklist() throws Exception { + @Test public void testGetAllTablesBlacklist() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -579,8 +567,7 @@ public void testGetAllTablesBlacklist() throws Exception { cachedStore.shutdown(); } - @Test - public void testGetAllTablesWhitelist() throws Exception { + @Test public void testGetAllTablesWhitelist() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -605,8 +592,7 @@ public void testGetAllTablesWhitelist() throws Exception { cachedStore.shutdown(); } - @Test - public void testGetTableByPattern() throws Exception { + @Test public void testGetTableByPattern() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -631,8 +617,7 @@ public void testGetTableByPattern() throws Exception { cachedStore.shutdown(); } - @Test - public void testAlterTable() throws Exception { + @Test public void testAlterTable() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -653,8 +638,8 @@ public
void testAlterTable() throws Exception { String newOwner = "newOwner"; Table db1Utbl1ReadAlt = new Table(db1Utbl1Read); db1Utbl1ReadAlt.setOwner(newOwner); - cachedStore.alterTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName(), db1Utbl1ReadAlt, - "0"); + cachedStore + .alterTable(DEFAULT_CATALOG_NAME, db1Utbl1Read.getDbName(), db1Utbl1Read.getTableName(), db1Utbl1ReadAlt, "0"); db1Utbl1Read = cachedStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1ReadAlt.getDbName(), db1Utbl1ReadAlt.getTableName()); Table db1Utbl1ReadOS = @@ -664,8 +649,8 @@ public void testAlterTable() throws Exception { Table db2Utbl1Read = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName()); Table db2Utbl1ReadAlt = new Table(db2Utbl1Read); db2Utbl1ReadAlt.setOwner(newOwner); - objectStore.alterTable(DEFAULT_CATALOG_NAME, db2Utbl1Read.getDbName(), db2Utbl1Read.getTableName(), db2Utbl1ReadAlt, - "0"); + objectStore + .alterTable(DEFAULT_CATALOG_NAME, db2Utbl1Read.getDbName(), db2Utbl1Read.getTableName(), db2Utbl1ReadAlt, "0"); updateCache(cachedStore); db2Utbl1Read = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1ReadAlt.getDbName(), db2Utbl1ReadAlt.getTableName()); @@ -675,8 +660,7 @@ public void testAlterTable() throws Exception { cachedStore.shutdown(); } - @Test - public void testDropTable() throws Exception { + @Test public void testDropTable() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -716,12 +700,11 @@ public void testDropTable() throws Exception { /********************************************************************************************** * Methods that test SharedCache - * @throws MetaException - * @throws NoSuchObjectException + * @throws MetaException + * @throws NoSuchObjectException 
*********************************************************************************************/ - @Test - public void testSharedStoreDb() throws NoSuchObjectException, MetaException { + @Test public void testSharedStoreDb() throws NoSuchObjectException, MetaException { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -731,13 +714,13 @@ public void testSharedStoreDb() throws NoSuchObjectException, MetaException { cachedStore.setConfForTest(conf); SharedCache sharedCache = CachedStore.getSharedCache(); - Database db1 = createDatabaseObject("db1", "user1"); - Database db2 = createDatabaseObject("db2", "user1"); - Database db3 = createDatabaseObject("db3", "user1"); + Database localDb1 = createDatabaseObject("db1", "user1"); + Database localDb2 = createDatabaseObject("db2", "user1"); + Database localDb3 = createDatabaseObject("db3", "user1"); Database newDb1 = createDatabaseObject("newdb1", "user1"); - sharedCache.addDatabaseToCache(db1); - sharedCache.addDatabaseToCache(db2); - sharedCache.addDatabaseToCache(db3); + sharedCache.addDatabaseToCache(localDb1); + sharedCache.addDatabaseToCache(localDb2); + sharedCache.addDatabaseToCache(localDb3); Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3); sharedCache.alterDatabaseInCache(DEFAULT_CATALOG_NAME, "db1", newDb1); Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3); @@ -750,8 +733,7 @@ public void testSharedStoreDb() throws NoSuchObjectException, MetaException { cachedStore.shutdown(); } - @Test - public void testSharedStoreTable() { + @Test public void testSharedStoreTable() { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -836,8 +818,7 @@ public 
void testSharedStoreTable() { cachedStore.shutdown(); } - @Test - public void testSharedStorePartition() { + @Test public void testSharedStorePartition() { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -1002,14 +983,14 @@ public void testAggrStatsRepeatedRead() throws Exception { Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); - + objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName(), Warehouse.makePartName(tbl.getPartitionKeys(), partVals1), partVals1, colName); objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName(), Warehouse.makePartName(tbl.getPartitionKeys(), partVals2), partVals2, colName); objectStore.dropPartition(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName(), partVals1); objectStore.dropPartition(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName(), partVals2); - objectStore.dropTable(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName()) ; + objectStore.dropTable(DEFAULT_CATALOG_NAME, db.getName(), tbl.getTableName()); objectStore.dropDatabase(DEFAULT_CATALOG_NAME, db.getName()); cachedStore.shutdown(); } @@ -1185,8 +1166,7 @@ public void testPartitionAggrStatsBitVector() throws Exception { cachedStore.shutdown(); } - @Test - public void testMultiThreadedSharedCacheOps() throws Exception { + @Test public void testMultiThreadedSharedCacheOps() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetastoreConf.setVar(conf, 
MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb"); @@ -1199,8 +1179,7 @@ public void testMultiThreadedSharedCacheOps() throws Exception { List dbNames = new ArrayList(Arrays.asList("db1", "db2", "db3", "db4", "db5")); List> tasks = new ArrayList>(); ExecutorService executor = Executors.newFixedThreadPool(50, new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { + @Override public Thread newThread(Runnable r) { Thread t = Executors.defaultThreadFactory().newThread(r); t.setDaemon(true); return t; @@ -1332,6 +1311,193 @@ public Object call() { cachedStore.shutdown(); } + @Test public void testPartitionSize() { + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "5Kb"); + MetaStoreTestUtils.setConfForStandloneMode(conf); + CachedStore cachedStore = new CachedStore(); + CachedStore.clearSharedCache(); + cachedStore.setConfForTestExceptSharedCache(conf); + + String dbName = "db1"; + String tbl1Name = "tbl1"; + String tbl2Name = "tbl2"; + String owner = "user1"; + Database db = createDatabaseObject(dbName, owner); + + FieldSchema col1 = new FieldSchema("col1", "int", "integer column"); + FieldSchema col2 = new FieldSchema("col2", "string", "string column"); + List cols = new ArrayList(); + cols.add(col1); + cols.add(col2); + List ptnCols = new ArrayList(); + Table tbl1 = createTestTbl(dbName, tbl1Name, owner, cols, ptnCols); + Table tbl2 = createTestTbl(dbName, tbl2Name, owner, cols, ptnCols); + + Map tableSizeMap = new HashMap<>(); + String tbl1Key = CacheUtils.buildTableKey(DEFAULT_CATALOG_NAME, dbName, tbl1Name); + String tbl2Key = CacheUtils.buildTableKey(DEFAULT_CATALOG_NAME, dbName, tbl2Name); + tableSizeMap.put(tbl1Key, 1000); + tableSizeMap.put(tbl2Key, 4500); + + Partition part1 = new Partition(); + StorageDescriptor sd1 = new StorageDescriptor(); + List cols1 
= new ArrayList<>(); + cols1.add(new FieldSchema("col1", "int", "")); + Map params1 = new HashMap<>(); + params1.put("key", "value"); + sd1.setCols(cols1); + sd1.setParameters(params1); + sd1.setLocation("loc1"); + part1.setSd(sd1); + part1.setValues(Arrays.asList("201701")); + + Partition part2 = new Partition(); + StorageDescriptor sd2 = new StorageDescriptor(); + List cols2 = new ArrayList<>(); + cols2.add(new FieldSchema("col1", "int", "")); + Map params2 = new HashMap<>(); + params2.put("key", "value"); + sd2.setCols(cols2); + sd2.setParameters(params2); + sd2.setLocation("loc2"); + part2.setSd(sd2); + part2.setValues(Arrays.asList("201702")); + + Partition part3 = new Partition(); + StorageDescriptor sd3 = new StorageDescriptor(); + List cols3 = new ArrayList<>(); + cols3.add(new FieldSchema("col3", "int", "")); + Map params3 = new HashMap<>(); + params3.put("key2", "value2"); + sd3.setCols(cols3); + sd3.setParameters(params3); + sd3.setLocation("loc3"); + part3.setSd(sd3); + part3.setValues(Arrays.asList("201703")); + + Partition newPart1 = new Partition(); + newPart1.setDbName(dbName); + newPart1.setTableName(tbl1Name); + StorageDescriptor newSd1 = new StorageDescriptor(); + List newCols1 = new ArrayList<>(); + newCols1.add(new FieldSchema("newcol1", "int", "")); + Map newParams1 = new HashMap<>(); + newParams1.put("key", "value"); + newSd1.setCols(newCols1); + newSd1.setParameters(params1); + newSd1.setLocation("loc1new"); + newPart1.setSd(newSd1); + newPart1.setValues(Arrays.asList("201701")); + + SharedCache sharedCache = cachedStore.getSharedCache(); + new SharedCache.Builder().concurrencyLevel(1).configuration(conf).tableSizeMap(tableSizeMap).build(sharedCache); + + sharedCache.addDatabaseToCache(db); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2); + + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part1); + 
sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part2); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part3); + + Partition p = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701")); + Assert.assertNull(p); + + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, newPart1); + p = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701")); + Assert.assertNotNull(p); + cachedStore.shutdown(); + } + + @Test public void testShowTables() throws Exception { + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "5kb"); + MetaStoreTestUtils.setConfForStandloneMode(conf); + CachedStore cachedStore = new CachedStore(); + CachedStore.clearSharedCache(); + + cachedStore.setConfForTestExceptSharedCache(conf); + ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); + //set up table size map + Map tableSizeMap = new HashMap<>(); + String db1Utbl1TblKey = + CacheUtils.buildTableKey(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); + String db1Ptbl1TblKey = + CacheUtils.buildTableKey(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName()); + String db2Utbl1TblKey = + CacheUtils.buildTableKey(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName()); + String db2Ptbl1TblKey = + CacheUtils.buildTableKey(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName()); + tableSizeMap.put(db1Utbl1TblKey, 4000); + tableSizeMap.put(db1Ptbl1TblKey, 4000); + tableSizeMap.put(db2Utbl1TblKey, 4000); + tableSizeMap.put(db2Ptbl1TblKey, 4000); + + SharedCache sc = cachedStore.getSharedCache(); + new SharedCache.Builder().concurrencyLevel(1).configuration(conf).tableSizeMap(tableSizeMap).build(sc); + + // Prewarm 
CachedStore + CachedStore.setCachePrewarmedState(false); + CachedStore.prewarm(objectStore); + + List db1Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db1.getName()); + Assert.assertEquals(2, db1Tables.size()); + List db2Tables = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, db2.getName()); + Assert.assertEquals(2, db2Tables.size()); + + cachedStore.shutdown(); + } + + @Test public void testTableEviction() throws Exception { + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "5kb"); + MetaStoreTestUtils.setConfForStandloneMode(conf); + CachedStore cachedStore = new CachedStore(); + CachedStore.clearSharedCache(); + + cachedStore.setConfForTestExceptSharedCache(conf); + ObjectStore objectStore = (ObjectStore) cachedStore.getRawStore(); + //set up table size map + Map tableSizeMap = new HashMap<>(); + String db1Utbl1TblKey = + CacheUtils.buildTableKey(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); + String db1Ptbl1TblKey = + CacheUtils.buildTableKey(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName()); + String db2Utbl1TblKey = + CacheUtils.buildTableKey(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName()); + String db2Ptbl1TblKey = + CacheUtils.buildTableKey(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName()); + tableSizeMap.put(db1Utbl1TblKey, 4000); + tableSizeMap.put(db1Ptbl1TblKey, 4000); + tableSizeMap.put(db2Utbl1TblKey, 4000); + tableSizeMap.put(db2Ptbl1TblKey, 4000); + Table tblDb1Utbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName()); + Table tblDb1Ptbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName()); + Table tblDb2Utbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName()); + Table 
tblDb2Ptbl1 = objectStore.getTable(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName()); + + SharedCache sc = cachedStore.getSharedCache(); + new SharedCache.Builder().concurrencyLevel(1).configuration(conf).tableSizeMap(tableSizeMap).build(sc); + + sc.addDatabaseToCache(db1); + sc.addDatabaseToCache(db2); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db1Utbl1.getDbName(), db1Utbl1.getTableName(), tblDb1Utbl1); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db1Ptbl1.getDbName(), db1Ptbl1.getTableName(), tblDb1Ptbl1); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db2Utbl1.getDbName(), db2Utbl1.getTableName(), tblDb2Utbl1); + sc.addTableToCache(DEFAULT_CATALOG_NAME, db2Ptbl1.getDbName(), db2Ptbl1.getTableName(), tblDb2Ptbl1); + + List db1Tables = sc.listCachedTableNames(DEFAULT_CATALOG_NAME, db1.getName()); + Assert.assertEquals(0, db1Tables.size()); + List db2Tables = sc.listCachedTableNames(DEFAULT_CATALOG_NAME, db2.getName()); + Assert.assertEquals(1, db2Tables.size()); + + cachedStore.shutdown(); + } + private Table createTestTbl(String dbName, String tblName, String tblOwner, List cols, List ptnCols) { String serdeLocation = "file:/tmp"; @@ -1535,8 +1701,8 @@ private TableAndColStats createUnpartitionedTableObjectWithColStats(Database db) } class TableAndColStats { - Table table; - ColumnStatistics colStats; + private Table table; + private ColumnStatistics colStats; TableAndColStats(Table table, ColumnStatistics colStats) { this.table = table; @@ -1592,8 +1758,8 @@ private PartitionObjectsAndNames createPartitionObjects(Table table) { } class PartitionObjectsAndNames { - List ptns; - List ptnNames; + private List ptns; + private List ptnNames; PartitionObjectsAndNames(List ptns, List ptnNames) { this.ptns = ptns;