diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
index 944c813..d50fa13 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java
@@ -40,6 +40,10 @@ public static String buildDbKey(String catName, String dbName) {
     return buildKey(catName.toLowerCase(), dbName.toLowerCase());
   }
 
+  public static String buildDbKeyWithDelimiterSuffix(String catName, String dbName) {
+    return buildKey(catName.toLowerCase(), dbName.toLowerCase()) + delimit;
+  }
+
   /**
    * Builds a key for the partition cache which is concatenation of partition values, each value
    * separated by a delimiter
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index e366ebd..6ef9a19 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -724,6 +724,7 @@ public void run() {
       } else {
         try {
           triggerPreWarm(rawStore);
+          shouldRunPrewarm = false;
         } catch (Exception e) {
           LOG.error("Prewarm failure", e);
           return;
@@ -815,7 +816,6 @@ private void updateTableColStats(RawStore rawStore, String catName, String dbNam
       if (table != null && !table.isSetPartitionKeys()) {
         List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
         Deadline.startTimer("getTableColumnStatistics");
-
         ColumnStatistics tableColStats =
             rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
         Deadline.stopTimer();
@@ -865,7 +865,9 @@ private void updateTablePartitionColStats(RawStore rawStore, String catName, Str
             rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
         Deadline.stopTimer();
         sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats);
+        Deadline.startTimer("getPartitionsByNames");
         List<Partition> parts = rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+        Deadline.stopTimer();
         // Also save partitions for consistency as they have the stats state.
         for (Partition part : parts) {
           sharedCache.alterPartitionInCache(catName, dbName, tblName, part.getValues(), part);
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
index 1c23022..60862d4 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
@@ -1317,11 +1317,13 @@ public void removeTableFromCache(String catName, String dbName, String tblName)
         //in case of retry, ignore second try.
         return;
       }
-      byte[] sdHash = tblWrapper.getSdHash();
-      if (sdHash != null) {
-        decrSd(sdHash);
+      if (tblWrapper != null) {
+        byte[] sdHash = tblWrapper.getSdHash();
+        if (sdHash != null) {
+          decrSd(sdHash);
+        }
+        isTableCacheDirty.set(true);
       }
-      isTableCacheDirty.set(true);
     } finally {
       cacheLock.writeLock().unlock();
     }
@@ -1438,25 +1440,30 @@ public void alterTableAndStatsInCache(String catName, String dbName, String tblN
 
   public void refreshTablesInCache(String catName, String dbName, List<Table> tables) {
     try {
-      cacheLock.writeLock().lock();
       if (isTableCacheDirty.compareAndSet(true, false)) {
         LOG.debug("Skipping table cache update; the table list we have is dirty.");
         return;
       }
-      Map<String, TableWrapper> newTableCache = new HashMap<>();
+      Map<String, TableWrapper> newCacheForDB = new TreeMap<>();
       for (Table tbl : tables) {
         String tblName = StringUtils.normalizeIdentifier(tbl.getTableName());
-        TableWrapper tblWrapper =
-            tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
+        TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
         if (tblWrapper != null) {
           tblWrapper.updateTableObj(tbl, this);
         } else {
           tblWrapper = createTableWrapper(catName, dbName, tblName, tbl);
         }
-        newTableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper);
+        newCacheForDB.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper);
+      }
+      cacheLock.writeLock().lock();
+      Iterator<Map.Entry<String, TableWrapper>> entryIterator = tableCache.entrySet().iterator();
+      while (entryIterator.hasNext()) {
+        String key = entryIterator.next().getKey();
+        if (key.startsWith(CacheUtils.buildDbKeyWithDelimiterSuffix(catName, dbName))) {
+          entryIterator.remove();
+        }
       }
-      tableCache.clear();
-      tableCache = newTableCache;
+      tableCache.putAll(newCacheForDB);
     } finally {
       cacheLock.writeLock().unlock();
     }