diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index d9fb645858..0117bf6d5b 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -914,6 +914,22 @@ public AggrStats get_aggr_stats_for(String catName, String dbName,
     return null;
   }
 
+  @Override
+  public AggrStats getAggrStatsFor(String catName, String dbName,
+      String tblName, List<String> partNames, List<String> colNames)
+      throws MetaException {
+    return null;
+  }
+
+  @Override
+  public AggrStats getAggrStatsFor(String catName, String dbName,
+      String tblName, List<String> partNames, List<String> colNames,
+      String writeIdList)
+      throws MetaException {
+    return null;
+  }
+
   @Override
   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
     return objectStore.getNextNotification(rqst);
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 0485184554..c59b923857 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -7496,7 +7496,7 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TExce
           getDefaultCatalog(conf);
       String dbName = request.getDbName().toLowerCase();
       String tblName = request.getTblName().toLowerCase();
-      startFunction("get_aggr_stats_for", ": table=" +
+      startFunction("getAggrStatsFor", ": table=" +
           TableName.getQualified(catName, dbName, tblName));
       List<String> lowerCaseColNames = new ArrayList<>(request.getColNames().size());
@@ -7510,11 +7510,11 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TExce
 
       AggrStats aggrStats = null;
       try {
-        aggrStats = getMS().get_aggr_stats_for(catName, dbName, tblName,
+        aggrStats = getMS().getAggrStatsFor(catName, dbName, tblName,
             lowerCasePartNames, lowerCaseColNames, request.getValidWriteIdList());
         return aggrStats;
       } finally {
-        endFunction("get_aggr_stats_for", aggrStats == null, null, request.getTblName());
+        endFunction("getAggrStatsFor", aggrStats == null, null, request.getTblName());
       }
     }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 03e3a2d257..7724f70baa 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -880,11 +880,11 @@ private MDatabase getMDatabase(String catName, String name) throws NoSuchObjectE
   }
 
   @Override
-  public Database getDatabase(String catalogName, String name) throws NoSuchObjectException {
+  public Database getDatabase(String catalogName, String dbName) throws NoSuchObjectException {
     MetaException ex = null;
     Database db = null;
     try {
-      db = getDatabaseInternal(catalogName, name);
+      db = getDatabaseInternal(catalogName, dbName);
     } catch (MetaException e) {
       // Signature restriction to NSOE, and NSOE being a flat exception prevents us from
       // setting the cause of the NSOE as the MetaException. We should not lose the info
@@ -894,8 +894,8 @@ public Database getDatabase(String catalogName, String name) throws NoSuchObject
     }
     if (db == null) {
       LOG.warn("Failed to get database {}.{}, returning NoSuchObjectException",
-          catalogName, name, ex);
-      throw new NoSuchObjectException(name + (ex == null ? "" : (": " + ex.getMessage())));
+          catalogName, dbName, ex);
+      throw new NoSuchObjectException(dbName + (ex == null ? "" : (": " + ex.getMessage())));
     }
     return db;
   }
@@ -980,21 +980,21 @@ public boolean alterDatabase(String catName, String dbName, Database db)
   }
 
   @Override
-  public boolean dropDatabase(String catName, String dbname)
+  public boolean dropDatabase(String catName, String dbName)
       throws NoSuchObjectException, MetaException {
     boolean success = false;
-    LOG.info("Dropping database {}.{} along with all tables", catName, dbname);
-    dbname = normalizeIdentifier(dbname);
+    LOG.info("Dropping database {}.{} along with all tables", catName, dbName);
+    dbName = normalizeIdentifier(dbName);
     catName = normalizeIdentifier(catName);
     QueryWrapper queryWrapper = new QueryWrapper();
     try {
       openTransaction();
 
       // then drop the database
-      MDatabase db = getMDatabase(catName, dbname);
+      MDatabase db = getMDatabase(catName, dbName);
       pm.retrieve(db);
       if (db != null) {
-        List<MDBPrivilege> dbGrants = this.listDatabaseGrants(catName, dbname, null, queryWrapper);
+        List<MDBPrivilege> dbGrants = this.listDatabaseGrants(catName, dbName, null, queryWrapper);
         if (CollectionUtils.isNotEmpty(dbGrants)) {
           pm.deletePersistentAll(dbGrants);
         }
@@ -1842,7 +1842,7 @@ private MTable getMTable(String catName, String db, String table) {
   }
 
   @Override
-  public List<Table> getTableObjectsByName(String catName, String db, List<String> tbl_names)
+  public List<Table> getTableObjectsByName(String catName, String dbName, List<String> tbl_names)
       throws MetaException, UnknownDBException {
     List<Table> tables = new ArrayList<>();
     boolean committed = false;
@@ -1850,7 +1850,7 @@ private MTable getMTable(String catName, String db, String table) {
     Query query = null;
     try {
       openTransaction();
-      db = normalizeIdentifier(db);
+      dbName = normalizeIdentifier(dbName);
       catName = normalizeIdentifier(catName);
       List<String> lowered_tbl_names = new ArrayList<>(tbl_names.size());
@@ -1860,17 +1860,17 @@ private MTable getMTable(String catName, String db, String table) {
       query = pm.newQuery(MTable.class);
       query.setFilter("database.name == db && database.catalogName == cat && tbl_names.contains(tableName)");
       query.declareParameters("java.lang.String db, java.lang.String cat, java.util.Collection tbl_names");
-      Collection mtables = (Collection) query.execute(db, catName, lowered_tbl_names);
+      Collection mtables = (Collection) query.execute(dbName, catName, lowered_tbl_names);
       if (mtables == null || mtables.isEmpty()) {
         // Need to differentiate between an unmatched pattern and a non-existent database
         dbExistsQuery = pm.newQuery(MDatabase.class, "name == db && catalogName == cat");
         dbExistsQuery.declareParameters("java.lang.String db, java.lang.String cat");
         dbExistsQuery.setUnique(true);
         dbExistsQuery.setResult("name");
-        String dbNameIfExists = (String) dbExistsQuery.execute(db, catName);
+        String dbNameIfExists = (String) dbExistsQuery.execute(dbName, catName);
         if (org.apache.commons.lang.StringUtils.isEmpty(dbNameIfExists)) {
           throw new UnknownDBException("Could not find database " +
-              DatabaseName.getQualified(catName, db));
+              DatabaseName.getQualified(catName, dbName));
         }
       } else {
         for (Iterator iter = mtables.iterator(); iter.hasNext(); ) {
@@ -2431,25 +2431,25 @@ public boolean addPartition(Partition part) throws InvalidObjectException,
 
   @Override
   public Partition getPartition(String catName, String dbName, String tableName,
-      List<String> part_vals) throws NoSuchObjectException, MetaException {
-    return getPartition(catName, dbName, tableName, part_vals, null);
+      List<String> partitionValues) throws NoSuchObjectException, MetaException {
+    return getPartition(catName, dbName, tableName, partitionValues, null);
   }
 
   @Override
   public Partition getPartition(String catName, String dbName, String tableName,
-      List<String> part_vals,
+      List<String> partitionValues,
       String validWriteIds)
       throws NoSuchObjectException, MetaException {
     openTransaction();
     MTable table = this.getMTable(catName, dbName, tableName);
-    MPartition mpart = getMPartition(catName, dbName, tableName, part_vals);
+    MPartition mpart = getMPartition(catName, dbName, tableName, partitionValues);
     Partition part = convertToPart(mpart);
     commitTransaction();
     if(part == null) {
       throw new NoSuchObjectException("partition values="
-          + part_vals.toString());
+          + partitionValues.toString());
     }
-    part.setValues(part_vals);
+    part.setValues(partitionValues);
    // If transactional table partition, check whether the current version partition
    // statistics in the metastore comply with the client query's snapshot isolation.
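    // Illustrative sketch with hypothetical values (not lines from this file): a reader
    // whose validWriteIds snapshot is "db.tbl:5:9223372036854775807::" treats write IDs
    // <= 5 as committed, so partition stats persisted by a later write ID are returned
    // marked as not accurate rather than exposing a concurrent writer's statistics.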
    long statsWriteId = mpart.getWriteId();
@@ -2641,12 +2641,12 @@ private Partition convertToPart(String catName, String dbName, String tblName, M
 
   @Override
   public boolean dropPartition(String catName, String dbName, String tableName,
-      List<String> part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException,
+      List<String> partitionValues) throws MetaException, NoSuchObjectException, InvalidObjectException,
       InvalidInputException {
     boolean success = false;
     try {
       openTransaction();
-      MPartition part = getMPartition(catName, dbName, tableName, part_vals);
+      MPartition part = getMPartition(catName, dbName, tableName, partitionValues);
       dropPartitionCommon(part);
       success = commitTransaction();
     } finally {
@@ -2877,7 +2877,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio
 
   @Override
   public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
-      List<String> partVals, String user_name, List<String> group_names)
+      List<String> partVals, String userName, List<String> groupNames)
       throws NoSuchObjectException, MetaException, InvalidObjectException {
     boolean success = false;
     try {
@@ -2895,7 +2895,7 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN
         String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
             .getPartitionKeys()), partVals);
         PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(catName, dbName,
-            tblName, partName, user_name, group_names);
+            tblName, partName, userName, groupNames);
         part.setPrivileges(partAuth);
       }
 
@@ -2940,13 +2940,13 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN
   // TODO:pc implement max
   @Override
   public List<String> listPartitionNames(String catName, String dbName, String tableName,
-      short max) throws MetaException {
+      short maxPartitions) throws MetaException {
     List<String> pns = null;
     boolean success = false;
     try {
       openTransaction();
       LOG.debug("Executing getPartitionNames");
-      pns = getPartitionNamesNoTxn(catName, dbName, tableName, max);
+      pns = getPartitionNamesNoTxn(catName, dbName, tableName, maxPartitions);
       success = commitTransaction();
     } finally {
       if (!success) {
@@ -3273,8 +3273,8 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str
   }
 
   @Override
-  public List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
-      List<String> part_vals, short max_parts, String userName, List<String> groupNames)
+  public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tableName,
+      List<String> partitionValues, short maxPartitions, String userName, List<String> groupNames)
       throws MetaException, InvalidObjectException, NoSuchObjectException {
     List<Partition> partitions = new ArrayList<>();
     boolean success = false;
@@ -3283,9 +3283,9 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str
     try {
       openTransaction();
       LOG.debug("executing listPartitionNamesPsWithAuth");
-      Collection parts = getPartitionPsQueryResults(catName, db_name, tbl_name,
-          part_vals, max_parts, null, queryWrapper);
-      MTable mtbl = getMTable(catName, db_name, tbl_name);
+      Collection parts = getPartitionPsQueryResults(catName, dbName, tableName,
+          partitionValues, maxPartitions, null, queryWrapper);
+      MTable mtbl = getMTable(catName, dbName, tableName);
       for (Object o : parts) {
         Partition part = convertToPart((MPartition) o);
         //set auth privileges
@@ -3293,8 +3293,8 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str
             "TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
           String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
               .getPartitionKeys()), part.getValues());
-          PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(catName, db_name,
-              tbl_name, partName, userName, groupNames);
+          PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(catName, dbName,
+              tableName, partName, userName, groupNames);
           part.setPrivileges(partAuth);
         }
         partitions.add(part);
@@ -3308,7 +3308,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str
   @Override
   public List<String> listPartitionNamesPs(String catName, String dbName, String tableName,
-      List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException {
+      List<String> partitionValues, short maxPartitions) throws MetaException, NoSuchObjectException {
     List<String> partitionNames = new ArrayList<>();
     boolean success = false;
     QueryWrapper queryWrapper = new QueryWrapper();
@@ -3317,7 +3317,7 @@ private Collection getPartitionPsQueryResults(String catName, String dbName, Str
       openTransaction();
       LOG.debug("Executing listPartitionNamesPs");
       Collection names = getPartitionPsQueryResults(catName, dbName, tableName,
-          part_vals, max_parts, "partitionName", queryWrapper);
+          partitionValues, maxPartitions, "partitionName", queryWrapper);
       partitionNames.addAll(names);
       success = commitTransaction();
     } finally {
@@ -4307,23 +4307,23 @@ private String makeParameterDeclarationStringObj(Map<String, Object> params) {
   }
 
   @Override
-  public Table alterTable(String catName, String dbname, String name, Table newTable,
+  public Table alterTable(String catName, String dbName, String tableName, Table newTable,
       String queryValidWriteIds) throws InvalidObjectException, MetaException {
     boolean success = false;
     boolean registerCreationSignature = false;
     try {
       openTransaction();
-      name = normalizeIdentifier(name);
-      dbname = normalizeIdentifier(dbname);
+      tableName = normalizeIdentifier(tableName);
+      dbName = normalizeIdentifier(dbName);
       catName = normalizeIdentifier(catName);
       MTable newt = convertToMTable(newTable);
       if (newt == null) {
         throw new InvalidObjectException("new table is invalid");
       }
 
-      MTable oldt = getMTable(catName, dbname, name);
+      MTable oldt = getMTable(catName, dbName, tableName);
       if (oldt == null) {
-        throw new MetaException("table " + dbname + "." + name + " doesn't exist");
+        throw new MetaException("table " + dbName + "." + tableName + " doesn't exist");
       }
 
       // For now only alter name, owner, parameters, cols, bucketcols are allowed
@@ -4369,7 +4369,7 @@ public Table alterTable(String catName, String dbname, String name, Table newTab
           if (!isCurrentStatsValidForTheQuery(oldt, queryValidWriteIds, true)) {
             StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE);
             LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " +
-                dbname + "." + name + ". will be made persistent.");
+                dbName + "." + tableName + ". will be made persistent.");
           }
           assert newTable.getWriteId() > 0;
           oldt.setWriteId(newTable.getWriteId());
@@ -4414,17 +4414,17 @@ private static String verifyStatsChangeCtx(Map<String, String> oldP, Map<String
   }
 
   @Override
-  public Partition alterPartition(String catName, String dbname, String name, List<String> part_vals,
-      Partition newPart, String validWriteIds) throws InvalidObjectException, MetaException {
+  public Partition alterPartition(String catName, String dbName, String tableName, List<String> partitionValues,
+      Partition newPartition, String validWriteIds) throws InvalidObjectException, MetaException {
     boolean success = false;
     Throwable e = null;
     Partition result = null;
     try {
       openTransaction();
-      if (newPart.isSetWriteId()) {
+      if (newPartition.isSetWriteId()) {
         LOG.warn("Alter partitions with write ID called without transaction information");
       }
       Ref<MColumnDescriptor> oldCd = new Ref<>();
-      result = alterPartitionNoTxn(catName, dbname, name, part_vals, newPart, validWriteIds, oldCd);
+      result = alterPartitionNoTxn(catName, dbName, tableName, partitionValues, newPartition, validWriteIds, oldCd);
       removeUnusedColumnDescriptor(oldCd.t);
       // commit the changes
       success = commitTransaction();
@@ -4543,26 +4543,26 @@ public Partition alterPartition(String catName, String dbname, String name, List
   }
 
   @Override
-  public List<Partition> alterPartitions(String catName, String dbname, String name,
-      List<List<String>> part_vals, List<Partition> newParts,
+  public List<Partition> alterPartitions(String catName, String dbName, String tableName,
+      List<List<String>> partitionValuesList, List<Partition> newPartitions,
       long writeId, String queryWriteIdList) throws InvalidObjectException, MetaException {
     boolean success = false;
     Exception e = null;
-    List<Partition> results = new ArrayList<>(newParts.size());
+    List<Partition> results = new ArrayList<>(newPartitions.size());
     try {
       openTransaction();
-      Iterator<List<String>> part_val_itr = part_vals.iterator();
+      Iterator<List<String>> part_val_itr = partitionValuesList.iterator();
       Set<MColumnDescriptor> oldCds = new HashSet<>();
       Ref<MColumnDescriptor> oldCdRef = new Ref<>();
-      for (Partition tmpPart: newParts) {
+      for (Partition tmpPart: newPartitions) {
        List<String> tmpPartVals = part_val_itr.next();
        if (writeId > 0) {
          tmpPart.setWriteId(writeId);
        }
        oldCdRef.t = null;
        Partition result = alterPartitionNoTxn(
-            catName, dbname, name, tmpPartVals, tmpPart, queryWriteIdList, oldCdRef);
+            catName, dbName, tableName, tmpPartVals, tmpPart, queryWriteIdList, oldCdRef);
        results.add(result);
        if (oldCdRef.t != null) {
          oldCds.add(oldCdRef.t);
@@ -9042,7 +9042,23 @@ protected ColumnStatistics getJdoResult(
   }
 
   @Override
+  @Deprecated
+  public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List<String> partNames,
+      List<String> colNames) throws MetaException, NoSuchObjectException {
+    return getAggrStatsFor(catName, dbName, tblName, partNames, colNames, null);
+  }
+
+  @Override
+  @Deprecated
   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+      List<String> partNames, List<String> colNames,
+      String writeIdList)
+      throws MetaException, NoSuchObjectException {
+    return getAggrStatsFor(catName, dbName, tblName, partNames, colNames, writeIdList);
+  }
+
+  @Override
+  public AggrStats getAggrStatsFor(String catName, String dbName, String tblName,
       final List<String> partNames, final List<String> colNames, String writeIdList)
       throws MetaException, NoSuchObjectException {
     // If the current stats in the metastore doesn't comply with
@@ -9077,11 +9093,11 @@
         }
       }
     }
-    return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+    return getAggrStatsFor(catName, dbName, tblName, partNames, colNames);
   }
 
   @Override
-  public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+  public AggrStats getAggrStatsFor(String catName, String dbName, String tblName,
       final List<String> partNames, final List<String> colNames)
       throws MetaException, NoSuchObjectException {
     final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(),
@@ -9738,9 +9754,9 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro
 
   @Override
   public boolean doesPartitionExist(String catName, String dbName, String tableName,
-      List<FieldSchema> partKeys, List<String> partVals)
+      List<FieldSchema> partKeys, List<String> partitionValues)
       throws MetaException {
-    String name = Warehouse.makePartName(partKeys, partVals);
+    String name = Warehouse.makePartName(partKeys, partitionValues);
     return this.getMPartition(catName, dbName, tableName, name) != null;
   }
 
@@ -10005,7 +10021,7 @@ public Function getFunction(String catName, String dbName, String funcName) thro
   }
 
   @Override
-  public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+  public NotificationEventResponse getNextNotification(NotificationEventRequest request) {
     boolean commited = false;
     Query query = null;
@@ -10013,12 +10029,12 @@ public NotificationEventResponse getNextNotification(NotificationEventRequest rq
     result.setEvents(new ArrayList<>());
     try {
       openTransaction();
-      long lastEvent = rqst.getLastEvent();
+      long lastEvent = request.getLastEvent();
       query = pm.newQuery(MNotificationLog.class, "eventId > lastEvent");
       query.declareParameters("java.lang.Long lastEvent");
       query.setOrdering("eventId ascending");
       int maxEventResponse = MetastoreConf.getIntVar(conf, ConfVars.METASTORE_MAX_EVENT_RESPONSE);
-      int maxEvents = (rqst.getMaxEvents() < maxEventResponse && rqst.getMaxEvents() > 0) ? rqst.getMaxEvents() : maxEventResponse;
+      int maxEvents = (request.getMaxEvents() < maxEventResponse && request.getMaxEvents() > 0) ? request.getMaxEvents() : maxEventResponse;
       query.setRange(0, maxEvents);
       Collection events = (Collection) query.execute(lastEvent);
       commited = commitTransaction();
@@ -10251,15 +10267,15 @@ public CurrentNotificationEventId getCurrentNotificationEventId() {
   }
 
   @Override
-  public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) {
+  public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest request) {
     Long result = 0L;
     boolean commited = false;
     Query query = null;
     try {
       openTransaction();
-      long fromEventId = rqst.getFromEventId();
-      String inputDbName = rqst.getDbName();
-      String catName = rqst.isSetCatName() ? rqst.getCatName() : getDefaultCatalog(conf);
+      long fromEventId = request.getFromEventId();
+      String inputDbName = request.getDbName();
+      String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf);
       long toEventId;
       String paramSpecs;
       List<Object> paramVals = new ArrayList<>();
@@ -10295,8 +10311,8 @@ public NotificationEventsCountResponse getNotificationEventsCount(NotificationEv
       paramVals.add(catName);
 
       // count events upto toEventId if specified
-      if (rqst.isSetToEventId()) {
-        toEventId = rqst.getToEventId();
+      if (request.isSetToEventId()) {
+        toEventId = request.getToEventId();
         queryStr = queryStr + " && eventId <= toEventId";
         paramSpecs = paramSpecs + ", java.lang.Long toEventId";
         paramVals.add(Long.valueOf(toEventId));
@@ -10309,8 +10325,8 @@
 
      // Cap the event count by limit if specified.
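      // For example (hypothetical numbers): 500 matching events with
      // request.getLimit() == 100 yield a response of 100; when no limit
      // is set, the full count of 500 is returned.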
      long eventCount = result.longValue();
-      if (rqst.isSetLimit() && eventCount > rqst.getLimit()) {
-        eventCount = rqst.getLimit();
+      if (request.isSetLimit() && eventCount > request.getLimit()) {
+        eventCount = request.getLimit();
       }
 
       return new NotificationEventsCountResponse(eventCount);
@@ -10480,10 +10496,10 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN
 
   @Override
-  public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
+  public List<SQLPrimaryKey> getPrimaryKeys(String catName, String dbName, String tableName)
       throws MetaException {
     try {
-      return getPrimaryKeysInternal(catName, db_name, tbl_name);
+      return getPrimaryKeysInternal(catName, dbName, tableName);
     } catch (NoSuchObjectException e) {
       throw new MetaException(ExceptionUtils.getStackTrace(e));
     }
@@ -10578,11 +10594,11 @@ private String getPrimaryKeyConstraintName(String catName, String db_name, Strin
   }
 
   @Override
-  public List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
-      String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException {
+  public List<SQLForeignKey> getForeignKeys(String catName, String parentDbName,
+      String parentTableName, String foreignDbName, String foreignTblName) throws MetaException {
     try {
-      return getForeignKeysInternal(catName, parent_db_name,
-          parent_tbl_name, foreign_db_name, foreign_tbl_name, true, true);
+      return getForeignKeysInternal(catName, parentDbName,
+          parentTableName, foreignDbName, foreignTblName, true, true);
     } catch (NoSuchObjectException e) {
       throw new MetaException(ExceptionUtils.getStackTrace(e));
     }
@@ -10711,10 +10727,10 @@ private String getPrimaryKeyConstraintName(String catName, String db_name, Strin
   }
 
   @Override
-  public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name, String tbl_name)
+  public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String dbName, String tblName)
       throws MetaException {
     try {
-      return getUniqueConstraintsInternal(catName, db_name, tbl_name, true, true);
+      return getUniqueConstraintsInternal(catName, dbName, tblName, true, true);
     } catch (NoSuchObjectException e) {
       throw new MetaException(ExceptionUtils.getStackTrace(e));
     }
@@ -10778,30 +10794,30 @@ private String getPrimaryKeyConstraintName(String catName, String db_name, Strin
   }
 
   @Override
-  public List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name, String tbl_name)
+  public List<SQLNotNullConstraint> getNotNullConstraints(String catName, String dbName, String tblName)
       throws MetaException {
     try {
-      return getNotNullConstraintsInternal(catName, db_name, tbl_name, true, true);
+      return getNotNullConstraintsInternal(catName, dbName, tblName, true, true);
     } catch (NoSuchObjectException e) {
       throw new MetaException(ExceptionUtils.getStackTrace(e));
     }
   }
 
   @Override
-  public List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name, String tbl_name)
+  public List<SQLDefaultConstraint> getDefaultConstraints(String catName, String dbName, String tblName)
       throws MetaException {
     try {
-      return getDefaultConstraintsInternal(catName, db_name, tbl_name, true, true);
+      return getDefaultConstraintsInternal(catName, dbName, tblName, true, true);
     } catch (NoSuchObjectException e) {
       throw new MetaException(ExceptionUtils.getStackTrace(e));
     }
   }
 
   @Override
-  public List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name, String tbl_name)
+  public List<SQLCheckConstraint> getCheckConstraints(String catName, String dbName, String tblName)
       throws MetaException {
     try {
-      return getCheckConstraintsInternal(catName, db_name, tbl_name, true, true);
+      return getCheckConstraintsInternal(catName, dbName, tblName, true, true);
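+      // As with the constraint getters above, a NoSuchObjectException from the internal
+      // lookup is rewrapped in the catch clause below as a MetaException carrying the stack trace.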
     } catch (NoSuchObjectException e) {
       throw new MetaException(ExceptionUtils.getStackTrace(e));
     }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 209e92a415..e21a461818 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -18,9 +18,6 @@
 package org.apache.hadoop.hive.metastore;
 
-import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.metastore.api.*;
-
 import java.lang.annotation.ElementType;
 import java.lang.annotation.Retention;
 import java.lang.annotation.RetentionPolicy;
@@ -31,6 +28,67 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.Catalog;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec;
+import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectionSpec;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+import org.apache.hadoop.hive.metastore.api.ISchemaName;
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.TableMeta;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMMapping;
+import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMPool;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo;
 import org.apache.thrift.TException;
@@ -40,8 +98,8 @@
   /***
    * Annotation to skip retries
    */
-  @Target(value = ElementType.METHOD)
-  @Retention(value = RetentionPolicy.RUNTIME)
+  @Target(ElementType.METHOD)
+  @Retention(RetentionPolicy.RUNTIME)
   @interface CanNotRetry {
   }
@@ -127,35 +185,35 @@ void createDatabase(Database db)
   /**
    * Get a database.
    * @param catalogName catalog the database is in.
-   * @param name name of the database.
+   * @param dbName name of the database.
    * @return the database.
    * @throws NoSuchObjectException if no such database exists.
    */
-  Database getDatabase(String catalogName, String name)
+  Database getDatabase(String catalogName, String dbName)
       throws NoSuchObjectException;
 
   /**
    * Drop a database.
    * @param catalogName catalog the database is in.
-   * @param dbname name of the database.
+   * @param dbName name of the database.
    * @return true if the database was dropped, pretty much always returns this if it returns.
    * @throws NoSuchObjectException no database in this catalog of this name to drop
    * @throws MetaException something went wrong, usually with the database.
    */
-  boolean dropDatabase(String catalogName, String dbname)
+  boolean dropDatabase(String catalogName, String dbName)
       throws NoSuchObjectException, MetaException;
 
   /**
    * Alter a database.
    * @param catalogName name of the catalog the database is in.
-   * @param dbname name of the database to alter
+   * @param dbName name of the database to alter
    * @param db new version of the database. This should be complete as it will fully replace the
    *           existing db object.
    * @return true if the change succeeds, could fail due to db constraint violations.
    * @throws NoSuchObjectException no database of this name exists to alter.
    * @throws MetaException something went wrong, usually with the database.
    */
-  boolean alterDatabase(String catalogName, String dbname, Database db)
+  boolean alterDatabase(String catalogName, String dbName, Database db)
       throws NoSuchObjectException, MetaException;
 
   /**
@@ -214,14 +272,12 @@ boolean dropTable(String catalogName, String dbName, String tableName)
    * @param catalogName catalog the table is in.
    * @param dbName database the table is in.
    * @param tableName table name.
-   * @param txnId transaction id of the calling transaction
    * @param writeIdList string format of valid writeId transaction list
    * @return table object, or null if no such table exists (wow it would be nice if we either
    *         consistently returned null or consistently threw NoSuchObjectException).
    * @throws MetaException something went wrong in the RDBMS
    */
-  Table getTable(String catalogName, String dbName, String tableName,
-      String writeIdList) throws MetaException;
+  Table getTable(String catalogName, String dbName, String tableName, String writeIdList) throws MetaException;
 
   /**
    * Add a partition.
@@ -268,28 +324,26 @@ boolean addPartitions(String catName, String dbName, String tblName,
    * @param catName catalog name.
    * @param dbName database name.
    * @param tableName table name.
-   * @param part_vals partition values for this table.
+   * @param partitionValues partition values for this table.
    * @return the partition.
    * @throws MetaException error reading from RDBMS.
    * @throws NoSuchObjectException no partition matching this specification exists.
    */
   Partition getPartition(String catName, String dbName, String tableName,
-      List<String> part_vals) throws MetaException, NoSuchObjectException;
+      List<String> partitionValues) throws MetaException, NoSuchObjectException;
 
   /**
    * Get a partition.
    * @param catName catalog name.
    * @param dbName database name.
    * @param tableName table name.
-   * @param part_vals partition values for this table.
-   * @param txnId transaction id of the calling transaction
+   * @param partitionValues partition values for this table.
    * @param writeIdList string format of valid writeId transaction list
    * @return the partition.
    * @throws MetaException error reading from RDBMS.
    * @throws NoSuchObjectException no partition matching this specification exists.
    */
-  Partition getPartition(String catName, String dbName, String tableName,
-      List<String> part_vals,
-      String writeIdList)
+  Partition getPartition(String catName, String dbName, String tableName, List<String> partitionValues,
+      String writeIdList)
       throws MetaException, NoSuchObjectException;
 
   /**
@@ -298,13 +352,13 @@ Partition getPartition(String catName, String dbName, String tableName,
    * @param dbName database name.
    * @param tableName table name.
    * @param partKeys list of partition keys used to generate the partition name.
-   * @param part_vals list of partition values.
+   * @param partitionValues list of partition values.
    * @return true if the partition exists, false otherwise.
    * @throws MetaException failure reading RDBMS
    * @throws NoSuchObjectException this is never thrown.
    */
   boolean doesPartitionExist(String catName, String dbName, String tableName,
-      List<FieldSchema> partKeys, List<String> part_vals)
+      List<FieldSchema> partKeys, List<String> partitionValues)
       throws MetaException, NoSuchObjectException;
 
   /**
@@ -312,7 +366,7 @@ boolean doesPartitionExist(String catName, String dbName, String tableName,
    * @param catName catalog name.
    * @param dbName database name.
    * @param tableName table name.
-   * @param part_vals list of partition values.
+   * @param partitionValues list of partition values.
    * @return true if the partition was dropped.
    * @throws MetaException Error accessing the RDBMS.
    * @throws NoSuchObjectException no partition matching this description exists
@@ -320,7 +374,7 @@ boolean doesPartitionExist(String catName, String dbName, String tableName,
    * @throws InvalidInputException error dropping the statistics for the partition
    */
   boolean dropPartition(String catName, String dbName, String tableName,
-      List<String> part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException,
+      List<String> partitionValues) throws MetaException, NoSuchObjectException, InvalidObjectException,
       InvalidInputException;
 
   /**
@@ -354,27 +408,27 @@ boolean dropPartition(String catName, String dbName, String tableName,
   /**
    * Alter a table.
    * @param catName catalog the table is in.
-   * @param dbname database the table is in.
-   * @param name name of the table.
+   * @param dbName database the table is in.
+   * @param tableName name of the table.
    * @param newTable New table object. Which parts of the table can be altered are
    *                 implementation specific.
-   * @return
+   * @return the changed/new table object
    * @throws InvalidObjectException The new table object is invalid.
    * @throws MetaException something went wrong, usually in the RDBMS or storage.
    */
-  Table alterTable(String catName, String dbname, String name, Table newTable,
+  Table alterTable(String catName, String dbName, String tableName, Table newTable,
       String queryValidWriteIds) throws InvalidObjectException, MetaException;
 
   /**
    * Update creation metadata for a materialized view.
    * @param catName catalog name.
-   * @param dbname database name.
-   * @param tablename table name.
+   * @param dbName database name.
+   * @param tableName table name.
    * @param cm new creation metadata
    * @throws MetaException error accessing the RDBMS.
    */
-  void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+  void updateCreationMetadata(String catName, String dbName, String tableName, CreationMetadata cm)
       throws MetaException;
 
   /**
@@ -425,8 +479,7 @@ void updateCreationMetadata(String catName, String dbname, String tablename, Cre
   /**
    * @param catName catalog name
-   * @param dbname
-   *          The name of the database from which to retrieve the tables
+   * @param dbName The name of the database from which to retrieve the tables
    * @param tableNames
    *          The names of the tables to retrieve.
    * @return A list of the tables retrievable from the database
    *         If there are duplicate names, only one instance of the table will be returned
    * @throws MetaException failure in querying the RDBMS.
    */
-  List<Table> getTableObjectsByName(String catName, String dbname, List<String> tableNames)
+  List<Table> getTableObjectsByName(String catName, String dbName, List<String> tableNames)
       throws MetaException, UnknownDBException;
 
   /**
@@ -453,32 +506,32 @@ void updateCreationMetadata(String catName, String dbname, String tablename, Cre
    *          The name of the database from which you will retrieve the table names
    * @param filter
    *          The filter string
-   * @param max_tables
+   * @param maxTables
    *          The maximum number of tables returned
    * @return A list of table names that match the desired filter
    * @throws MetaException
    * @throws UnknownDBException
    */
   List<String> listTableNamesByFilter(String catName, String dbName, String filter,
-      short max_tables) throws MetaException, UnknownDBException;
+      short maxTables) throws MetaException, UnknownDBException;
 
   /**
    * Get a partial or complete list of names for partitions of a table.
    * @param catName catalog name.
-   * @param db_name database name.
-   * @param tbl_name table name.
-   * @param max_parts maximum number of partitions to retrieve, -1 for all.
+   * @param dbName database name.
+   * @param tableName table name.
+   * @param maxPartitions maximum number of partitions to retrieve, -1 for all.
    * @return list of partition names.
    * @throws MetaException there was an error accessing the RDBMS
    */
-  List<String> listPartitionNames(String catName, String db_name,
-      String tbl_name, short max_parts) throws MetaException;
+  List<String> listPartitionNames(String catName, String dbName,
+      String tableName, short maxPartitions) throws MetaException;
 
   /**
    * Get a list of partition values as one big struct.
    * @param catName catalog name.
-   * @param db_name database name.
-   * @param tbl_name table name.
+   * @param dbName database name.
+   * @param tableName table name.
    * @param cols partition key columns
    * @param applyDistinct whether to apply distinct to the list
    * @param filter filter to apply to the partition names
@@ -488,45 +541,42 @@ void updateCreationMetadata(String catName, String dbname, String tablename, Cre
    * @return struct with all of the partition value information
    * @throws MetaException error access the RDBMS
    */
-  PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name,
+  PartitionValuesResponse listPartitionValues(String catName, String dbName, String tableName,
       List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending,
       List<FieldSchema> order, long maxParts) throws MetaException;
 
   /**
    * Alter a partition.
    * @param catName catalog name.
-   * @param db_name database name.
-   * @param tbl_name table name.
-   * @param part_vals partition values that describe the partition.
-   * @param new_part new partition object.  This should be a complete copy of the old with
+   * @param dbName database name.
+   * @param tableName table name.
+   * @param partitionValues partition values that describe the partition.
+   * @param newPartition new partition object.  This should be a complete copy of the old with
    *                 changes values, not just the parts to update.
-   * @return
+   * @return the changed/new partition object
    * @throws InvalidObjectException No such partition.
    * @throws MetaException error accessing the RDBMS.
    */
-  Partition alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
-      Partition new_part, String queryValidWriteIds)
+  Partition alterPartition(String catName, String dbName, String tableName, List<String> partitionValues,
+      Partition newPartition, String queryValidWriteIds)
       throws InvalidObjectException, MetaException;
 
   /**
    * Alter a set of partitions.
    * @param catName catalog name.
-   * @param db_name database name.
-   * @param tbl_name table name.
-   * @param part_vals_list list of list of partition values.  Each outer list describes one
+   * @param dbName database name.
+   * @param tableName table name.
+   * @param partitionValuesList list of list of partition values.  Each outer list describes one
    *                       partition (with its list of partition values).
-   * @param new_parts list of new partitions.  The order must match the old partitions described in
-   *                  part_vals_list.  Each of these should be a complete copy of the new
+   * @param newPartitions list of new partitions.  The order must match the old partitions described in
+   *                  partitionValuesList.  Each of these should be a complete copy of the new
    *                  partition, not just the pieces to update.
-   * @param txnId transaction id of the transaction that called this method.
-   * @param writeIdList valid write id list of the transaction on the current table
-   * @param writeid write id of the transaction for the table
-   * @return
+   * @param writeId write id of the transaction for the table
    * @throws InvalidObjectException One of the indicated partitions does not exist.
    * @throws MetaException error accessing the RDBMS.
    */
-  List<Partition> alterPartitions(String catName, String db_name, String tbl_name,
-      List<List<String>> part_vals_list, List<Partition> new_parts, long writeId,
+  List<Partition> alterPartitions(String catName, String dbName, String tableName,
+      List<List<String>> partitionValuesList, List<Partition> newPartitions, long writeId,
       String queryValidWriteIds)
       throws InvalidObjectException, MetaException;
 
@@ -789,8 +839,7 @@ boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
   boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
       throws InvalidObjectException, MetaException, NoSuchObjectException;
 
-  org.apache.hadoop.hive.metastore.api.Role getRole(
-      String roleName) throws NoSuchObjectException;
+  Role getRole(String roleName) throws NoSuchObjectException;
 
   List<String> listRoleNames();
 
@@ -802,9 +851,7 @@
 
   /**
-   * Get the role to principal grant mapping for given role
-   * @param roleName
-   * @return
+   * Get the role to principal grant mapping for given role.
    */
   List<RolePrincipalGrant> listRoleMembers(String roleName);
 
@@ -815,15 +862,15 @@ boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, Privile
    * @param dbName database name.
    * @param tblName table name.
    * @param partVals partition values
-   * @param user_name user to get privilege information for.
-   * @param group_names groups to get privilege information for.
+   * @param userName user to get privilege information for.
+   * @param groupNames groups to get privilege information for.
    * @return a partition
    * @throws MetaException error accessing the RDBMS.
    * @throws NoSuchObjectException no such partition exists
    * @throws InvalidObjectException error fetching privilege information
    */
   Partition getPartitionWithAuth(String catName, String dbName, String tblName,
-      List<String> partVals, String user_name, List<String> group_names)
+      List<String> partVals, String userName, List<String> groupNames)
       throws MetaException, NoSuchObjectException, InvalidObjectException;
 
   /**
@@ -847,35 +894,35 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName,
   /**
    * Lists partition names that match a given partial specification
    * @param catName catalog name.
-   * @param db_name
+   * @param dbName
    *          The name of the database which has the partitions
-   * @param tbl_name
+   * @param tableName
    *          The name of the table which has the partitions
-   * @param part_vals
+   * @param partitionValues
    *          A partial list of values for partitions in order of the table's partition keys.
    *          Entries can be empty if you only want to specify latter partitions.
-   * @param max_parts
+   * @param maxPartitions
    *          The maximum number of partitions to return
    * @return A list of partition names that match the partial spec.
    * @throws MetaException error accessing RDBMS
    * @throws NoSuchObjectException No such table exists
    */
-  List<String> listPartitionNamesPs(String catName, String db_name, String tbl_name,
-      List<String> part_vals, short max_parts)
+  List<String> listPartitionNamesPs(String catName, String dbName, String tableName,
+      List<String> partitionValues, short maxPartitions)
       throws MetaException, NoSuchObjectException;
 
   /**
    * Lists partitions that match a given partial specification and sets their auth privileges.
    * If userName and groupNames null, then no auth privileges are set.
    * @param catName catalog name.
-   * @param db_name
+   * @param dbName
    *          The name of the database which has the partitions
-   * @param tbl_name
+   * @param tableName
    *          The name of the table which has the partitions
-   * @param part_vals
+   * @param partitionValues
    *          A partial list of values for partitions in order of the table's partition keys
    *          Entries can be empty if you need to specify latter partitions.
-   * @param max_parts
+   * @param maxPartitions
    *          The maximum number of partitions to return
    * @param userName
    *          The user name for the partition for authentication privileges
@@ -886,8 +933,8 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName,
    * @throws NoSuchObjectException No such table exists
    * @throws InvalidObjectException error access privilege information
    */
-  List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
-      List<String> part_vals, short max_parts, String userName, List<String> groupNames)
+  List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tableName,
+      List<String> partitionValues, short maxPartitions, String userName, List<String> groupNames)
      throws MetaException, InvalidObjectException, NoSuchObjectException;
 
   /** Persists the given column statistics object to the metastore
@@ -909,7 +956,6 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName,
    * @throws MetaException error accessing the RDBMS.
    * @throws InvalidObjectException the stats object is invalid
    * @throws InvalidInputException unable to record the stats for the table
-   * @throws TException
    */
   Map<String, String> updatePartitionColumnStatistics(ColumnStatistics statsObj,
       List<String> partVals, String validWriteIds, long writeId)
@@ -921,14 +967,14 @@ Partition getPartitionWithAuth(String catName, String dbName, String tblName,
    * @param catName catalog name.
    * @param dbName name of the database, defaults to current database
    * @param tableName name of the table
-   * @param colName names of the columns for which statistics is requested
+   * @param colNames names of the columns for which statistics is requested
    * @return Relevant column statistics for the column for the given table
    * @throws NoSuchObjectException No such table
    * @throws MetaException error accessing the RDBMS
    *
    */
   ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
-      List<String> colName) throws MetaException, NoSuchObjectException;
+      List<String> colNames) throws MetaException, NoSuchObjectException;
 
   /**
    * Returns the relevant column statistics for a given column in a given table in a given database
@@ -936,8 +982,7 @@ ColumnStatistics getTableColumnStatistics(String catName, String dbName, String
    * @param catName catalog name.
    * @param dbName name of the database, defaults to current database
    * @param tableName name of the table
-   * @param colName names of the columns for which statistics is requested
-   * @param txnId transaction id of the calling transaction
+   * @param colNames names of the columns for which statistics is requested
    * @param writeIdList string format of valid writeId transaction list
    * @return Relevant column statistics for the column for the given table
    * @throws NoSuchObjectException No such table
@@ -946,7 +991,7 @@ ColumnStatistics getTableColumnStatistics(String catName, String dbName, String
    */
   ColumnStatistics getTableColumnStatistics(
       String catName, String dbName, String tableName,
-      List<String> colName, String writeIdList)
+      List<String> colNames, String writeIdList)
       throws MetaException, NoSuchObjectException;
 
   /**
@@ -971,7 +1016,6 @@ ColumnStatistics getTableColumnStatistics(
    * @param tblName table name.
    * @param partNames list of partition names.  These are names so must be key1=val1[/key2=val2...]
    * @param colNames list of columns to get stats for
-   * @param txnId transaction id of the calling transaction
    * @param writeIdList string format of valid writeId transaction list
    * @return list of statistics objects
    * @throws MetaException error accessing the RDBMS
@@ -1042,7 +1086,7 @@ void updateMasterKey(Integer seqNo, String key)
 
   String getMetaStoreSchemaVersion() throws MetaException;
 
-  abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException;
+  void setMetaStoreSchemaVersion(String version, String comment) throws MetaException;
 
   /**
    * Drop a list of partitions.
@@ -1223,9 +1267,47 @@ void dropFunction(String catName, String dbName, String funcName)
    * @throws MetaException error accessing RDBMS
    * @throws NoSuchObjectException no such table or partition
    */
+  AggrStats getAggrStatsFor(String catName, String dbName, String tblName,
+      List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
+
+  /**
+   * Get aggregated stats for a table or partition(s).
+   * @param catName catalog name.
+   * @param dbName database name.
+   * @param tblName table name.
+   * @param partNames list of partition names.  These are the names of the partitions, not
+   *                  values.
+   * @param colNames list of column names
+   * @return aggregated stats
+   * @throws MetaException error accessing RDBMS
+   * @throws NoSuchObjectException no such table or partition
+   * @deprecated As of release 4.0.0, this will be removed in 5.0.0.
+   *             (HIVE-20855)
+   *             Use {@link #getAggrStatsFor(String, String, String, List, List)} instead.
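+   *             A minimal migration for implementations (sketch; ObjectStore
+   *             delegates the same way) is
+   *             {@code return getAggrStatsFor(catName, dbName, tblName, partNames, colNames);}.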
+ */ + @Deprecated AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; + + /** + * Get aggregated stats for a table or partition(s). + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partNames list of partition names. These are the names of the partitions, not + * values. + * @param colNames list of column names + * @param writeIdList string format of valid writeId transaction list + * @return aggregated stats + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException no such table or partition + */ + AggrStats getAggrStatsFor(String catName, String dbName, String tblName, + List partNames, List colNames, + String writeIdList) + throws MetaException, NoSuchObjectException; + /** * Get aggregated stats for a table or partition(s). * @param catName catalog name. @@ -1234,17 +1316,21 @@ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, * @param partNames list of partition names. These are the names of the partitions, not * values. * @param colNames list of column names - * @param txnId transaction id of the calling transaction * @param writeIdList string format of valid writeId transaction list * @return aggregated stats * @throws MetaException error accessing RDBMS * @throws NoSuchObjectException no such table or partition + * @deprecated As of release 4.0.0, this will be removed in 5.0.0. + * (HIVE-20855) + * Use {@link #getAggrStatsFor(String, String, String, List, List, String)} instead. */ + @Deprecated AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames, String writeIdList) throws MetaException, NoSuchObjectException; + /** * Get column stats for all partitions of all tables in the database * @param catName catalog name @@ -1258,10 +1344,10 @@ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, /** * Get the next notification event. - * @param rqst Request containing information on the last processed notification. + * @param request Request containing information on the last processed notification. * @return list of notifications, sorted by eventId */ - NotificationEventResponse getNextNotification(NotificationEventRequest rqst); + NotificationEventResponse getNextNotification(NotificationEventRequest request); /** @@ -1281,16 +1367,14 @@ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, * Get the last issued notification event id. This is intended for use by the export command * so that users can determine the state of the system at the point of the export, * and determine which notification events happened before or after the export. - * @return */ CurrentNotificationEventId getCurrentNotificationEventId(); /** * Get the number of events corresponding to given database with fromEventId. * This is intended for use by the repl commands to track the progress of incremental dump. - * @return */ - NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst); + NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest request); /* * Flush any catalog objects held by the metastore implementation. Note that this does not @@ -1308,7 +1392,7 @@ AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, /** * @param fileIds List of file IDs from the filesystem. 
* @param metadata Metadata buffers corresponding to fileIds in the list. - * @param type The type; determines the class that can do additiona processing for metadata. + * @param type The type; determines the class that can do additional processing for metadata. */ void putFileMetadata(List fileIds, List metadata, FileMetadataExprType type) throws MetaException; @@ -1360,74 +1444,74 @@ void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] * Get the primary associated with a table. Strangely enough each SQLPrimaryKey is actually a * column in they key, not the key itself. Thus the list. * @param catName catalog name - * @param db_name database name - * @param tbl_name table name + * @param dbName database name + * @param tableName table name * @return list of primary key columns or an empty list if the table does not have a primary key * @throws MetaException error accessing the RDBMS */ - List getPrimaryKeys(String catName, String db_name, String tbl_name) + List getPrimaryKeys(String catName, String dbName, String tableName) throws MetaException; /** * Get the foreign keys for a table. All foreign keys for a particular table can be fetched by * passing null for the last two arguments. * @param catName catalog name. - * @param parent_db_name Database the table referred to is in. This can be null to match all + * @param parentDbName Database the table referred to is in. This can be null to match all * databases. - * @param parent_tbl_name Table that is referred to. This can be null to match all tables. - * @param foreign_db_name Database the table with the foreign key is in. - * @param foreign_tbl_name Table with the foreign key. + * @param parentTableName Table that is referred to. This can be null to match all tables. + * @param foreignDbName Database the table with the foreign key is in. + * @param foreignTblName Table with the foreign key. * @return List of all matching foreign key columns. Note that if more than one foreign key * matches the arguments the results here will be all mixed together into a single list. * @throws MetaException error access the RDBMS. */ - List getForeignKeys(String catName, String parent_db_name, - String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) + List getForeignKeys(String catName, String parentDbName, + String parentTableName, String foreignDbName, String foreignTblName) throws MetaException; /** * Get unique constraints associated with a table. * @param catName catalog name. - * @param db_name database name. - * @param tbl_name table name. + * @param dbName database name. + * @param tblName table name. * @return list of unique constraints * @throws MetaException error access the RDBMS. */ - List getUniqueConstraints(String catName, String db_name, - String tbl_name) throws MetaException; + List getUniqueConstraints(String catName, String dbName, + String tblName) throws MetaException; /** * Get not null constraints on a table. * @param catName catalog name. - * @param db_name database name. - * @param tbl_name table name. + * @param dbName database name. + * @param tblName table name. * @return list of not null constraints * @throws MetaException error accessing the RDBMS. */ - List getNotNullConstraints(String catName, String db_name, - String tbl_name) throws MetaException; + List getNotNullConstraints(String catName, String dbName, + String tblName) throws MetaException; /** * Get default values for columns in a table. 
* @param catName catalog name - * @param db_name database name - * @param tbl_name table name + * @param dbName database name + * @param tblName table name * @return list of default values defined on the table. * @throws MetaException error accessing the RDBMS */ - List getDefaultConstraints(String catName, String db_name, - String tbl_name) throws MetaException; + List getDefaultConstraints(String catName, String dbName, + String tblName) throws MetaException; /** * Get check constraints for columns in a table. * @param catName catalog name. - * @param db_name database name - * @param tbl_name table name - * @return ccheck constraints for this table + * @param dbName database name + * @param tblName table name + * @return check constraints for this table * @throws MetaException error accessing the RDBMS */ - List getCheckConstraints(String catName, String db_name, - String tbl_name) throws MetaException; + List getCheckConstraints(String catName, String dbName, + String tblName) throws MetaException; /** * Create a table with constraints @@ -1480,7 +1564,7 @@ void dropConstraint(String catName, String dbName, String tableName, String cons * @param pks Columns in the primary key. * @return the name of the constraint, as a list of strings. * @throws InvalidObjectException The SQLPrimaryKeys list is malformed - * @throws MetaException error accessing the RDMBS + * @throws MetaException error accessing the RDBMS */ List addPrimaryKeys(List pks) throws InvalidObjectException, MetaException; @@ -1532,8 +1616,6 @@ void dropConstraint(String catName, String dbName, String tableName, String cons /** * Gets the unique id of the backing datastore for the metadata - * @return - * @throws MetaException */ String getMetastoreDbUuid() throws MetaException; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java index 2e92a4f4e1..dddf064dd3 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/ByteArrayWrapper.java @@ -20,9 +20,15 @@ import java.util.Arrays; /** - * byte array with comparator + * byte array with comparator. */ public class ByteArrayWrapper { + + /** + * @deprecated As of release 4.0.0, this will be made private in 5.0.0. 
+ * (HIVE-20855) + */ + @Deprecated byte[] wrapped; ByteArrayWrapper(byte[] b) { @@ -42,4 +48,4 @@ public boolean equals(Object other) { public int hashCode() { return Arrays.hashCode(wrapped); } -} \ No newline at end of file +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java index 944c81313a..3af15142f9 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java @@ -30,7 +30,11 @@ import org.apache.hadoop.hive.metastore.utils.StringUtils; public class CacheUtils { - private static final String delimit = "\u0001"; + private static final String DELIMITER = "\u0001"; + private static final Pattern QUESTION_MARK_PATTERN = Pattern.compile("\\?"); + private static final Pattern STAR_PATTERN = Pattern.compile("\\*"); + private static final Pattern CARET_PATTERN = Pattern.compile("\\^"); + private static final Pattern DOLLAR_PATTERN = Pattern.compile("\\$"); public static String buildCatalogKey(String catName) { return catName; @@ -42,14 +46,13 @@ public static String buildDbKey(String catName, String dbName) { /** * Builds a key for the partition cache which is concatenation of partition values, each value - * separated by a delimiter - * + * separated by a delimiter. */ public static String buildPartitionCacheKey(List partVals) { if (partVals == null || partVals.isEmpty()) { return ""; } - return String.join(delimit, partVals); + return String.join(DELIMITER, partVals); } public static String buildTableKey(String catName, String dbName, String tableName) { @@ -62,22 +65,21 @@ public static String buildTableColKey(String catName, String dbName, String tabl } private static String buildKey(String... elements) { - return org.apache.commons.lang.StringUtils.join(elements, delimit); + return org.apache.commons.lang.StringUtils.join(elements, DELIMITER); } public static String[] splitDbName(String key) { - String[] names = key.split(delimit); + String[] names = key.split(DELIMITER); assert names.length == 2; return names; } /** * Builds a key for the partitions column cache which is concatenation of partition values, each - * value separated by a delimiter and the column name - * + * value separated by a delimiter and the column name. 
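// The cache keys built above are plain '\u0001'-joined strings. A small standalone sketch of
// the round trip, in the spirit of buildPartitionCacheKey, buildPartitonColStatsCacheKey and
// splitDbName (all concrete values here are made up):

import java.util.Arrays;
import java.util.List;

class CacheKeyDemo {  // hypothetical, illustrative only
  private static final String DELIMITER = "\u0001";

  public static void main(String[] args) {
    List<String> partVals = Arrays.asList("2024-01-01", "00");
    String partKey = String.join(DELIMITER, partVals);       // partition key: "2024-01-01\u000100"
    String colStatsKey = partKey + DELIMITER + "total_emp";  // partition col-stats key adds the column name
    // splitting a two-part db key back apart, as splitDbName does:
    String dbKey = String.join(DELIMITER, "hive", "default");
    String[] names = dbKey.split(DELIMITER);                 // ["hive", "default"]
    assert names.length == 2;
    System.out.println(colStatsKey.replace(DELIMITER, "^A") + " / " + Arrays.toString(names));
  }
}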
*/ public static String buildPartitonColStatsCacheKey(List partVals, String colName) { - return buildPartitionCacheKey(partVals) + delimit + colName; + return buildPartitionCacheKey(partVals) + DELIMITER + colName; } static Table assemble(TableWrapper wrapper, SharedCache sharedCache) { @@ -92,7 +94,7 @@ static Table assemble(TableWrapper wrapper, SharedCache sharedCache) { } if (sdCopy.getSkewedInfo() == null) { sdCopy.setSkewedInfo(new SkewedInfo(Collections.emptyList(), - Collections.emptyList(), Collections.emptyMap())); + Collections.emptyList(), Collections.emptyMap())); } sdCopy.setLocation(wrapper.getLocation()); sdCopy.setParameters(wrapper.getParameters()); @@ -113,7 +115,7 @@ static Partition assemble(PartitionWrapper wrapper, SharedCache sharedCache) { } if (sdCopy.getSkewedInfo() == null) { sdCopy.setSkewedInfo(new SkewedInfo(Collections.emptyList(), - Collections.emptyList(), Collections.emptyMap())); + Collections.emptyList(), Collections.emptyMap())); } sdCopy.setLocation(wrapper.getLocation()); sdCopy.setParameters(wrapper.getParameters()); @@ -125,12 +127,20 @@ static Partition assemble(PartitionWrapper wrapper, SharedCache sharedCache) { public static boolean matches(String name, String pattern) { String[] subpatterns = pattern.trim().split("\\|"); for (String subpattern : subpatterns) { - subpattern = "(?i)" + subpattern.replaceAll("\\?", ".{1}").replaceAll("\\*", ".*") - .replaceAll("\\^", "\\\\^").replaceAll("\\$", "\\\\$"); + subpattern = "(?i)" + DOLLAR_PATTERN.matcher( + CARET_PATTERN.matcher( + STAR_PATTERN.matcher( + QUESTION_MARK_PATTERN.matcher(subpattern).replaceAll(".{1}") + ).replaceAll(".*") + ).replaceAll("\\\\^") + ).replaceAll("\\\\$"); if (Pattern.matches(subpattern, StringUtils.normalizeIdentifier(name))) { return true; } } return false; } + + private CacheUtils() { + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index e4ef46fdb4..8f1a754496 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -17,10 +17,8 @@ */ package org.apache.hadoop.hive.metastore.cache; - import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.EmptyStackException; import java.util.HashMap; @@ -30,13 +28,13 @@ import java.util.Stack; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.hadoop.conf.Configurable; +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.DatabaseName; import org.apache.hadoop.hive.common.StatsSetupConst; @@ -49,7 +47,68 @@ import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import 
org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.GetPartitionsFilterSpec; +import org.apache.hadoop.hive.metastore.api.GetPartitionsProjectionSpec; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.ISchema; +import org.apache.hadoop.hive.metastore.api.ISchemaName; +import org.apache.hadoop.hive.metastore.api.InvalidInputException; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.RuntimeStat; +import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.SchemaVersion; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; +import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; +import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMNullablePool; +import 
org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMPool; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; +import org.apache.hadoop.hive.metastore.api.WriteEventInfo; import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator; import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory; @@ -60,15 +119,12 @@ import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; -import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo; -import org.apache.hadoop.hive.metastore.utils.StringUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; - import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; @@ -82,23 +138,28 @@ // TODO initial load slow? // TODO size estimation -public class CachedStore implements RawStore, Configurable { - private static ScheduledExecutorService cacheUpdateMaster = null; - private static List whitelistPatterns = null; - private static List blacklistPatterns = null; +/** + * An implementation of RawStore that caches results from another RawStore implementation in memory. + * On startup it prewarms the cache by retrieving all catalogs, tables and partitions. + * The memory it uses can be constrained, and a blacklist and a whitelist control which objects are cached. 
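// The class follows a read-through (and, for mutations, write-through) decorator pattern
// around the underlying RawStore. A condensed, hypothetical distillation of the read path as
// it appears in the table accessors further down in this patch; the real methods also
// normalize identifiers, honor the black/white lists and may throw instead of falling back:

import java.util.function.Supplier;

final class ReadThrough {
  static <T> T get(boolean prewarmed, Supplier<T> cache, Supplier<T> rawStore) {
    if (!prewarmed) {
      return rawStore.get();  // cache still cold: always delegate
    }
    T cached = cache.get();
    return cached != null ? cached : rawStore.get();  // miss: fall back to the raw store
  }
}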
+ */ +public class CachedStore implements RawStore { + private static ScheduledExecutorService cacheUpdateMaster; + private static List whitelistPatterns; + private static List blacklistPatterns; // Default value set to 100 milliseconds for test purpose - private static long DEFAULT_CACHE_REFRESH_PERIOD = 100; + private static final long DEFAULT_CACHE_REFRESH_PERIOD = 100; // Time after which metastore cache is updated from metastore DB by the background update thread private static long cacheRefreshPeriodMS = DEFAULT_CACHE_REFRESH_PERIOD; - private static AtomicBoolean isCachePrewarmed = new AtomicBoolean(false); - private static TablesPendingPrewarm tblsPendingPrewarm = new TablesPendingPrewarm(); - private RawStore rawStore = null; + private static final AtomicBoolean IS_CACHE_PREWARMED = new AtomicBoolean(false); + private static final TablesPendingPrewarm TABLES_PENDING_PREWARM = new TablesPendingPrewarm(); + private RawStore rawStore; private Configuration conf; private boolean areTxnStatsSupported; - private PartitionExpressionProxy expressionProxy = null; - private static final SharedCache sharedCache = new SharedCache(); + private PartitionExpressionProxy expressionProxy; + private static final SharedCache SHARED_CACHE = new SharedCache(); - static final private Logger LOG = LoggerFactory.getLogger(CachedStore.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(CachedStore.class.getName()); @Override public void setConf(Configuration conf) { @@ -109,9 +170,8 @@ public void setConf(Configuration conf) { } /** - * Similar to setConf but used from within the tests - * This does start the background thread for prewarm and update - * @param conf + * Similar to setConf but used from within the tests. + * This does start the background thread for prewarm and update. 
*/ void setConfForTest(Configuration conf) { setConfInternal(conf); @@ -124,7 +184,7 @@ private void setConfInternal(Configuration conf) { MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName()); if (rawStore == null) { try { - rawStore = (JavaUtils.getClass(rawStoreClassName, RawStore.class)).newInstance(); + rawStore = JavaUtils.getClass(rawStoreClassName, RawStore.class).newInstance(); } catch (Exception e) { throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e); } @@ -132,7 +192,7 @@ private void setConfInternal(Configuration conf) { rawStore.setConf(conf); Configuration oldConf = this.conf; this.conf = conf; - this.areTxnStatsSupported = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED); + areTxnStatsSupported = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED); if (expressionProxy != null && conf != oldConf) { LOG.warn("Unexpected setConf when we were already configured"); } else { @@ -143,7 +203,7 @@ private void setConfInternal(Configuration conf) { private void initSharedCache(Configuration conf) { long maxSharedCacheSizeInBytes = MetastoreConf.getSizeVar(conf, ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY); - sharedCache.initialize(maxSharedCacheSizeInBytes); + SHARED_CACHE.initialize(maxSharedCacheSizeInBytes); if (maxSharedCacheSizeInBytes > 0) { LOG.info("Maximum memory that the cache will use: {} GB", maxSharedCacheSizeInBytes / (1024 * 1024 * 1024)); @@ -156,30 +216,30 @@ private void initSharedCache(Configuration conf) { * ObjectStore and populating the respective caches */ static void prewarm(RawStore rawStore) { - if (isCachePrewarmed.get()) { + if (IS_CACHE_PREWARMED.get()) { return; } long startTime = System.nanoTime(); LOG.info("Prewarming CachedStore"); - while (!isCachePrewarmed.get()) { + while (!IS_CACHE_PREWARMED.get()) { // Prevents throwing exceptions in our raw store calls since we're not using RawStoreProxy Deadline.registerIfNot(1000000); Collection catalogsToCache; try { catalogsToCache = catalogsToCache(rawStore); - LOG.info("Going to cache catalogs: " - + org.apache.commons.lang.StringUtils.join(catalogsToCache, ", ")); + LOG.info("Going to cache catalogs: {}", StringUtils.join(catalogsToCache, ", ")); List catalogs = new ArrayList<>(catalogsToCache.size()); for (String catName : catalogsToCache) { catalogs.add(rawStore.getCatalog(catName)); } - sharedCache.populateCatalogsInCache(catalogs); + SHARED_CACHE.populateCatalogsInCache(catalogs); } catch (MetaException | NoSuchObjectException e) { LOG.warn("Failed to populate catalogs in cache, going to try again", e); // try again continue; } LOG.info("Finished prewarming catalogs, starting on databases"); + List databases = new ArrayList<>(); for (String catName : catalogsToCache) { try { @@ -190,37 +250,36 @@ static void prewarm(RawStore rawStore) { databases.add(rawStore.getDatabase(catName, dbName)); } catch (NoSuchObjectException e) { // Continue with next database - LOG.warn("Failed to cache database " - + DatabaseName.getQualified(catName, dbName) + ", moving on", e); + LOG.warn("Failed to cache database {}, moving on", DatabaseName.getQualified(catName, dbName), e); } } } catch (MetaException e) { - LOG.warn("Failed to cache databases in catalog " + catName + ", moving on", e); + LOG.warn("Failed to cache databases in catalog {}, moving on", catName, e); } } - sharedCache.populateDatabasesInCache(databases); + SHARED_CACHE.populateDatabasesInCache(databases); LOG.info( "Databases cache is now prewarmed. 
Now adding tables, partitions and statistics to the cache"); + int numberOfDatabasesCachedSoFar = 0; for (Database db : databases) { - String catName = StringUtils.normalizeIdentifier(db.getCatalogName()); - String dbName = StringUtils.normalizeIdentifier(db.getName()); + String catName = normalizeIdentifier(db.getCatalogName()); + String dbName = normalizeIdentifier(db.getName()); List tblNames; try { tblNames = rawStore.getAllTables(catName, dbName); } catch (MetaException e) { - LOG.warn("Failed to cache tables for database " - + DatabaseName.getQualified(catName, dbName) + ", moving on"); + LOG.warn("Failed to cache tables for database {}, moving on", DatabaseName.getQualified(catName, dbName)); // Continue with next database continue; } - tblsPendingPrewarm.addTableNamesForPrewarming(tblNames); + TABLES_PENDING_PREWARM.addTableNamesForPrewarming(tblNames); int totalTablesToCache = tblNames.size(); int numberOfTablesCachedSoFar = 0; - while (tblsPendingPrewarm.hasMoreTablesToPrewarm()) { + while (TABLES_PENDING_PREWARM.hasMoreTablesToPrewarm()) { try { String tblName = - StringUtils.normalizeIdentifier(tblsPendingPrewarm.getNextTableNameToPrewarm()); + normalizeIdentifier(TABLES_PENDING_PREWARM.getNextTableNameToPrewarm()); if (!shouldCacheTable(catName, dbName, tblName)) { continue; } @@ -257,7 +316,7 @@ static void prewarm(RawStore rawStore) { // partition Deadline.startTimer("getAggrPartitionColumnStatistics"); aggrStatsAllPartitions = - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + rawStore.getAggrStatsFor(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); // Remove default partition from partition names and get aggregate // stats again @@ -274,7 +333,7 @@ static void prewarm(RawStore rawStore) { partNames.remove(defaultPartitionName); Deadline.startTimer("getAggrPartitionColumnStatistics"); aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + rawStore.getAggrStatsFor(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); } } else { @@ -284,7 +343,7 @@ static void prewarm(RawStore rawStore) { Deadline.stopTimer(); } // If the table could not cached due to memory limit, stop prewarm - boolean isSuccess = sharedCache.populateTableInCache(table, tableColStats, partitions, + boolean isSuccess = SHARED_CACHE.populateTableInCache(table, tableColStats, partitions, partitionColStats, aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); if (isSuccess) { LOG.trace("Cached Database: {}'s Table: {}.", dbName, tblName); @@ -300,30 +359,32 @@ static void prewarm(RawStore rawStore) { // Continue with next table continue; } + ++numberOfTablesCachedSoFar; LOG.debug("Processed database: {}'s table: {}. Cached {} / {} tables so far.", dbName, - tblName, ++numberOfTablesCachedSoFar, totalTablesToCache); + tblName, numberOfTablesCachedSoFar, totalTablesToCache); } catch (EmptyStackException e) { // We've prewarmed this database, continue with the next one continue; } } + ++numberOfDatabasesCachedSoFar; LOG.debug("Processed database: {}. 
Cached {} / {} databases so far.", dbName, - ++numberOfDatabasesCachedSoFar, databases.size()); + numberOfDatabasesCachedSoFar, databases.size()); } completePrewarm(startTime); } } private static void completePrewarm(long startTime) { - isCachePrewarmed.set(true); + IS_CACHE_PREWARMED.set(true); LOG.info("CachedStore initialized"); long endTime = System.nanoTime(); - LOG.info("Time taken in prewarming = " + (endTime - startTime) / 1000000 + "ms"); - sharedCache.completeTableCachePrewarm(); + LOG.info("Time taken in prewarming = {}ms", (endTime - startTime) / 1000000); + SHARED_CACHE.completeTableCachePrewarm(); } - static class TablesPendingPrewarm { - private Stack tableNames = new Stack<>(); + private static class TablesPendingPrewarm { + private final Stack tableNames = new Stack<>(); private synchronized void addTableNamesForPrewarming(List tblNames) { tableNames.clear(); @@ -350,7 +411,7 @@ private synchronized void prioritizeTableForPrewarm(String tblName) { @VisibleForTesting static void setCachePrewarmedState(boolean state) { - isCachePrewarmed.set(state); + IS_CACHE_PREWARMED.set(state); } private static void initBlackListWhiteList(Configuration conf) { @@ -391,14 +452,11 @@ static synchronized void startCacheUpdateService(Configuration conf, boolean run ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS); } LOG.info("CachedStore: starting cache update service (run every {} ms", cacheRefreshPeriodMS); - cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { - Thread t = Executors.defaultThreadFactory().newThread(r); - t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId()); - t.setDaemon(true); - return t; - } + cacheUpdateMaster = Executors.newScheduledThreadPool(1, (Runnable r) -> { + Thread t = Executors.defaultThreadFactory().newThread(r); + t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId()); + t.setDaemon(true); + return t; }); if (!runOnlyOnce) { cacheUpdateMaster.scheduleAtFixedRate(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0, @@ -436,7 +494,7 @@ static void setCacheRefreshPeriod(long time) { } static class CacheUpdateMasterWork implements Runnable { - private boolean shouldRunPrewarm = true; + private boolean shouldRunPrewarm; private final RawStore rawStore; CacheUpdateMasterWork(Configuration conf, boolean shouldRunPrewarm) { @@ -455,20 +513,19 @@ static void setCacheRefreshPeriod(long time) { @Override public void run() { - if (!shouldRunPrewarm) { - // TODO: prewarm and update can probably be merged. - update(); - } else { + if (shouldRunPrewarm) { try { prewarm(rawStore); } catch (Exception e) { LOG.error("Prewarm failure", e); - return; } + } else { + // TODO: prewarm and update can probably be merged. 
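// The update service above runs on a single-threaded scheduler with named daemon threads,
// built from the lambda thread factory shown earlier. A standalone sketch of the same
// construction (period shortened and the task body replaced with a placeholder):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class CacheUpdateDemo {  // hypothetical, illustrative only
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService master = Executors.newScheduledThreadPool(1, r -> {
      Thread t = Executors.defaultThreadFactory().newThread(r);
      t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId());
      t.setDaemon(true);  // must not keep the metastore JVM alive on shutdown
      return t;
    });
    master.scheduleAtFixedRate(() -> System.out.println("refresh cache"), 0, 100, TimeUnit.MILLISECONDS);
    Thread.sleep(350);    // let a few iterations run, then exit
    master.shutdown();
  }
}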
+ update(); } } - void update() { + private void update() { Deadline.registerIfNot(1000000); LOG.debug("CachedStore: updating cached objects"); try { @@ -500,8 +557,8 @@ void update() { updateTableAggregatePartitionColStats(rawStore, catName, dbName, tblName); } } - } - sharedCache.incrementUpdateCount(); + } + SHARED_CACHE.incrementUpdateCount(); } catch (MetaException e) { LOG.error("Updating CachedStore: error happen when refresh; skipping this iteration", e); } @@ -512,41 +569,39 @@ private void updateDatabases(RawStore rawStore, String catName, List dbN // Prepare the list of databases List databases = new ArrayList<>(); for (String dbName : dbNames) { - Database db; try { - db = rawStore.getDatabase(catName, dbName); + Database db = rawStore.getDatabase(catName, dbName); databases.add(db); } catch (NoSuchObjectException e) { - LOG.info("Updating CachedStore: database - " + catName + "." + dbName - + " does not exist.", e); + LOG.info("Updating CachedStore: database - {}.{} does not exist.", catName, dbName, e); } } - sharedCache.refreshDatabasesInCache(databases); + SHARED_CACHE.refreshDatabasesInCache(databases); } private void updateTables(RawStore rawStore, String catName, String dbName) { - List
tables = new ArrayList<>(); try { List tblNames = rawStore.getAllTables(catName, dbName); + List
tables = new ArrayList<>(); for (String tblName : tblNames) { if (!shouldCacheTable(catName, dbName, tblName)) { continue; } - Table table = rawStore.getTable(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), - StringUtils.normalizeIdentifier(tblName)); + Table table = rawStore.getTable(normalizeIdentifier(catName), + normalizeIdentifier(dbName), + normalizeIdentifier(tblName)); tables.add(table); } - sharedCache.refreshTablesInCache(catName, dbName, tables); + SHARED_CACHE.refreshTablesInCache(catName, dbName, tables); } catch (MetaException e) { - LOG.debug("Unable to refresh cached tables for database: " + dbName, e); + LOG.debug("Unable to refresh cached tables for database: {}", dbName, e); } } private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) { - boolean committed = false; rawStore.openTransaction(); + boolean committed = false; try { Table table = rawStore.getTable(catName, dbName, tblName); if (!table.isSetPartitionKeys()) { @@ -557,19 +612,19 @@ private void updateTableColStats(RawStore rawStore, String catName, String dbNam rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); Deadline.stopTimer(); if (tableColStats != null) { - sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), - StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); + SHARED_CACHE.refreshTableColStatsInCache(normalizeIdentifier(catName), + normalizeIdentifier(dbName), + normalizeIdentifier(tblName), tableColStats.getStatsObj()); // Update the table to get consistent stats state. - sharedCache.alterTableInCache(catName, dbName, tblName, table); + SHARED_CACHE.alterTableInCache(catName, dbName, tblName, table); } } committed = rawStore.commitTransaction(); } catch (MetaException | NoSuchObjectException e) { - LOG.info("Unable to refresh table column stats for table: " + tblName, e); + LOG.info("Unable to refresh table column stats for table: {}", tblName, e); } finally { if (!committed) { - sharedCache.removeAllTableColStatsFromCache(catName, dbName, tblName); + SHARED_CACHE.removeAllTableColStatsFromCache(catName, dbName, tblName); rawStore.rollbackTransaction(); } } @@ -580,17 +635,17 @@ private void updateTablePartitions(RawStore rawStore, String catName, String dbN Deadline.startTimer("getPartitions"); List partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE); Deadline.stopTimer(); - sharedCache.refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), - StringUtils.normalizeIdentifier(tblName), partitions); + SHARED_CACHE.refreshPartitionsInCache(normalizeIdentifier(catName), + normalizeIdentifier(dbName), + normalizeIdentifier(tblName), partitions); } catch (MetaException | NoSuchObjectException e) { - LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); + LOG.info("Updating CachedStore: unable to read partitions of table: {}", tblName, e); } } private void updateTablePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) { - boolean committed = false; rawStore.openTransaction(); + boolean committed = false; try { Table table = rawStore.getTable(catName, dbName, tblName); List colNames = MetaStoreUtils.getColumnNamesForTable(table); @@ -600,18 +655,18 @@ private void updateTablePartitionColStats(RawStore rawStore, String catName, Str List partitionColStats = 
rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); - sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats); + SHARED_CACHE.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats); List parts = rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); // Also save partitions for consistency as they have the stats state. for (Partition part : parts) { - sharedCache.alterPartitionInCache(catName, dbName, tblName, part.getValues(), part); + SHARED_CACHE.alterPartitionInCache(catName, dbName, tblName, part.getValues(), part); } committed = rawStore.commitTransaction(); } catch (MetaException | NoSuchObjectException e) { - LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); + LOG.info("Updating CachedStore: unable to read partitions of table: {}", tblName, e); } finally { if (!committed) { - sharedCache.removeAllPartitionColStatsFromCache(catName, dbName, tblName); + SHARED_CACHE.removeAllPartitionColStatsFromCache(catName, dbName, tblName); rawStore.rollbackTransaction(); } } @@ -626,34 +681,33 @@ private void updateTableAggregatePartitionColStats(RawStore rawStore, String cat List partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); List colNames = MetaStoreUtils.getColumnNamesForTable(table); if ((partNames != null) && (partNames.size() > 0)) { - Deadline.startTimer("getAggregareStatsForAllPartitions"); + Deadline.startTimer("getAggregateStatsForAllPartitions"); AggrStats aggrStatsAllPartitions = - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + rawStore.getAggrStatsFor(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); // Remove default partition from partition names and get aggregate stats again List partKeys = table.getPartitionKeys(); String defaultPartitionValue = MetastoreConf.getVar(rawStore.getConf(), ConfVars.DEFAULTPARTITIONNAME); - List partCols = new ArrayList(); - List partVals = new ArrayList(); + List partCols = new ArrayList<>(); + List partVals = new ArrayList<>(); for (FieldSchema fs : partKeys) { partCols.add(fs.getName()); partVals.add(defaultPartitionValue); } String defaultPartitionName = FileUtils.makePartName(partCols, partVals); partNames.remove(defaultPartitionName); - Deadline.startTimer("getAggregareStatsForAllPartitionsExceptDefault"); + Deadline.startTimer("getAggregateStatsForAllPartitionsExceptDefault"); AggrStats aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); + rawStore.getAggrStatsFor(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); - sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), - StringUtils.normalizeIdentifier(tblName), aggrStatsAllPartitions, + SHARED_CACHE.refreshAggregateStatsInCache(normalizeIdentifier(catName), + normalizeIdentifier(dbName), + normalizeIdentifier(tblName), aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); } } catch (MetaException | NoSuchObjectException e) { - LOG.info("Updating CachedStore: unable to read aggregate column stats of table: " + tblName, - e); + LOG.info("Updating CachedStore: unable to read aggregate column stats of table: {}", tblName, e); } } } @@ -691,98 +745,98 @@ public void rollbackTransaction() { @Override public void createCatalog(Catalog cat) throws MetaException { rawStore.createCatalog(cat); - 
sharedCache.addCatalogToCache(cat); + SHARED_CACHE.addCatalogToCache(cat); } @Override public void alterCatalog(String catName, Catalog cat) throws MetaException, InvalidOperationException { rawStore.alterCatalog(catName, cat); - sharedCache.alterCatalogInCache(StringUtils.normalizeIdentifier(catName), cat); + SHARED_CACHE.alterCatalogInCache(normalizeIdentifier(catName), cat); } @Override public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { - if (!sharedCache.isCatalogCachePrewarmed()) { + if (!SHARED_CACHE.isCatalogCachePrewarmed()) { return rawStore.getCatalog(catalogName); } - Catalog cat = sharedCache.getCatalogFromCache(normalizeIdentifier(catalogName)); + Catalog cat = SHARED_CACHE.getCatalogFromCache(normalizeIdentifier(catalogName)); if (cat == null) { - throw new NoSuchObjectException(); + throw new NoSuchObjectException("Catalog '" + catalogName + "' not found in cache"); } return cat; } @Override public List getCatalogs() throws MetaException { - if (!sharedCache.isCatalogCachePrewarmed()) { + if (!SHARED_CACHE.isCatalogCachePrewarmed()) { return rawStore.getCatalogs(); } - return sharedCache.listCachedCatalogs(); + return SHARED_CACHE.listCachedCatalogs(); } @Override public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { rawStore.dropCatalog(catalogName); catalogName = catalogName.toLowerCase(); - sharedCache.removeCatalogFromCache(catalogName); + SHARED_CACHE.removeCatalogFromCache(catalogName); } @Override public void createDatabase(Database db) throws InvalidObjectException, MetaException { rawStore.createDatabase(db); - sharedCache.addDatabaseToCache(db); + SHARED_CACHE.addDatabaseToCache(db); } @Override - public Database getDatabase(String catName, String dbName) throws NoSuchObjectException { - if (!sharedCache.isDatabaseCachePrewarmed()) { - return rawStore.getDatabase(catName, dbName); + public Database getDatabase(String catalogName, String dbName) throws NoSuchObjectException { + if (!SHARED_CACHE.isDatabaseCachePrewarmed()) { + return rawStore.getDatabase(catalogName, dbName); } dbName = dbName.toLowerCase(); - Database db = sharedCache.getDatabaseFromCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName)); + Database db = SHARED_CACHE.getDatabaseFromCache(normalizeIdentifier(catalogName), + normalizeIdentifier(dbName)); if (db == null) { - throw new NoSuchObjectException(); + throw new NoSuchObjectException("Database '" + dbName + "' in catalog '" + catalogName + "' not found in cache"); } return db; } @Override - public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException { - boolean succ = rawStore.dropDatabase(catName, dbName); + public boolean dropDatabase(String catalogName, String dbName) throws NoSuchObjectException, MetaException { + boolean succ = rawStore.dropDatabase(catalogName, dbName); if (succ) { - sharedCache.removeDatabaseFromCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName)); + SHARED_CACHE.removeDatabaseFromCache(normalizeIdentifier(catalogName), + normalizeIdentifier(dbName)); } return succ; } @Override - public boolean alterDatabase(String catName, String dbName, Database db) + public boolean alterDatabase(String catalogName, String dbName, Database db) throws NoSuchObjectException, MetaException { - boolean succ = rawStore.alterDatabase(catName, dbName, db); + boolean succ = rawStore.alterDatabase(catalogName, dbName, db); if (succ) { - 
sharedCache.alterDatabaseInCache(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), db); + SHARED_CACHE.alterDatabaseInCache(normalizeIdentifier(catalogName), + normalizeIdentifier(dbName), db); } return succ; } @Override - public List getDatabases(String catName, String pattern) throws MetaException { - if (!sharedCache.isDatabaseCachePrewarmed()) { - return rawStore.getDatabases(catName, pattern); + public List getDatabases(String catalogName, String pattern) throws MetaException { + if (!SHARED_CACHE.isDatabaseCachePrewarmed()) { + return rawStore.getDatabases(catalogName, pattern); } - return sharedCache.listCachedDatabases(catName, pattern); + return SHARED_CACHE.listCachedDatabases(catalogName, pattern); } @Override - public List getAllDatabases(String catName) throws MetaException { - if (!sharedCache.isDatabaseCachePrewarmed()) { - return rawStore.getAllDatabases(catName); + public List getAllDatabases(String catalogName) throws MetaException { + if (!SHARED_CACHE.isDatabaseCachePrewarmed()) { + return rawStore.getAllDatabases(catalogName); } - return sharedCache.listCachedDatabases(catName); + return SHARED_CACHE.listCachedDatabases(catalogName); } @Override @@ -828,50 +882,50 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException return; } validateTableType(tbl); - sharedCache.addTableToCache(catName, dbName, tblName, tbl); + SHARED_CACHE.addTableToCache(catName, dbName, tblName, tbl); } @Override - public boolean dropTable(String catName, String dbName, String tblName) + public boolean dropTable(String catalogName, String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.dropTable(catName, dbName, tblName); + boolean succ = rawStore.dropTable(catalogName, dbName, tableName); if (succ) { - catName = normalizeIdentifier(catName); + catalogName = normalizeIdentifier(catalogName); dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catalogName, dbName, tableName)) { + return true; } - sharedCache.removeTableFromCache(catName, dbName, tblName); + SHARED_CACHE.removeTableFromCache(catalogName, dbName, tableName); } return succ; } @Override - public Table getTable(String catName, String dbName, String tblName) throws MetaException { - return getTable(catName, dbName, tblName, null); + public Table getTable(String catalogName, String dbName, String tableName) throws MetaException { + return getTable(catalogName, dbName, tableName, null); } @Override - public Table getTable(String catName, String dbName, String tblName, String validWriteIds) throws MetaException { - catName = normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.getTable(catName, dbName, tblName, validWriteIds); + public Table getTable(String catalogName, String dbName, String tableName, String writeIdList) throws MetaException { + catalogName = normalizeIdentifier(catalogName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catalogName, dbName, tableName)) { + return rawStore.getTable(catalogName, dbName, tableName, writeIdList); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, 
tblName); + Table tbl = SHARED_CACHE.getTableFromCache(catalogName, dbName, tableName); if (tbl == null) { // This table is not yet loaded in cache // If the prewarm thread is working on this table's database, // let's move this table to the top of tblNamesBeingPrewarmed stack, // so that it gets loaded to the cache faster and is available for subsequent requests - tblsPendingPrewarm.prioritizeTableForPrewarm(tblName); - return rawStore.getTable(catName, dbName, tblName, validWriteIds); + TABLES_PENDING_PREWARM.prioritizeTableForPrewarm(tableName); + return rawStore.getTable(catalogName, dbName, tableName, writeIdList); } - if (validWriteIds != null) { + if (writeIdList != null) { tbl.setParameters( - adjustStatsParamsForGet(tbl.getParameters(), tbl.getParameters(), tbl.getWriteId(), validWriteIds)); + adjustStatsParamsForGet(tbl.getParameters(), tbl.getParameters(), tbl.getWriteId(), writeIdList)); } tbl.unsetPrivileges(); @@ -903,9 +957,9 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE String tblName = normalizeIdentifier(part.getTableName()); String catName = part.isSetCatName() ? normalizeIdentifier(part.getCatName()) : DEFAULT_CATALOG_NAME; if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; + return true; } - sharedCache.addPartitionToCache(catName, dbName, tblName, part); + SHARED_CACHE.addPartitionToCache(catName, dbName, tblName, part); } return succ; } @@ -919,9 +973,9 @@ public boolean addPartitions(String catName, String dbName, String tblName, List dbName = normalizeIdentifier(dbName); tblName = normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; + return true; } - sharedCache.addPartitionsToCache(catName, dbName, tblName, parts); + SHARED_CACHE.addPartitionsToCache(catName, dbName, tblName, parts); } return succ; } @@ -935,84 +989,84 @@ public boolean addPartitions(String catName, String dbName, String tblName, Part dbName = normalizeIdentifier(dbName); tblName = normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; + return true; } PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator(); while (iterator.hasNext()) { Partition part = iterator.next(); - sharedCache.addPartitionToCache(catName, dbName, tblName, part); + SHARED_CACHE.addPartitionToCache(catName, dbName, tblName, part); } } return succ; } @Override - public Partition getPartition(String catName, String dbName, String tblName, List part_vals) + public Partition getPartition(String catName, String dbName, String tableName, List partitionValues) throws MetaException, NoSuchObjectException { - return getPartition(catName, dbName, tblName, part_vals, null); + return getPartition(catName, dbName, tableName, partitionValues, null); } @Override - public Partition getPartition(String catName, String dbName, String tblName, - List part_vals, String validWriteIds) + public Partition getPartition(String catName, String dbName, String tableName, + List partitionValues, String writeIdList) throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { return rawStore.getPartition( - catName, dbName, tblName, part_vals, validWriteIds); + catName, 
dbName, tableName, partitionValues, writeIdList); } - Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals); + Partition part = SHARED_CACHE.getPartitionFromCache(catName, dbName, tableName, partitionValues); if (part == null) { // The table containing the partition is not yet loaded in cache return rawStore.getPartition( - catName, dbName, tblName, part_vals, validWriteIds); + catName, dbName, tableName, partitionValues, writeIdList); } - if (validWriteIds != null) { - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + if (writeIdList != null) { + Table table = SHARED_CACHE.getTableFromCache(catName, dbName, tableName); if (table == null) { // The table containing the partition is not yet loaded in cache return rawStore.getPartition( - catName, dbName, tblName, part_vals, validWriteIds); + catName, dbName, tableName, partitionValues, writeIdList); } part.setParameters(adjustStatsParamsForGet(table.getParameters(), - part.getParameters(), part.getWriteId(), validWriteIds)); + part.getParameters(), part.getWriteId(), writeIdList)); } return part; } @Override - public boolean doesPartitionExist(String catName, String dbName, String tblName, - List partKeys, List part_vals) + public boolean doesPartitionExist(String catName, String dbName, String tableName, + List partKeys, List partitionValues) throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { + return rawStore.doesPartitionExist(catName, dbName, tableName, partKeys, partitionValues); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + Table tbl = SHARED_CACHE.getTableFromCache(catName, dbName, tableName); if (tbl == null) { // The table containing the partition is not yet loaded in cache - return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals); + return rawStore.doesPartitionExist(catName, dbName, tableName, partKeys, partitionValues); } - return sharedCache.existPartitionFromCache(catName, dbName, tblName, part_vals); + return SHARED_CACHE.existPartitionFromCache(catName, dbName, tableName, partitionValues); } @Override - public boolean dropPartition(String catName, String dbName, String tblName, List part_vals) + public boolean dropPartition(String catName, String dbName, String tableName, List partitionValues) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.dropPartition(catName, dbName, tblName, part_vals); + boolean succ = rawStore.dropPartition(catName, dbName, tableName, partitionValues); if (succ) { catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { + return true; } - sharedCache.removePartitionFromCache(catName, dbName, tblName, part_vals); + SHARED_CACHE.removePartitionFromCache(catName, dbName, tableName, partitionValues); } return succ; } @@ -1022,8 +1076,8 @@ public void dropPartitions(String catName, String 
dbName, String tblName, List getPartitions(String catName, String dbName, String tblName, int max) + public List getPartitions(String catName, String dbName, String tableName, int max) throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.getPartitions(catName, dbName, tblName, max); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { + return rawStore.getPartitions(catName, dbName, tableName, max); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + Table tbl = SHARED_CACHE.getTableFromCache(catName, dbName, tableName); if (tbl == null) { // The table containing the partitions is not yet loaded in cache - return rawStore.getPartitions(catName, dbName, tblName, max); + return rawStore.getPartitions(catName, dbName, tableName, max); } - List parts = sharedCache.listCachedPartitions(catName, dbName, tblName, max); - return parts; + return SHARED_CACHE.listCachedPartitions(catName, dbName, tableName, max); } @Override @@ -1059,58 +1112,58 @@ public void dropPartitions(String catName, String dbName, String tblName, List getTables(String catName, String dbName, String pattern) throws MetaException { - if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { + if (!isBlacklistWhitelistEmpty(conf) || !IS_CACHE_PREWARMED.get()) { return rawStore.getTables(catName, dbName, pattern); } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), pattern, (short) -1); + return SHARED_CACHE.listCachedTableNames(normalizeIdentifier(catName), + normalizeIdentifier(dbName), pattern, (short) -1); } @Override public List getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException { - if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { + if (!isBlacklistWhitelistEmpty(conf) || !IS_CACHE_PREWARMED.get()) { return rawStore.getTables(catName, dbName, pattern, tableType); } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName), pattern, tableType); + return SHARED_CACHE.listCachedTableNames(normalizeIdentifier(catName), + normalizeIdentifier(dbName), pattern, tableType); } @Override @@ -1123,38 +1176,38 @@ public void updateCreationMetadata(String catName, String dbname, String tablena public List getTableMeta(String catName, String dbNames, String tableNames, List tableTypes) throws MetaException { // TODO Check if all required tables are allowed, if so, get it from cache - if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { + if (!isBlacklistWhitelistEmpty(conf) || !IS_CACHE_PREWARMED.get()) { return rawStore.getTableMeta(catName, dbNames, tableNames, tableTypes); } - return sharedCache.getTableMeta(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbNames), - StringUtils.normalizeIdentifier(tableNames), tableTypes); + return SHARED_CACHE.getTableMeta(normalizeIdentifier(catName), + normalizeIdentifier(dbNames), + normalizeIdentifier(tableNames), tableTypes); } @Override - public List
getTableObjectsByName(String catName, String dbName, List tblNames) + public List
getTableObjectsByName(String catName, String dbName, List tableNames) throws MetaException, UnknownDBException { dbName = normalizeIdentifier(dbName); catName = normalizeIdentifier(catName); boolean missSomeInCache = false; - for (String tblName : tblNames) { + for (String tblName : tableNames) { tblName = normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { missSomeInCache = true; break; } } - if (!isCachePrewarmed.get() || missSomeInCache) { - return rawStore.getTableObjectsByName(catName, dbName, tblNames); + if (!IS_CACHE_PREWARMED.get() || missSomeInCache) { + return rawStore.getTableObjectsByName(catName, dbName, tableNames); } - Database db = sharedCache.getDatabaseFromCache(catName, dbName); + Database db = SHARED_CACHE.getDatabaseFromCache(catName, dbName); if (db == null) { throw new UnknownDBException("Could not find database " + dbName); } List
tables = new ArrayList<>(); - for (String tblName : tblNames) { + for (String tblName : tableNames) { tblName = normalizeIdentifier(tblName); - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + Table tbl = SHARED_CACHE.getTableFromCache(catName, dbName, tblName); if (tbl == null) { tbl = rawStore.getTable(catName, dbName, tblName); } @@ -1168,38 +1221,38 @@ public void updateCreationMetadata(String catName, String dbname, String tablena @Override public List getAllTables(String catName, String dbName) throws MetaException { - if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { + if (!isBlacklistWhitelistEmpty(conf) || !IS_CACHE_PREWARMED.get()) { return rawStore.getAllTables(catName, dbName); } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), - StringUtils.normalizeIdentifier(dbName)); + return SHARED_CACHE.listCachedTableNames(normalizeIdentifier(catName), + normalizeIdentifier(dbName)); } @Override // TODO: implement using SharedCache - public List listTableNamesByFilter(String catName, String dbName, String filter, short max_tables) + public List listTableNamesByFilter(String catName, String dbName, String filter, short maxTables) throws MetaException, UnknownDBException { - return rawStore.listTableNamesByFilter(catName, dbName, filter, max_tables); + return rawStore.listTableNamesByFilter(catName, dbName, filter, maxTables); } @Override - public List listPartitionNames(String catName, String dbName, String tblName, - short max_parts) throws MetaException { - catName = StringUtils.normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); + public List listPartitionNames(String catName, String dbName, String tableName, + short maxPartitions) throws MetaException { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { + return rawStore.listPartitionNames(catName, dbName, tableName, maxPartitions); } - Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); + Table tbl = SHARED_CACHE.getTableFromCache(catName, dbName, tableName); if (tbl == null) { // The table is not yet loaded in cache - return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); + return rawStore.listPartitionNames(catName, dbName, tableName, maxPartitions); } List partitionNames = new ArrayList<>(); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, max_parts)) { - if (max_parts == -1 || count < max_parts) { + for (Partition part : SHARED_CACHE.listCachedPartitions(catName, dbName, tableName, maxPartitions)) { + if (maxPartitions == -1 || count < maxPartitions) { partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues())); } } @@ -1207,51 +1260,52 @@ public void updateCreationMetadata(String catName, String dbname, String tablena } @Override - public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, + public PartitionValuesResponse listPartitionValues(String catName, String dbName, String tableName, List cols, boolean applyDistinct, String filter, boolean ascending, - List order, long maxParts) throws MetaException { + List order, long maxParts) { throw new 
UnsupportedOperationException(); } @Override - public Partition alterPartition(String catName, String dbName, String tblName, - List partVals, Partition newPart, String validWriteIds) + public Partition alterPartition(String catName, String dbName, String tableName, + List partitionValues, Partition newPartition, String queryValidWriteIds) throws InvalidObjectException, MetaException { - newPart = rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, validWriteIds); + newPartition = rawStore.alterPartition(catName, dbName, tableName, partitionValues, newPartition, + queryValidWriteIds); catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return newPart; + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { + return newPartition; } - sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart); - return newPart; + SHARED_CACHE.alterPartitionInCache(catName, dbName, tableName, partitionValues, newPartition); + return newPartition; } @Override - public List alterPartitions(String catName, String dbName, String tblName, - List> partValsList, List newParts, - long writeId, String validWriteIds) + public List alterPartitions(String catName, String dbName, String tableName, + List> partitionValuesList, List newPartitions, + long writeId, String queryValidWriteIds) throws InvalidObjectException, MetaException { - newParts = rawStore.alterPartitions( - catName, dbName, tblName, partValsList, newParts, writeId, validWriteIds); + newPartitions = rawStore.alterPartitions( + catName, dbName, tableName, partitionValuesList, newPartitions, writeId, queryValidWriteIds); catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return newParts; + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { + return newPartitions; } - sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts); - return newParts; + SHARED_CACHE.alterPartitionsInCache(catName, dbName, tableName, partitionValuesList, newPartitions); + return newPartitions; } private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, - String defaultPartName, short maxParts, List result, SharedCache sharedCache) + String defaultPartName, short maxParts, List result) throws MetaException, NoSuchObjectException { List parts = - sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(table.getCatName()), - StringUtils.normalizeIdentifier(table.getDbName()), - StringUtils.normalizeIdentifier(table.getTableName()), maxParts); + SHARED_CACHE.listCachedPartitions(normalizeIdentifier(table.getCatName()), + normalizeIdentifier(table.getDbName()), + normalizeIdentifier(table.getTableName()), maxParts); for (Partition part : parts) { result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); } @@ -1283,22 +1337,22 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, @Override public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { - catName = StringUtils.normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); + 
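// The partition accessors in this region translate between partition *names*
// ("ds=2024-01-01/hr=00") and partition *values* (["2024-01-01", "00"]), via
// partNameToVals on the way in and makePartName on the way out. A small sketch of the
// forward direction using the FileUtils helper already used elsewhere in this patch;
// the column and value strings are made up:

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.utils.FileUtils;

class PartNameDemo {
  public static void main(String[] args) {
    String name = FileUtils.makePartName(
        Arrays.asList("ds", "hr"),             // partition columns
        Arrays.asList("2024-01-01", "00"));    // partition values
    System.out.println(name);                  // prints: ds=2024-01-01/hr=00
  }
}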
catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); } - List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = SHARED_CACHE.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); } + List partNames = new LinkedList<>(); boolean hasUnknownPartitions = - getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartitionName, maxParts, partNames, sharedCache); + getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartitionName, maxParts, partNames); for (String partName : partNames) { - Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName)); + Partition part = SHARED_CACHE.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName)); part.unsetPrivileges(); result.add(part); } @@ -1315,20 +1369,20 @@ public int getNumPartitionsByFilter(String catName, String dbName, String tblNam public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); } String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); - List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = SHARED_CACHE.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); } - getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames, - sharedCache); + List partNames = new LinkedList<>(); + getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames); return partNames.size(); } @@ -1347,20 +1401,20 @@ public int getNumPartitionsByExpr(String catName, String dbName, String tblName, @Override public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { - catName = StringUtils.normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = SHARED_CACHE.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); } List partitions = new ArrayList<>(); for (String partName : partNames) { - Partition part = 
sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName)); + Partition part = SHARED_CACHE.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName)); if (part!=null) { partitions.add(part); } @@ -1534,18 +1588,18 @@ public Role getRole(String roleName) throws NoSuchObjectException { public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { - catName = StringUtils.normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = SHARED_CACHE.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); } - Partition p = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals); + Partition p = SHARED_CACHE.getPartitionFromCache(catName, dbName, tblName, partVals); if (p != null) { String partName = Warehouse.makePartName(table.getPartitionKeys(), partVals); PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName, @@ -1559,20 +1613,20 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN public List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { - catName = StringUtils.normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); if (!shouldCacheTable(catName, dbName, tblName)) { return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = SHARED_CACHE.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); } List partitions = new ArrayList<>(); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { + for (Partition part : SHARED_CACHE.listCachedPartitions(catName, dbName, tblName, maxParts)) { if (maxParts == -1 || count < maxParts) { String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName, @@ -1586,26 +1640,26 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN } @Override - public List listPartitionNamesPs(String catName, String dbName, String tblName, List partSpecs, - short maxParts) throws MetaException, NoSuchObjectException { - catName = StringUtils.normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = 
StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts); + public List listPartitionNamesPs(String catName, String dbName, String tableName, List partitionValues, + short maxPartitions) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { + return rawStore.listPartitionNamesPs(catName, dbName, tableName, partitionValues, maxPartitions); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = SHARED_CACHE.getTableFromCache(catName, dbName, tableName); if (table == null) { // The table is not yet loaded in cache - return rawStore.listPartitionNamesPs(catName, dbName, tblName, partSpecs, maxParts); + return rawStore.listPartitionNamesPs(catName, dbName, tableName, partitionValues, maxPartitions); } - String partNameMatcher = getPartNameMatcher(table, partSpecs); + String partNameMatcher = getPartNameMatcher(table, partitionValues); List partitionNames = new ArrayList<>(); - List allPartitions = sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts); + List allPartitions = SHARED_CACHE.listCachedPartitions(catName, dbName, tableName, maxPartitions); int count = 0; for (Partition part : allPartitions) { String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); - if (partName.matches(partNameMatcher) && (maxParts == -1 || count < maxParts)) { + if (partName.matches(partNameMatcher) && (maxPartitions == -1 || count < maxPartitions)) { partitionNames.add(partName); count++; } @@ -1614,29 +1668,29 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN } @Override - public List listPartitionsPsWithAuth(String catName, String dbName, String tblName, List partSpecs, - short maxParts, String userName, List groupNames) + public List listPartitionsPsWithAuth(String catName, String dbName, String tableName, List partitionValues, + short maxPartitions, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { - catName = StringUtils.normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames); + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { + return rawStore.listPartitionsPsWithAuth(catName, dbName, tableName, partitionValues, maxPartitions, userName, groupNames); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = SHARED_CACHE.getTableFromCache(catName, dbName, tableName); if (table == null) { // The table is not yet loaded in cache - return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partSpecs, maxParts, userName, groupNames); + return rawStore.listPartitionsPsWithAuth(catName, dbName, tableName, partitionValues, maxPartitions, userName, groupNames); } - String partNameMatcher = getPartNameMatcher(table, partSpecs); + String partNameMatcher = getPartNameMatcher(table, partitionValues); List partitions = new ArrayList<>(); - List 
allPartitions = sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts); + List allPartitions = SHARED_CACHE.listCachedPartitions(catName, dbName, tableName, maxPartitions); int count = 0; for (Partition part : allPartitions) { String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); - if (partName.matches(partNameMatcher) && (maxParts == -1 || count < maxParts)) { + if (partName.matches(partNameMatcher) && (maxPartitions == -1 || count < maxPartitions)) { PrincipalPrivilegeSet privs = - getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); + getPartitionPrivilegeSet(catName, dbName, tableName, partName, userName, groupNames); part.setPrivileges(privs); partitions.add(part); count++; @@ -1669,7 +1723,9 @@ private String getPartNameMatcher(Table table, List partSpecs) throws Me // Note: ideally this should be above both CachedStore and ObjectStore. private Map adjustStatsParamsForGet(Map tableParams, Map params, long statsWriteId, String validWriteIds) throws MetaException { - if (!TxnUtils.isTransactionalTable(tableParams)) return params; // Not a txn table. + if (!TxnUtils.isTransactionalTable(tableParams)) { + return params; // Not a txn table. + } if (areTxnStatsSupported && ((validWriteIds == null) || ObjectStore.isCurrentStatsValidForTheQuery( conf, params, statsWriteId, validWriteIds, false))) { @@ -1689,7 +1745,9 @@ private ColumnStatistics adjustColStatForGet(Map tableParams, Map params, ColumnStatistics colStat, long statsWriteId, String validWriteIds) throws MetaException { colStat.setIsStatsCompliant(true); - if (!TxnUtils.isTransactionalTable(tableParams)) return colStat; // Not a txn table. + if (!TxnUtils.isTransactionalTable(tableParams)) { + return colStat; // Not a txn table. 
+ } if (areTxnStatsSupported && ((validWriteIds == null) || ObjectStore.isCurrentStatsValidForTheQuery( conf, params, statsWriteId, validWriteIds, false))) { @@ -1717,84 +1775,84 @@ private ColumnStatistics adjustColStatForGet(Map tableParams, if (!shouldCacheTable(catName, dbName, tblName)) { return newParams; } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = SHARED_CACHE.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache return newParams; } table.setParameters(newParams); - sharedCache.alterTableInCache(catName, dbName, tblName, table); - sharedCache.updateTableColStatsInCache(catName, dbName, tblName, colStats.getStatsObj()); + SHARED_CACHE.alterTableInCache(catName, dbName, tblName, table); + SHARED_CACHE.updateTableColStatsInCache(catName, dbName, tblName, colStats.getStatsObj()); } return newParams; } @Override - public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colNames) throws MetaException, NoSuchObjectException { - return getTableColumnStatistics(catName, dbName, tblName, colNames, null); + return getTableColumnStatistics(catName, dbName, tableName, colNames, null); } @Override public ColumnStatistics getTableColumnStatistics( - String catName, String dbName, String tblName, List colNames, - String validWriteIds) + String catName, String dbName, String tableName, List colNames, + String writeIdList) throws MetaException, NoSuchObjectException { - catName = StringUtils.normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { return rawStore.getTableColumnStatistics( - catName, dbName, tblName, colNames, validWriteIds); + catName, dbName, tableName, colNames, writeIdList); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = SHARED_CACHE.getTableFromCache(catName, dbName, tableName); if (table == null) { // The table is not yet loaded in cache return rawStore.getTableColumnStatistics( - catName, dbName, tblName, colNames, validWriteIds); + catName, dbName, tableName, colNames, writeIdList); } - ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName); + ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tableName); List colStatObjs = - sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames); + SHARED_CACHE.getTableColStatsFromCache(catName, dbName, tableName, colNames); return adjustColStatForGet(table.getParameters(), table.getParameters(), - new ColumnStatistics(csd, colStatObjs), table.getWriteId(), validWriteIds); + new ColumnStatistics(csd, colStatObjs), table.getWriteId(), writeIdList); } @Override - public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName, + public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName); + boolean succ = rawStore.deleteTableColumnStatistics(catName, 
dbName, tableName, colName); if (succ) { catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { + return true; } - sharedCache.removeTableColStatsFromCache(catName, dbName, tblName, colName); + SHARED_CACHE.removeTableColStatsFromCache(catName, dbName, tableName, colName); } return succ; } @Override - public Map updatePartitionColumnStatistics(ColumnStatistics colStats, + public Map updatePartitionColumnStatistics(ColumnStatistics statsObj, List partVals, String validWriteIds, long writeId) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { Map newParams = rawStore.updatePartitionColumnStatistics( - colStats, partVals, validWriteIds, writeId); + statsObj, partVals, validWriteIds, writeId); if (newParams != null) { - String catName = colStats.getStatsDesc().isSetCatName() ? - normalizeIdentifier(colStats.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME; - String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName()); - String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName()); + String catName = statsObj.getStatsDesc().isSetCatName() ? + normalizeIdentifier(statsObj.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME; + String dbName = normalizeIdentifier(statsObj.getStatsDesc().getDbName()); + String tblName = normalizeIdentifier(statsObj.getStatsDesc().getTableName()); if (!shouldCacheTable(catName, dbName, tblName)) { return newParams; } Partition part = getPartition(catName, dbName, tblName, partVals); part.setParameters(newParams); - sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, part); - sharedCache.updatePartitionColStatsInCache(catName, dbName, tblName, partVals, colStats.getStatsObj()); + SHARED_CACHE.alterPartitionInCache(catName, dbName, tblName, partVals, part); + SHARED_CACHE.updatePartitionColStatsInCache(catName, dbName, tblName, partVals, statsObj.getStatsObj()); } return newParams; } @@ -1817,54 +1875,70 @@ public boolean deleteTableColumnStatistics(String catName, String dbName, String } @Override - public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName, + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean succ = - rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName); + rawStore.deletePartitionColumnStatistics(catName, dbName, tableName, partName, partVals, colName); if (succ) { catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - if (!shouldCacheTable(catName, dbName, tblName)) { - return succ; + tableName = normalizeIdentifier(tableName); + if (!shouldCacheTable(catName, dbName, tableName)) { + return true; } - sharedCache.removePartitionColStatsFromCache(catName, dbName, tblName, partVals, colName); + SHARED_CACHE.removePartitionColStatsFromCache(catName, dbName, tableName, partVals, colName); } return succ; } @Override + @Deprecated public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException { 
- return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, null); + return getAggrStatsFor(catName, dbName, tblName, partNames, colNames, null); } @Override + @Deprecated public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames, String writeIdList) throws MetaException, NoSuchObjectException { - List colStats; + return getAggrStatsFor(catName, dbName, tblName, partNames, colNames, writeIdList); + } + + @Override + public AggrStats getAggrStatsFor(String catName, String dbName, String tblName, List partNames, + List colNames) throws MetaException, NoSuchObjectException { + return getAggrStatsFor(catName, dbName, tblName, partNames, colNames, null); + } + + @Override + public AggrStats getAggrStatsFor(String catName, String dbName, String tblName, + List partNames, List colNames, + String writeIdList) + throws MetaException, NoSuchObjectException { catName = normalizeIdentifier(catName); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); // TODO: we currently cannot do transactional checks for stats here // (incl. due to lack of sync w.r.t. the below rawStore call). if (!shouldCacheTable(catName, dbName, tblName) || writeIdList != null) { - rawStore.get_aggr_stats_for( + return rawStore.getAggrStatsFor( catName, dbName, tblName, partNames, colNames, writeIdList); } - Table table = sharedCache.getTableFromCache(catName, dbName, tblName); + Table table = SHARED_CACHE.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.get_aggr_stats_for( + return rawStore.getAggrStatsFor( catName, dbName, tblName, partNames, colNames, writeIdList); } List allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); + List colStats; if (partNames.size() == allPartNames.size()) { - colStats = sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALL); + colStats = SHARED_CACHE.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALL); if (colStats != null) { return new AggrStats(colStats, partNames.size()); } @@ -1872,7 +1946,7 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam String defaultPartitionName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); if (!partNames.contains(defaultPartitionName)) { colStats = - sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALLBUTDEFAULT); + SHARED_CACHE.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALLBUTDEFAULT); if (colStats != null) { return new AggrStats(colStats, partNames.size()); } @@ -1881,18 +1955,16 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, String tblNam LOG.debug("Didn't find aggr stats in cache. Merging them. 
tblName= {}, parts= {}, cols= {}", tblName, partNames, colNames); MergedColumnStatsForPartitions mergedColStats = - mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames, sharedCache); + mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames); return new AggrStats(mergedColStats.getColStats(), mergedColStats.getPartsFound()); } private MergedColumnStatsForPartitions mergeColStatsForPartitions( - String catName, String dbName, String tblName, List partNames, List colNames, - SharedCache sharedCache) throws MetaException { - final boolean useDensityFunctionForNDVEstimation = + String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException { + boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); - final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); + double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); Map> colStatsMap = new HashMap<>(); - boolean areAllPartsFound = true; long partsFound = 0; for (String colName : colNames) { long partsFoundForColumn = 0; @@ -1900,7 +1972,7 @@ private MergedColumnStatsForPartitions mergeColStatsForPartitions( List colStatsWithPartInfoList = new ArrayList<>(); for (String partName : partNames) { ColumnStatisticsObj colStatsForPart = - sharedCache.getPartitionColStatsFromCache(catName, dbName, tblName, partNameToVals(partName), colName); + SHARED_CACHE.getPartitionColStatsFromCache(catName, dbName, tblName, partNameToVals(partName), colName); if (colStatsForPart != null) { ColStatsObjWithSourceInfo colStatsWithPartInfo = new ColStatsObjWithSourceInfo(colStatsForPart, catName, dbName, tblName, partName); @@ -1924,22 +1996,23 @@ private MergedColumnStatsForPartitions mergeColStatsForPartitions( partsFound = partsFoundForColumn; } if (colStatsMap.size() < 1) { - LOG.debug("No stats data found for: dbName={} tblName= {} partNames= {} colNames= ", dbName, + LOG.debug("No stats data found for: dbName={} tblName= {} partNames= {} colNames={}", dbName, tblName, partNames, colNames); - return new MergedColumnStatsForPartitions(new ArrayList(), 0); + return new MergedColumnStatsForPartitions(new ArrayList<>(), 0); } } // Note that enableBitVector does not apply here because ColumnStatisticsObj // itself will tell whether bitvector is null or not and aggr logic can automatically apply. 
+ boolean areAllPartsFound = true; return new MergedColumnStatsForPartitions(MetaStoreServerUtils.aggrPartitionStats(colStatsMap, partNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner), partsFound); } - class MergedColumnStatsForPartitions { - List colStats = new ArrayList(); - long partsFound; + final class MergedColumnStatsForPartitions { + private final List colStats; + private final long partsFound; - MergedColumnStatsForPartitions(List colStats, long partsFound) { + private MergedColumnStatsForPartitions(List colStats, long partsFound) { this.colStats = colStats; this.partsFound = partsFound; } @@ -2082,48 +2155,48 @@ public void setMetaStoreSchemaVersion(String version, String comment) @Override public void createFunction(Function func) throws InvalidObjectException, MetaException { - // TODO fucntionCache + // TODO functionCache rawStore.createFunction(func); } @Override public void alterFunction(String catName, String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException { - // TODO fucntionCache + // TODO functionCache rawStore.alterFunction(catName, dbName, funcName, newFunction); } @Override public void dropFunction(String catName, String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - // TODO fucntionCache + // TODO functionCache rawStore.dropFunction(catName, dbName, funcName); } @Override public Function getFunction(String catName, String dbName, String funcName) throws MetaException { - // TODO fucntionCache + // TODO functionCache return rawStore.getFunction(catName, dbName, funcName); } @Override public List getAllFunctions(String catName) throws MetaException { - // TODO fucntionCache + // TODO functionCache return rawStore.getAllFunctions(catName); } @Override public List getFunctions(String catName, String dbName, String pattern) throws MetaException { - // TODO fucntionCache + // TODO functionCache return rawStore.getFunctions(catName, dbName, pattern); } @Override public NotificationEventResponse getNextNotification( - NotificationEventRequest rqst) { - return rawStore.getNextNotification(rqst); + NotificationEventRequest request) { + return rawStore.getNextNotification(request); } @Override @@ -2142,8 +2215,8 @@ public CurrentNotificationEventId getCurrentNotificationEventId() { } @Override - public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) { - return rawStore.getNotificationEventsCount(rqst); + public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest request) { + return rawStore.getNotificationEventsCount(request); } @Override @@ -2195,46 +2268,46 @@ public int getDatabaseCount() throws MetaException { } @Override - public List getPrimaryKeys(String catName, String db_name, String tbl_name) + public List getPrimaryKeys(String catName, String dbName, String tableName) throws MetaException { // TODO constraintCache - return rawStore.getPrimaryKeys(catName, db_name, tbl_name); + return rawStore.getPrimaryKeys(catName, dbName, tableName); } @Override - public List getForeignKeys(String catName, String parent_db_name, - String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) + public List getForeignKeys(String catName, String parentDbName, + String parentTableName, String foreignDbName, String foreignTblName) throws MetaException { // TODO constraintCache - return rawStore.getForeignKeys(catName, parent_db_name, parent_tbl_name, 
foreign_db_name, foreign_tbl_name); + return rawStore.getForeignKeys(catName, parentDbName, parentTableName, foreignDbName, foreignTblName); } @Override - public List getUniqueConstraints(String catName, String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO constraintCache - return rawStore.getUniqueConstraints(catName, db_name, tbl_name); + return rawStore.getUniqueConstraints(catName, dbName, tblName); } @Override - public List getNotNullConstraints(String catName, String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO constraintCache - return rawStore.getNotNullConstraints(catName, db_name, tbl_name); + return rawStore.getNotNullConstraints(catName, dbName, tblName); } @Override - public List getDefaultConstraints(String catName, String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO constraintCache - return rawStore.getDefaultConstraints(catName, db_name, tbl_name); + return rawStore.getDefaultConstraints(catName, dbName, tblName); } @Override - public List getCheckConstraints(String catName, String db_name, String tbl_name) + public List getCheckConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO constraintCache - return rawStore.getCheckConstraints(catName, db_name, tbl_name); + return rawStore.getCheckConstraints(catName, dbName, tblName); } @Override @@ -2253,9 +2326,9 @@ public int getDatabaseCount() throws MetaException { if (!shouldCacheTable(catName, dbName, tblName)) { return constraintNames; } - sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getCatName()), - StringUtils.normalizeIdentifier(tbl.getDbName()), - StringUtils.normalizeIdentifier(tbl.getTableName()), tbl); + SHARED_CACHE.addTableToCache(normalizeIdentifier(tbl.getCatName()), + normalizeIdentifier(tbl.getDbName()), + normalizeIdentifier(tbl.getTableName()), tbl); return constraintNames; } @@ -2295,17 +2368,17 @@ public void dropConstraint(String catName, String dbName, String tableName, } @Override - public List addDefaultConstraints(List nns) + public List addDefaultConstraints(List defaultConstraints) throws InvalidObjectException, MetaException { // TODO constraintCache - return rawStore.addDefaultConstraints(nns); + return rawStore.addDefaultConstraints(defaultConstraints); } @Override - public List addCheckConstraints(List nns) + public List addCheckConstraints(List checkConstraints) throws InvalidObjectException, MetaException { // TODO constraintCache - return rawStore.addCheckConstraints(nns); + return rawStore.addCheckConstraints(checkConstraints); } // TODO - not clear if we should cache these or not. 
For now, don't bother @@ -2419,7 +2492,7 @@ public WMFullResourcePlan getResourcePlan(String name, String ns) @Override public WMFullResourcePlan alterResourcePlan(String name, String ns, WMNullableResourcePlan resourcePlan, - boolean canActivateDisabled, boolean canDeactivate, boolean isReplace) + boolean canActivateDisabled, boolean canDeactivate, boolean isReplace) throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException { return rawStore.alterResourcePlan( @@ -2512,7 +2585,7 @@ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerNa } public long getCacheUpdateCount() { - return sharedCache.getUpdateCount(); + return SHARED_CACHE.getUpdateCount(); } @Override @@ -2579,8 +2652,8 @@ static boolean shouldCacheTable(String catName, String dbName, String tblName) { } static List createPatterns(String configStr) { - List patternStrs = Arrays.asList(configStr.split(",")); - List patterns = new ArrayList(); + String[] patternStrs = configStr.split(","); + List patterns = new ArrayList<>(); for (String str : patternStrs) { patterns.add(Pattern.compile(str)); } @@ -2595,7 +2668,7 @@ static boolean isBlacklistWhitelistEmpty(Configuration conf) { @VisibleForTesting void resetCatalogCache() { - sharedCache.resetCatalogCache(); + SHARED_CACHE.resetCatalogCache(); setCachePrewarmedState(false); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index c24e7160ac..02930d19ed 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -27,18 +27,20 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.TreeMap; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -48,41 +50,38 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; -import org.apache.hadoop.hive.metastore.utils.StringUtils; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator.ObjectEstimator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; - import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; public class SharedCache { - private static ReentrantReadWriteLock 
cacheLock = new ReentrantReadWriteLock(true); - private boolean isCatalogCachePrewarmed = false; - private Map catalogCache = new TreeMap<>(); - private HashSet catalogsDeletedDuringPrewarm = new HashSet<>(); - private AtomicBoolean isCatalogCacheDirty = new AtomicBoolean(false); + private static final ReentrantReadWriteLock CACHE_LOCK = new ReentrantReadWriteLock(true); + private boolean isCatalogCachePrewarmed; + private final Map catalogCache = new TreeMap<>(); + private final Set catalogsDeletedDuringPrewarm = new HashSet<>(); + private final AtomicBoolean isCatalogCacheDirty = new AtomicBoolean(false); // For caching Database objects. Key is database name - private Map databaseCache = new TreeMap<>(); - private boolean isDatabaseCachePrewarmed = false; - private HashSet databasesDeletedDuringPrewarm = new HashSet<>(); - private AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false); + private final Map databaseCache = new TreeMap<>(); + private boolean isDatabaseCachePrewarmed; + private final Set databasesDeletedDuringPrewarm = new HashSet<>(); + private final AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false); // For caching TableWrapper objects. Key is aggregate of database name and table name private Map tableCache = new TreeMap<>(); - private boolean isTableCachePrewarmed = false; - private HashSet tablesDeletedDuringPrewarm = new HashSet<>(); - private AtomicBoolean isTableCacheDirty = new AtomicBoolean(false); - private Map sdCache = new HashMap<>(); - private static MessageDigest md; - static final private Logger LOG = LoggerFactory.getLogger(SharedCache.class.getName()); - private AtomicLong cacheUpdateCount = new AtomicLong(0); + private boolean isTableCachePrewarmed; + private final Set tablesDeletedDuringPrewarm = new HashSet<>(); + private final AtomicBoolean isTableCacheDirty = new AtomicBoolean(false); + private final Map sdCache = new HashMap<>(); + private static final MessageDigest MESSAGE_DIGEST; + private static final Logger LOG = LoggerFactory.getLogger(SharedCache.class.getName()); + private final AtomicLong cacheUpdateCount = new AtomicLong(0); private static long maxCacheSizeInBytes = -1; - private static long currentCacheSizeInBytes = 0; - private static HashMap, ObjectEstimator> sizeEstimators = null; + private static long currentCacheSizeInBytes; + private static HashMap, ObjectEstimator> sizeEstimators; enum StatsType { ALL(0), ALLBUTDEFAULT(1); @@ -100,7 +99,7 @@ public int getPosition() { static { try { - md = MessageDigest.getInstance("MD5"); + MESSAGE_DIGEST = MessageDigest.getInstance("MD5"); } catch (NoSuchAlgorithmException e) { throw new RuntimeException("should not happen", e); } @@ -125,46 +124,46 @@ private static ObjectEstimator getMemorySizeEstimator(Class clazz) { } static class TableWrapper { - Table t; - String location; - Map parameters; - byte[] sdHash; - ReentrantReadWriteLock tableLock = new ReentrantReadWriteLock(true); + private Table table; + private String location; + private Map parameters; + private byte[] sdHash; + private final ReentrantReadWriteLock tableLock = new ReentrantReadWriteLock(true); // For caching column stats for an unpartitioned table // Key is column name and the value is the col stat object private Map tableColStatsCache = - new ConcurrentHashMap(); - private AtomicBoolean isTableColStatsCacheDirty = new AtomicBoolean(false); + new ConcurrentHashMap<>(); + private final AtomicBoolean isTableColStatsCacheDirty = new AtomicBoolean(false); // For caching partition objects // Key is partition values 
and the value is a wrapper around the partition object private Map partitionCache = - new ConcurrentHashMap(); - private AtomicBoolean isPartitionCacheDirty = new AtomicBoolean(false); + new ConcurrentHashMap<>(); + private final AtomicBoolean isPartitionCacheDirty = new AtomicBoolean(false); // For caching column stats for a partitioned table // Key is aggregate of partition values, column name and the value is the col stat object private Map partitionColStatsCache = - new ConcurrentHashMap(); - private AtomicBoolean isPartitionColStatsCacheDirty = new AtomicBoolean(false); + new ConcurrentHashMap<>(); + private final AtomicBoolean isPartitionColStatsCacheDirty = new AtomicBoolean(false); // For caching aggregate column stats for all and all minus default partition // Key is column name and the value is a list of 2 col stat objects // (all partitions and all but default) private Map> aggrColStatsCache = - new ConcurrentHashMap>(); - private AtomicBoolean isAggrPartitionColStatsCacheDirty = new AtomicBoolean(false); + new ConcurrentHashMap<>(); + private final AtomicBoolean isAggrPartitionColStatsCacheDirty = new AtomicBoolean(false); - TableWrapper(Table t, byte[] sdHash, String location, Map parameters) { - this.t = t; + TableWrapper(Table table, byte[] sdHash, String location, Map parameters) { + this.table = table; this.sdHash = sdHash; this.location = location; this.parameters = parameters; } public Table getTable() { - return t; + return table; } public void setTable(Table t) { - this.t = t; + this.table = t; } public byte[] getSdHash() { @@ -192,12 +191,12 @@ public void setParameters(Map parameters) { } boolean sameDatabase(String catName, String dbName) { - return catName.equals(t.getCatName()) && dbName.equals(t.getDbName()); + return catName.equals(table.getCatName()) && dbName.equals(table.getDbName()); } void cachePartition(Partition part, SharedCache sharedCache) { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); PartitionWrapper wrapper = makePartitionWrapper(part, sharedCache); partitionCache.put(CacheUtils.buildPartitionCacheKey(part.getValues()), wrapper); isPartitionCacheDirty.set(true); @@ -211,8 +210,8 @@ void cachePartition(Partition part, SharedCache sharedCache) { } boolean cachePartitions(List parts, SharedCache sharedCache) { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); for (Partition part : parts) { PartitionWrapper ptnWrapper = makePartitionWrapper(part, sharedCache); if (maxCacheSizeInBytes > 0) { @@ -223,11 +222,10 @@ boolean cachePartitions(List parts, SharedCache sharedCache) { if (isCacheMemoryFull(estimatedMemUsage)) { LOG.debug( "Cannot cache Partition: {}. 
Memory needed is {} bytes, whereas the memory remaining is: {} bytes.", - part, estimatedMemUsage, (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes)); + part, estimatedMemUsage, 0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes); return false; - } else { - currentCacheSizeInBytes += estimatedMemUsage; } + currentCacheSizeInBytes += estimatedMemUsage; LOG.trace("Current cache size: {} bytes", currentCacheSizeInBytes); } partitionCache.put(CacheUtils.buildPartitionCacheKey(part.getValues()), ptnWrapper); @@ -244,9 +242,9 @@ boolean cachePartitions(List parts, SharedCache sharedCache) { } public Partition getPartition(List partVals, SharedCache sharedCache) { - Partition part = null; + tableLock.readLock().lock(); + Partition part; try { - tableLock.readLock().lock(); PartitionWrapper wrapper = partitionCache.get(CacheUtils.buildPartitionCacheKey(partVals)); if (wrapper == null) { return null; @@ -260,9 +258,9 @@ public Partition getPartition(List partVals, SharedCache sharedCache) { public List listPartitions(int max, SharedCache sharedCache) { List parts = new ArrayList<>(); - int count = 0; + tableLock.readLock().lock(); try { - tableLock.readLock().lock(); + int count = 0; for (PartitionWrapper wrapper : partitionCache.values()) { if (max == -1 || count < max) { parts.add(CacheUtils.assemble(wrapper, sharedCache)); @@ -276,9 +274,9 @@ public Partition getPartition(List partVals, SharedCache sharedCache) { } public boolean containsPartition(List partVals) { - boolean containsPart = false; + tableLock.readLock().lock(); + boolean containsPart; try { - tableLock.readLock().lock(); containsPart = partitionCache.containsKey(CacheUtils.buildPartitionCacheKey(partVals)); } finally { tableLock.readLock().unlock(); @@ -287,9 +285,9 @@ public boolean containsPartition(List partVals) { } public Partition removePartition(List partVal, SharedCache sharedCache) { - Partition part = null; + tableLock.writeLock().lock(); + Partition part; try { - tableLock.writeLock().lock(); PartitionWrapper wrapper = partitionCache.remove(CacheUtils.buildPartitionCacheKey(partVal)); isPartitionCacheDirty.set(true); @@ -319,8 +317,8 @@ public Partition removePartition(List partVal, SharedCache sharedCache) } public void removePartitions(List> partVals, SharedCache sharedCache) { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); for (List partVal : partVals) { removePartition(partVal, sharedCache); } @@ -330,8 +328,8 @@ public void removePartitions(List> partVals, SharedCache sharedCach } public void alterPartition(List partVals, Partition newPart, SharedCache sharedCache) { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); removePartition(partVals, sharedCache); cachePartition(newPart, sharedCache); } finally { @@ -341,8 +339,8 @@ public void alterPartition(List partVals, Partition newPart, SharedCache public void alterPartitions(List> partValsList, List newParts, SharedCache sharedCache) { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); for (int i = 0; i < partValsList.size(); i++) { List partVals = partValsList.get(i); Partition newPart = newParts.get(i); @@ -354,13 +352,13 @@ public void alterPartitions(List> partValsList, List new } public void refreshPartitions(List partitions, SharedCache sharedCache) { - Map newPartitionCache = new HashMap(); + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); + Map newPartitionCache = new HashMap<>(); for (Partition part : partitions) { if (isPartitionCacheDirty.compareAndSet(true, false)) { - 
LOG.debug("Skipping partition cache update for table: " + getTable().getTableName() - + "; the partition list we have is dirty."); + LOG.debug("Skipping partition cache update for table: {}; the partition list we have is dirty.", + getTable().getTableName()); return; } String key = CacheUtils.buildPartitionCacheKey(part.getValues()); @@ -380,8 +378,8 @@ public void refreshPartitions(List partitions, SharedCache sharedCach } public boolean updateTableColStats(List colStatsForTable) { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); for (ColumnStatisticsObj colStatObj : colStatsForTable) { // Get old stats object if present String key = colStatObj.getColName(); @@ -404,11 +402,10 @@ public boolean updateTableColStats(List colStatsForTable) { "Cannot cache Table Column Statistics Object: {}. Memory needed is {} bytes, " + "whereas the memory remaining is: {} bytes.", colStatObj, estimatedMemUsage, - (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes)); + 0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes); return false; - } else { - currentCacheSizeInBytes += estimatedMemUsage; } + currentCacheSizeInBytes += estimatedMemUsage; LOG.trace("Current cache size: {} bytes", currentCacheSizeInBytes); } tableColStatsCache.put(key, colStatObj.deepCopy()); @@ -422,14 +419,13 @@ public boolean updateTableColStats(List colStatsForTable) { } public void refreshTableColStats(List colStatsForTable) { - Map newTableColStatsCache = - new HashMap(); + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); + Map newTableColStatsCache = new HashMap<>(); for (ColumnStatisticsObj colStatObj : colStatsForTable) { if (isTableColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping table col stats cache update for table: " - + getTable().getTableName() + "; the table col stats list we have is dirty."); + LOG.debug("Skipping table col stats cache update for table: {}; the table col stats list we have is dirty.", + getTable().getTableName()); return; } String key = colStatObj.getColName(); @@ -443,9 +439,9 @@ public void refreshTableColStats(List colStatsForTable) { } public List getCachedTableColStats(List colNames) { - List colStatObjs = new ArrayList(); + List colStatObjs = new ArrayList<>(); + tableLock.readLock().lock(); try { - tableLock.readLock().lock(); for (String colName : colNames) { ColumnStatisticsObj colStatObj = tableColStatsCache.get(colName); if (colStatObj != null) { @@ -459,8 +455,8 @@ public void refreshTableColStats(List colStatsForTable) { } public void removeTableColStats(String colName) { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); if (colName == null) { tableColStatsCache.clear(); } else { @@ -473,8 +469,8 @@ public void removeTableColStats(String colName) { } public void removeAllTableColStats() { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); tableColStatsCache.clear(); isTableColStatsCacheDirty.set(true); } finally { @@ -483,8 +479,8 @@ public void removeAllTableColStats() { } public ColumnStatisticsObj getPartitionColStats(List partVal, String colName) { + tableLock.readLock().lock(); try { - tableLock.readLock().lock(); return partitionColStatsCache .get(CacheUtils.buildPartitonColStatsCacheKey(partVal, colName)); } finally { @@ -494,8 +490,8 @@ public ColumnStatisticsObj getPartitionColStats(List partVal, String col public boolean updatePartitionColStats(List partVal, List colStatsObjs) { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); for (ColumnStatisticsObj 
colStatObj : colStatsObjs) { // Get old stats object if present String key = CacheUtils.buildPartitonColStatsCacheKey(partVal, colStatObj.getColName()); @@ -518,11 +514,10 @@ public boolean updatePartitionColStats(List partVal, "Cannot cache Partition Column Statistics Object: {}. Memory needed is {} bytes, " + "whereas the memory remaining is: {} bytes.", colStatObj, estimatedMemUsage, - (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes)); + 0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes); return false; - } else { - currentCacheSizeInBytes += estimatedMemUsage; } + currentCacheSizeInBytes += estimatedMemUsage; LOG.trace("Current cache size: {} bytes", currentCacheSizeInBytes); } partitionColStatsCache.put(key, colStatObj.deepCopy()); @@ -540,8 +535,8 @@ public boolean updatePartitionColStats(List partVal, } public void removePartitionColStats(List partVals, String colName) { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); partitionColStatsCache.remove(CacheUtils.buildPartitonColStatsCacheKey(partVals, colName)); isPartitionColStatsCacheDirty.set(true); // Invalidate cached aggregate stats @@ -554,8 +549,8 @@ public void removePartitionColStats(List partVals, String colName) { } public void removeAllPartitionColStats() { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); partitionColStatsCache.clear(); isPartitionColStatsCacheDirty.set(true); // Invalidate cached aggregate stats @@ -568,25 +563,25 @@ public void removeAllPartitionColStats() { } public void refreshPartitionColStats(List partitionColStats) { - Map newPartitionColStatsCache = - new HashMap(); + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); - String tableName = StringUtils.normalizeIdentifier(getTable().getTableName()); + String tableName = normalizeIdentifier(getTable().getTableName()); + Map newPartitionColStatsCache = new HashMap<>(); for (ColumnStatistics cs : partitionColStats) { if (isPartitionColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping partition column stats cache update for table: " - + getTable().getTableName() + "; the partition column stats list we have is dirty"); + LOG.debug( + "Skipping partition column stats cache update for table: {}; the partition column stats list we" + + " have is dirty", getTable().getTableName()); return; } - List partVal; try { - partVal = Warehouse.makeValsFromName(cs.getStatsDesc().getPartName(), null); + List partVal = Warehouse.makeValsFromName(cs.getStatsDesc().getPartName(), null); List colStatsObjs = cs.getStatsObj(); for (ColumnStatisticsObj colStatObj : colStatsObjs) { if (isPartitionColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping partition column stats cache update for table: " - + getTable().getTableName() + "; the partition column list we have is dirty"); + LOG.debug( + "Skipping partition column stats cache update for table: {}; the partition column list we" + + " have is dirty", getTable().getTableName()); return; } String key = @@ -594,7 +589,7 @@ public void refreshPartitionColStats(List partitionColStats) { newPartitionColStatsCache.put(key, colStatObj.deepCopy()); } } catch (MetaException e) { - LOG.debug("Unable to cache partition column stats for table: " + tableName, e); + LOG.debug("Unable to cache partition column stats for table: {}", tableName, e); } } partitionColStatsCache = newPartitionColStatsCache; @@ -605,9 +600,9 @@ public void refreshPartitionColStats(List partitionColStats) { public List getAggrPartitionColStats(List colNames, StatsType 
statsType) { - List colStats = new ArrayList(); + List colStats = new ArrayList<>(); + tableLock.readLock().lock(); try { - tableLock.readLock().lock(); for (String colName : colNames) { List colStatList = aggrColStatsCache.get(colName); // If unable to find stats for a column, return null so we can build stats @@ -629,12 +624,12 @@ public void refreshPartitionColStats(List partitionColStats) { public void cacheAggrPartitionColStats(AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); if (aggrStatsAllPartitions != null) { for (ColumnStatisticsObj statObj : aggrStatsAllPartitions.getColStats()) { if (statObj != null) { - List aggrStats = new ArrayList(); + List aggrStats = new ArrayList<>(); aggrStats.add(StatsType.ALL.ordinal(), statObj.deepCopy()); aggrColStatsCache.put(statObj.getColName(), aggrStats); } @@ -645,7 +640,7 @@ public void cacheAggrPartitionColStats(AggrStats aggrStatsAllPartitions, if (statObj != null) { List aggrStats = aggrColStatsCache.get(statObj.getColName()); if (aggrStats == null) { - aggrStats = new ArrayList(); + aggrStats = new ArrayList<>(); } aggrStats.add(StatsType.ALLBUTDEFAULT.ordinal(), statObj.deepCopy()); } @@ -659,19 +654,19 @@ public void cacheAggrPartitionColStats(AggrStats aggrStatsAllPartitions, public void refreshAggrPartitionColStats(AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { - Map> newAggrColStatsCache = - new HashMap>(); + tableLock.writeLock().lock(); try { - tableLock.writeLock().lock(); + Map> newAggrColStatsCache = new HashMap<>(); if (aggrStatsAllPartitions != null) { for (ColumnStatisticsObj statObj : aggrStatsAllPartitions.getColStats()) { if (isAggrPartitionColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping aggregate stats cache update for table: " - + getTable().getTableName() + "; the aggregate stats list we have is dirty"); + LOG.debug( + "Skipping aggregate stats cache update for table: {}; the aggregate stats list we have is dirty", + getTable().getTableName()); return; } if (statObj != null) { - List aggrStats = new ArrayList(); + List aggrStats = new ArrayList<>(); aggrStats.add(StatsType.ALL.ordinal(), statObj.deepCopy()); newAggrColStatsCache.put(statObj.getColName(), aggrStats); } @@ -680,14 +675,15 @@ public void refreshAggrPartitionColStats(AggrStats aggrStatsAllPartitions, if (aggrStatsAllButDefaultPartition != null) { for (ColumnStatisticsObj statObj : aggrStatsAllButDefaultPartition.getColStats()) { if (isAggrPartitionColStatsCacheDirty.compareAndSet(true, false)) { - LOG.debug("Skipping aggregate stats cache update for table: " - + getTable().getTableName() + "; the aggregate stats list we have is dirty"); + LOG.debug( + "Skipping aggregate stats cache update for table: {}; the aggregate stats list we have is dirty", + getTable().getTableName()); return; } if (statObj != null) { List aggrStats = newAggrColStatsCache.get(statObj.getColName()); if (aggrStats == null) { - aggrStats = new ArrayList(); + aggrStats = new ArrayList<>(); } aggrStats.add(StatsType.ALLBUTDEFAULT.ordinal(), statObj.deepCopy()); } @@ -708,12 +704,12 @@ private void updateTableObj(Table newTable, SharedCache sharedCache) { Table tblCopy = newTable.deepCopy(); if (tblCopy.getPartitionKeys() != null) { for (FieldSchema fs : tblCopy.getPartitionKeys()) { - fs.setName(StringUtils.normalizeIdentifier(fs.getName())); + fs.setName(normalizeIdentifier(fs.getName())); } } setTable(tblCopy); if (tblCopy.getSd() != 
null) { - sdHash = MetaStoreServerUtils.hashStorageDescriptor(tblCopy.getSd(), md); + sdHash = MetaStoreServerUtils.hashStorageDescriptor(tblCopy.getSd(), MESSAGE_DIGEST); StorageDescriptor sd = tblCopy.getSd(); sharedCache.increSd(sd, sdHash); tblCopy.setSd(null); @@ -731,7 +727,7 @@ private PartitionWrapper makePartitionWrapper(Partition part, SharedCache shared Partition partCopy = part.deepCopy(); PartitionWrapper wrapper; if (part.getSd() != null) { - byte[] sdHash = MetaStoreServerUtils.hashStorageDescriptor(part.getSd(), md); + byte[] sdHash = MetaStoreServerUtils.hashStorageDescriptor(part.getSd(), MESSAGE_DIGEST); StorageDescriptor sd = part.getSd(); sharedCache.increSd(sd, sdHash); partCopy.setSd(null); @@ -744,20 +740,20 @@ private PartitionWrapper makePartitionWrapper(Partition part, SharedCache shared } static class PartitionWrapper { - Partition p; - String location; - Map parameters; - byte[] sdHash; + private final Partition partition; + private final String location; + private final Map parameters; + private final byte[] sdHash; - PartitionWrapper(Partition p, byte[] sdHash, String location, Map parameters) { - this.p = p; + PartitionWrapper(Partition partition, byte[] sdHash, String location, Map parameters) { + this.partition = partition; this.sdHash = sdHash; this.location = location; this.parameters = parameters; } public Partition getPartition() { - return p; + return partition; } public byte[] getSdHash() { @@ -774,8 +770,8 @@ public String getLocation() { } static class StorageDescriptorWrapper { - StorageDescriptor sd; - int refCount = 0; + private final StorageDescriptor sd; + private int refCount; StorageDescriptorWrapper(StorageDescriptor sd, int refCount) { this.sd = sd; @@ -796,11 +792,11 @@ public void populateCatalogsInCache(Collection catalogs) { Catalog catCopy = cat.deepCopy(); // ObjectStore also stores db name in lowercase catCopy.setName(catCopy.getName().toLowerCase()); + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); // Since we allow write operations on cache while prewarm is happening: // 1. Don't add databases that were deleted while we were preparing list for prewarm - // 2. Skip overwriting exisiting db object + // 2. 
Skip overwriting existing db object // (which is present because it was added after prewarm started) if (catalogsDeletedDuringPrewarm.contains(catCopy.getName())) { continue; @@ -809,51 +805,51 @@ public void populateCatalogsInCache(Collection catalogs) { catalogsDeletedDuringPrewarm.clear(); isCatalogCachePrewarmed = true; } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } } public Catalog getCatalogFromCache(String name) { Catalog cat = null; + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); if (catalogCache.get(name) != null) { cat = catalogCache.get(name).deepCopy(); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return cat; } public void addCatalogToCache(Catalog cat) { + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); Catalog catCopy = cat.deepCopy(); // ObjectStore also stores db name in lowercase catCopy.setName(catCopy.getName().toLowerCase()); catalogCache.put(cat.getName(), catCopy); isCatalogCacheDirty.set(true); } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } public void alterCatalogInCache(String catName, Catalog newCat) { + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); removeCatalogFromCache(catName); addCatalogToCache(newCat.deepCopy()); } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } public void removeCatalogFromCache(String name) { name = normalizeIdentifier(name); + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); // If db cache is not yet prewarmed, add this to a set which the prewarm thread can check // so that the prewarm thread does not add it back if (!isCatalogCachePrewarmed) { @@ -863,16 +859,16 @@ public void removeCatalogFromCache(String name) { isCatalogCacheDirty.set(true); } } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } public List listCachedCatalogs() { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); return new ArrayList<>(catalogCache.keySet()); } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } @@ -882,14 +878,14 @@ public boolean isCatalogCachePrewarmed() { public Database getDatabaseFromCache(String catName, String name) { Database db = null; + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); String key = CacheUtils.buildDbKey(catName, name); if (databaseCache.get(key) != null) { db = databaseCache.get(key).deepCopy(); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return db; } @@ -899,11 +895,11 @@ public void populateDatabasesInCache(List databases) { Database dbCopy = db.deepCopy(); // ObjectStore also stores db name in lowercase dbCopy.setName(dbCopy.getName().toLowerCase()); + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); // Since we allow write operations on cache while prewarm is happening: // 1. Don't add databases that were deleted while we were preparing list for prewarm - // 2. Skip overwriting exisiting db object + // 2. 
Skip overwriting existing db object // (which is present because it was added after prewarm started) String key = CacheUtils.buildDbKey(dbCopy.getCatalogName().toLowerCase(), dbCopy.getName().toLowerCase()); @@ -914,7 +910,7 @@ public void populateDatabasesInCache(List databases) { databasesDeletedDuringPrewarm.clear(); isDatabaseCachePrewarmed = true; } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } } @@ -924,8 +920,8 @@ public boolean isDatabaseCachePrewarmed() { } public void addDatabaseToCache(Database db) { + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); Database dbCopy = db.deepCopy(); // ObjectStore also stores db name in lowercase dbCopy.setName(dbCopy.getName().toLowerCase()); @@ -933,13 +929,13 @@ public void addDatabaseToCache(Database db) { databaseCache.put(CacheUtils.buildDbKey(dbCopy.getCatalogName(), dbCopy.getName()), dbCopy); isDatabaseCacheDirty.set(true); } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } public void removeDatabaseFromCache(String catName, String dbName) { + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); // If db cache is not yet prewarmed, add this to a set which the prewarm thread can check // so that the prewarm thread does not add it back String key = CacheUtils.buildDbKey(catName, dbName); @@ -950,40 +946,40 @@ public void removeDatabaseFromCache(String catName, String dbName) { isDatabaseCacheDirty.set(true); } } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } public List listCachedDatabases(String catName) { List results = new ArrayList<>(); + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); for (String pair : databaseCache.keySet()) { String[] n = CacheUtils.splitDbName(pair); if (catName.equals(n[0])) results.add(n[1]); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return results; } public List listCachedDatabases(String catName, String pattern) { List results = new ArrayList<>(); + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); for (String pair : databaseCache.keySet()) { String[] n = CacheUtils.splitDbName(pair); if (catName.equals(n[0])) { - n[1] = StringUtils.normalizeIdentifier(n[1]); + n[1] = normalizeIdentifier(n[1]); if (CacheUtils.matches(n[1], pattern)) { results.add(n[1]); } } } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return results; } @@ -993,19 +989,19 @@ public void removeDatabaseFromCache(String catName, String dbName) { * not exist. 
*/ public void alterDatabaseInCache(String catName, String dbName, Database newDb) { + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); removeDatabaseFromCache(catName, dbName); addDatabaseToCache(newDb.deepCopy()); isDatabaseCacheDirty.set(true); } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } public void refreshDatabasesInCache(List databases) { + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); if (isDatabaseCacheDirty.compareAndSet(true, false)) { LOG.debug("Skipping database cache update; the database list we have is dirty."); return; @@ -1015,25 +1011,25 @@ public void refreshDatabasesInCache(List databases) { addDatabaseToCache(db); } } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } public int getCachedDatabaseCount() { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); return databaseCache.size(); } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, List partitions, List partitionColStats, AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { - String catName = StringUtils.normalizeIdentifier(table.getCatName()); - String dbName = StringUtils.normalizeIdentifier(table.getDbName()); - String tableName = StringUtils.normalizeIdentifier(table.getTableName()); + String catName = normalizeIdentifier(table.getCatName()); + String dbName = normalizeIdentifier(table.getDbName()); + String tableName = normalizeIdentifier(table.getTableName()); // Since we allow write operations on cache while prewarm is happening: // 1. Don't add tables that were deleted while we were preparing list for prewarm if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableKey(catName, dbName, tableName))) { @@ -1050,11 +1046,10 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, "Cannot cache Database: {}'s Table: {}. Memory needed is {} bytes, " + "whereas the memory we have remaining is: {} bytes.", dbName, tableName, estimatedMemUsage, - (0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes)); + 0.8 * maxCacheSizeInBytes - currentCacheSizeInBytes); return false; - } else { - currentCacheSizeInBytes += estimatedMemUsage; } + currentCacheSizeInBytes += estimatedMemUsage; LOG.debug("Current cache size: {} bytes", currentCacheSizeInBytes); } if (!table.isSetPartitionKeys() && (tableColStats != null)) { @@ -1070,76 +1065,74 @@ public boolean populateTableInCache(Table table, ColumnStatistics tableColStats, } if (partitionColStats != null) { for (ColumnStatistics cs : partitionColStats) { - List partVal; try { - partVal = Warehouse.makeValsFromName(cs.getStatsDesc().getPartName(), null); + List partVal = Warehouse.makeValsFromName(cs.getStatsDesc().getPartName(), null); List colStats = cs.getStatsObj(); if (!tblWrapper.updatePartitionColStats(partVal, colStats)) { return false; } } catch (MetaException e) { - LOG.debug("Unable to cache partition column stats for table: " + tableName, e); + LOG.debug("Unable to cache partition column stats for table: {}", tableName, e); } } } tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); } + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); - // 2. Skip overwriting exisiting table object + // 2. 
Skip overwriting existing table object // (which is present because it was added after prewarm started) tableCache.putIfAbsent(CacheUtils.buildTableKey(catName, dbName, tableName), tblWrapper); return true; } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } private static boolean isCacheMemoryFull(long estimatedMemUsage) { - return (0.8*maxCacheSizeInBytes) < (currentCacheSizeInBytes + estimatedMemUsage); + return (0.8 * maxCacheSizeInBytes) < (currentCacheSizeInBytes + estimatedMemUsage); } public void completeTableCachePrewarm() { + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); tablesDeletedDuringPrewarm.clear(); isTableCachePrewarmed = true; } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } public Table getTableFromCache(String catName, String dbName, String tableName) { Table t = null; + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { t = CacheUtils.assemble(tblWrapper, this); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return t; } public TableWrapper addTableToCache(String catName, String dbName, String tblName, Table tbl) { + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl); tableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), wrapper); isTableCacheDirty.set(true); return wrapper; } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } private TableWrapper createTableWrapper(String catName, String dbName, String tblName, Table tbl) { - TableWrapper wrapper; Table tblCopy = tbl.deepCopy(); tblCopy.setCatName(normalizeIdentifier(catName)); tblCopy.setDbName(normalizeIdentifier(dbName)); @@ -1149,8 +1142,9 @@ private TableWrapper createTableWrapper(String catName, String dbName, String tb fs.setName(normalizeIdentifier(fs.getName())); } } + TableWrapper wrapper; if (tbl.getSd() != null) { - byte[] sdHash = MetaStoreServerUtils.hashStorageDescriptor(tbl.getSd(), md); + byte[] sdHash = MetaStoreServerUtils.hashStorageDescriptor(tbl.getSd(), MESSAGE_DIGEST); StorageDescriptor sd = tbl.getSd(); increSd(sd, sdHash); tblCopy.setSd(null); @@ -1162,8 +1156,8 @@ private TableWrapper createTableWrapper(String catName, String dbName, String tb } public void removeTableFromCache(String catName, String dbName, String tblName) { + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); // If table cache is not yet prewarmed, add this to a set which the prewarm thread can check // so that the prewarm thread does not add it back if (!isTableCachePrewarmed) { @@ -1177,53 +1171,53 @@ public void removeTableFromCache(String catName, String dbName, String tblName) } isTableCacheDirty.set(true); } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } public void alterTableInCache(String catName, String dbName, String tblName, Table newTable) { + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.updateTableObj(newTable, this); - String newDbName = StringUtils.normalizeIdentifier(newTable.getDbName()); - String newTblName = StringUtils.normalizeIdentifier(newTable.getTableName()); + String newDbName = 
normalizeIdentifier(newTable.getDbName()); + String newTblName = normalizeIdentifier(newTable.getTableName()); tableCache.put(CacheUtils.buildTableKey(catName, newDbName, newTblName), tblWrapper); isTableCacheDirty.set(true); } } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } public List
<Table> listCachedTables(String catName, String dbName) { List<Table>
tables = new ArrayList<>(); + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); for (TableWrapper wrapper : tableCache.values()) { if (wrapper.sameDatabase(catName, dbName)) { tables.add(CacheUtils.assemble(wrapper, this)); } } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return tables; } public List listCachedTableNames(String catName, String dbName) { List tableNames = new ArrayList<>(); + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); for (TableWrapper wrapper : tableCache.values()) { if (wrapper.sameDatabase(catName, dbName)) { - tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); + tableNames.add(normalizeIdentifier(wrapper.getTable().getTableName())); } } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return tableNames; } @@ -1231,19 +1225,19 @@ public void alterTableInCache(String catName, String dbName, String tblName, Tab public List listCachedTableNames(String catName, String dbName, String pattern, short maxTables) { List tableNames = new ArrayList<>(); + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); int count = 0; for (TableWrapper wrapper : tableCache.values()) { if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) && (maxTables == -1 || count < maxTables)) { - tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); + tableNames.add(normalizeIdentifier(wrapper.getTable().getTableName())); count++; } } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return tableNames; } @@ -1251,31 +1245,31 @@ public void alterTableInCache(String catName, String dbName, String tblName, Tab public List listCachedTableNames(String catName, String dbName, String pattern, TableType tableType) { List tableNames = new ArrayList<>(); + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); for (TableWrapper wrapper : tableCache.values()) { if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) && wrapper.getTable().getTableType().equals(tableType.toString())) { - tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); + tableNames.add(normalizeIdentifier(wrapper.getTable().getTableName())); } } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return tableNames; } public void refreshTablesInCache(String catName, String dbName, List
tables) { + CACHE_LOCK.writeLock().lock(); try { - cacheLock.writeLock().lock(); if (isTableCacheDirty.compareAndSet(true, false)) { LOG.debug("Skipping table cache update; the table list we have is dirty."); return; } Map newTableCache = new HashMap<>(); for (Table tbl : tables) { - String tblName = StringUtils.normalizeIdentifier(tbl.getTableName()); + String tblName = normalizeIdentifier(tbl.getTableName()); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { @@ -1288,92 +1282,92 @@ public void refreshTablesInCache(String catName, String dbName, List
tabl tableCache.clear(); tableCache = newTableCache; } finally { - cacheLock.writeLock().unlock(); + CACHE_LOCK.writeLock().unlock(); } } public List getTableColStatsFromCache(String catName, String dbName, String tblName, List colNames) { List colStatObjs = new ArrayList<>(); + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { colStatObjs = tblWrapper.getCachedTableColStats(colNames); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return colStatObjs; } public void removeTableColStatsFromCache(String catName, String dbName, String tblName, String colName) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removeTableColStats(colName); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public void removeAllTableColStatsFromCache(String catName, String dbName, String tblName) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removeAllTableColStats(); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public void updateTableColStatsInCache(String catName, String dbName, String tableName, List colStatsForTable) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.updateTableColStats(colStatsForTable); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public void refreshTableColStatsInCache(String catName, String dbName, String tableName, List colStatsForTable) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.refreshTableColStats(colStatsForTable); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public int getCachedTableCount() { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); return tableCache.size(); } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public List getTableMeta(String catName, String dbNames, String tableNames, List tableTypes) { List tableMetas = new ArrayList<>(); + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); for (String dbName : listCachedDatabases(catName)) { if (CacheUtils.matches(dbName, dbNames)) { for (Table table : listCachedTables(catName, dbName)) { @@ -1390,47 +1384,47 @@ public int getCachedTableCount() { } } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return tableMetas; } public void addPartitionToCache(String catName, String dbName, String tblName, Partition part) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.cachePartition(part, this); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public void addPartitionsToCache(String catName, String dbName, String 
tblName, List<Partition> parts) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.cachePartitions(parts, this); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public Partition getPartitionFromCache(String catName, String dbName, String tblName, List<String> partVals) { Partition part = null; + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { part = tblWrapper.getPartition(partVals, this); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return part; } @@ -1438,14 +1432,14 @@ public Partition getPartitionFromCache(String catName, String dbName, String tbl public boolean existPartitionFromCache(String catName, String dbName, String tblName, List<String> partVals) { boolean existsPart = false; + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { existsPart = tblWrapper.containsPartition(partVals); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return existsPart; } @@ -1453,191 +1447,191 @@ public boolean existPartitionFromCache(String catName, String dbName, String tbl public Partition removePartitionFromCache(String catName, String dbName, String tblName, List<String> partVals) { Partition part = null; + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { part = tblWrapper.removePartition(partVals, this); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return part; } public void removePartitionsFromCache(String catName, String dbName, String tblName, List<List<String>> partVals) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removePartitions(partVals, this); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public List<Partition> listCachedPartitions(String catName, String dbName, String tblName, int max) { - List<Partition> parts = new ArrayList(); + List<Partition> parts = new ArrayList<>(); + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { parts = tblWrapper.listPartitions(max, this); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return parts; } public void alterPartitionInCache(String catName, String dbName, String tblName, List<String> partVals, Partition newPart) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.alterPartition(partVals, newPart, this); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public void alterPartitionsInCache(String catName, String dbName, String tblName, List<List<String>> partValsList, List<Partition> newParts) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = 
tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.alterPartitions(partValsList, newParts, this); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public void refreshPartitionsInCache(String catName, String dbName, String tblName, List partitions) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.refreshPartitions(partitions, this); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public void removePartitionColStatsFromCache(String catName, String dbName, String tblName, List partVals, String colName) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removePartitionColStats(partVals, colName); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public void removeAllPartitionColStatsFromCache(String catName, String dbName, String tblName) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removeAllPartitionColStats(); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public void updatePartitionColStatsInCache(String catName, String dbName, String tableName, List partVals, List colStatsObjs) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.updatePartitionColStats(partVals, colStatsObjs); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public ColumnStatisticsObj getPartitionColStatsFromCache(String catName, String dbName, String tblName, List partVal, String colName) { ColumnStatisticsObj colStatObj = null; + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { colStatObj = tblWrapper.getPartitionColStats(partVal, colName); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return colStatObj; } public void refreshPartitionColStatsInCache(String catName, String dbName, String tblName, List partitionColStats) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.refreshPartitionColStats(partitionColStats); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public List getAggrStatsFromCache(String catName, String dbName, String tblName, List colNames, StatsType statsType) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { return tblWrapper.getAggrPartitionColStats(colNames, statsType); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } return null; } public void addAggregateStatsToCache(String catName, String dbName, String tblName, AggrStats 
aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } public void refreshAggregateStatsInCache(String catName, String dbName, String tblName, AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + CACHE_LOCK.readLock().lock(); try { - cacheLock.readLock().lock(); TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.refreshAggrPartitionColStats(aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); } } finally { - cacheLock.readLock().unlock(); + CACHE_LOCK.readLock().unlock(); } } @@ -1683,7 +1677,7 @@ public synchronized StorageDescriptor getSdFromCache(byte[] sdHash) { } /** - * This resets the contents of the cataog cache so that we can re-fill it in another test. + * This resets the contents of the catalog cache so that we can re-fill it in another test. */ void resetCatalogCache() { isCatalogCachePrewarmed = false; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index c13e538bc4..22cfe13fde 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -274,23 +274,23 @@ public boolean addPartition(Partition part) } @Override - public Partition getPartition(String catName, String dbName, String tableName, List partVals) + public Partition getPartition(String catName, String dbName, String tableName, List partitionValues) throws MetaException, NoSuchObjectException { - return objectStore.getPartition(catName, dbName, tableName, partVals); + return objectStore.getPartition(catName, dbName, tableName, partitionValues); } @Override public Partition getPartition(String catName, String dbName, String tableName, - List partVals, String writeIdList) + List partitionValues, String writeIdList) throws MetaException, NoSuchObjectException { - return objectStore.getPartition(catName, dbName, tableName, partVals, writeIdList); + return objectStore.getPartition(catName, dbName, tableName, partitionValues, writeIdList); } @Override - public boolean dropPartition(String catName, String dbName, String tableName, List partVals) + public boolean dropPartition(String catName, String dbName, String tableName, List partitionValues) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - return objectStore.dropPartition(catName, dbName, tableName, partVals); + return objectStore.dropPartition(catName, dbName, tableName, partitionValues); } @Override @@ -306,16 +306,16 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li } @Override - public Table alterTable(String catName, String dbName, String name, Table newTable, + public Table alterTable(String catName, String dbName, String tableName, Table newTable, String queryValidWriteIds) throws 
InvalidObjectException, MetaException { - return objectStore.alterTable(catName, dbName, name, newTable, queryValidWriteIds); + return objectStore.alterTable(catName, dbName, tableName, newTable, queryValidWriteIds); } @Override - public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) + public void updateCreationMetadata(String catName, String dbName, String tableName, CreationMetadata cm) throws MetaException { - objectStore.updateCreationMetadata(catName, dbname, tablename, cm); + objectStore.updateCreationMetadata(catName, dbName, tableName, cm); } @Override @@ -358,30 +358,30 @@ public void updateCreationMetadata(String catName, String dbname, String tablena } @Override - public List listPartitionNames(String catName, String dbName, String tblName, short maxParts) + public List listPartitionNames(String catName, String dbName, String tableName, short maxPartitions) throws MetaException { - return objectStore.listPartitionNames(catName, dbName, tblName, maxParts); + return objectStore.listPartitionNames(catName, dbName, tableName, maxPartitions); } @Override - public PartitionValuesResponse listPartitionValues(String catName, String db_name, - String tbl_name, List cols, boolean applyDistinct, String filter, + public PartitionValuesResponse listPartitionValues(String catName, String dbName, + String tableName, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException { return null; } @Override - public Partition alterPartition(String catName, String dbName, String tblName, List partVals, - Partition newPart, String queryValidWriteIds) throws InvalidObjectException, MetaException { - return objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryValidWriteIds); + public Partition alterPartition(String catName, String dbName, String tableName, List partitionValues, + Partition newPartition, String queryValidWriteIds) throws InvalidObjectException, MetaException { + return objectStore.alterPartition(catName, dbName, tableName, partitionValues, newPartition, queryValidWriteIds); } @Override - public List alterPartitions(String catName, String dbName, String tblName, - List> partValsList, List newParts, + public List alterPartitions(String catName, String dbName, String tableName, + List> partitionValuesList, List newPartitions, long writeId, String queryValidWriteIds) throws InvalidObjectException, MetaException { return objectStore.alterPartitions( - catName, dbName, tblName, partValsList, newParts, writeId, queryValidWriteIds); + catName, dbName, tableName, partitionValuesList, newPartitions, writeId, queryValidWriteIds); } @Override @@ -601,17 +601,17 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN } @Override - public List listPartitionNamesPs(String catName, String dbName, String tblName, - List partVals, short maxParts) + public List listPartitionNamesPs(String catName, String dbName, String tableName, + List partitionValues, short maxPartitions) throws MetaException, NoSuchObjectException { - return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts); + return objectStore.listPartitionNamesPs(catName, dbName, tableName, partitionValues, maxPartitions); } @Override - public List listPartitionsPsWithAuth(String catName, String dbName, String tblName, - List partVals, short maxParts, String userName, List groupNames) + public List listPartitionsPsWithAuth(String catName, String dbName, String 
tableName, + List partitionValues, short maxPartitions, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { - return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, + return objectStore.listPartitionsPsWithAuth(catName, dbName, tableName, partitionValues, maxPartitions, userName, groupNames); } @@ -801,9 +801,9 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List partVals) + List partKeys, List partitionValues) throws MetaException, NoSuchObjectException { - return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals); + return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partitionValues); } @Override @@ -861,14 +861,30 @@ public Function getFunction(String catName, String dbName, String funcName) } @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, + @Deprecated + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, + List colNames) throws MetaException, NoSuchObjectException { + return getAggrStatsFor(catName, dbName, tblName, partNames, colNames, null); + } + + @Override + @Deprecated + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, + List partNames, List colNames, + String writeIdList) + throws MetaException, NoSuchObjectException { + return getAggrStatsFor(catName, dbName, tblName, partNames, colNames, writeIdList); + } + + @Override + public AggrStats getAggrStatsFor(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException { return null; } @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, + public AggrStats getAggrStatsFor(String catName, String dbName, String tblName, List partNames, List colNames, String writeIdList) @@ -877,8 +893,8 @@ public AggrStats get_aggr_stats_for(String catName, String dbName, } @Override - public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { - return objectStore.getNextNotification(rqst); + public NotificationEventResponse getNextNotification(NotificationEventRequest request) { + return objectStore.getNextNotification(request); } @Override @@ -897,8 +913,8 @@ public CurrentNotificationEventId getCurrentNotificationEventId() { } @Override - public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) { - return objectStore.getNotificationEventsCount(rqst); + public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest request) { + return objectStore.getNotificationEventsCount(request); } @Override @@ -948,43 +964,43 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public List getPrimaryKeys(String catName, String db_name, String tbl_name) + public List getPrimaryKeys(String catName, String dbName, String tableName) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getForeignKeys(String catName, String parent_db_name, - String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) + public List getForeignKeys(String catName, String parentDbName, + String parentTableName, String foreignDbName, String foreignTblName) throws MetaException { // TODO Auto-generated method stub return null; } 
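Note on the get_aggr_stats_for hunk above: the rename to getAggrStatsFor is staged rather than atomic — the old snake_case spelling survives as a @Deprecated shim that delegates to the new camelCase method, so out-of-tree RawStore implementations and callers keep compiling while all logic moves behind the new name. A minimal sketch of that delegation pattern (the StatsStore interface and AggrStats placeholder below are illustrative stand-ins, not the real RawStore contract):

import java.util.List;

// Hypothetical stand-in for the Thrift-generated stats type, for illustration only.
class AggrStats { }

interface StatsStore {
  // New canonical spelling; implementations override only this.
  AggrStats getAggrStatsFor(String catName, String dbName, String tblName,
      List<String> partNames, List<String> colNames);

  // Legacy spelling kept as a deprecated shim: existing callers still
  // compile, but the call is forwarded to the new method.
  @Deprecated
  default AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
      List<String> partNames, List<String> colNames) {
    return getAggrStatsFor(catName, dbName, tblName, partNames, colNames);
  }
}

Once every caller has migrated, the deprecated default method can be deleted without touching any implementing class.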
@Override - public List getUniqueConstraints(String catName, String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getNotNullConstraints(String catName, String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getDefaultConstraints(String catName, String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getCheckConstraints(String catName, String db_name, String tbl_name) + public List getCheckConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO Auto-generated method stub return null; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index e943f17a36..81da12a98c 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -179,19 +179,19 @@ public void createDatabase(Database db) throws InvalidObjectException, MetaExcep } @Override - public Database getDatabase(String catName, String name) throws NoSuchObjectException { + public Database getDatabase(String catName, String dbName) throws NoSuchObjectException { return null; } @Override - public boolean dropDatabase(String catName, String dbname) throws NoSuchObjectException, MetaException { + public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException { return false; } @Override - public boolean alterDatabase(String catName, String dbname, Database db) throws NoSuchObjectException, + public boolean alterDatabase(String catName, String dbName, Database db) throws NoSuchObjectException, MetaException { return false; @@ -258,21 +258,21 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE } @Override - public Partition getPartition(String catName, String dbName, String tableName, List part_vals) + public Partition getPartition(String catName, String dbName, String tableName, List partitionValues) throws MetaException, NoSuchObjectException { return null; } @Override - public Partition getPartition(String catName, String dbName, String tableName, List part_vals, + public Partition getPartition(String catName, String dbName, String tableName, List partitionValues, String writeIdList) throws MetaException, NoSuchObjectException { return null; } @Override - public boolean dropPartition(String catName, String dbName, String tableName, List part_vals) + public boolean dropPartition(String catName, String dbName, String tableName, List partitionValues) throws MetaException { return false; @@ -292,13 +292,13 @@ public boolean dropPartition(String catName, String dbName, String tableName, Li } @Override - public Table alterTable(String catName, String dbname, String name, Table newTable, String queryValidWriteIds) + public Table alterTable(String catName, String dbName, String 
tableName, Table newTable, String queryValidWriteIds) throws InvalidObjectException, MetaException { return newTable; } @Override - public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) + public void updateCreationMetadata(String catName, String dbName, String tableName, CreationMetadata cm) throws MetaException { } @@ -325,7 +325,7 @@ public void updateCreationMetadata(String catName, String dbname, String tablena } @Override - public List
<Table> getTableObjectsByName(String catName, String dbname, List tableNames) + public List<Table>
getTableObjectsByName(String catName, String dbName, List tableNames) throws MetaException, UnknownDBException { return Collections.emptyList(); @@ -338,22 +338,22 @@ public void updateCreationMetadata(String catName, String dbname, String tablena } @Override - public List listTableNamesByFilter(String catName, String dbName, String filter, short max_tables) + public List listTableNamesByFilter(String catName, String dbName, String filter, short maxTables) throws MetaException, UnknownDBException { return Collections.emptyList(); } @Override - public List listPartitionNames(String catName, String db_name, String tbl_name, short max_parts) + public List listPartitionNames(String catName, String dbName, String tableName, short maxPartitions) throws MetaException { return Collections.emptyList(); } @Override - public PartitionValuesResponse listPartitionValues(String catName, String db_name, - String tbl_name, List cols, + public PartitionValuesResponse listPartitionValues(String catName, String dbName, + String tableName, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException { @@ -361,16 +361,16 @@ public PartitionValuesResponse listPartitionValues(String catName, String db_nam } @Override - public Partition alterPartition(String catName, String db_name, String tbl_name, List part_vals, - Partition new_part, String queryValidWriteIds) throws InvalidObjectException, MetaException { - return new_part; + public Partition alterPartition(String catName, String dbName, String tableName, List partitionValues, + Partition newPartition, String queryValidWriteIds) throws InvalidObjectException, MetaException { + return newPartition; } @Override - public List alterPartitions(String catName, String db_name, String tbl_name, - List> part_vals_list, List new_parts, + public List alterPartitions(String catName, String dbName, String tableName, + List> partitionValuesList, List newPartitions, long writeId, String queryValidWriteIds) throws InvalidObjectException, MetaException { - return new_parts; + return newPartitions; } @Override @@ -589,7 +589,7 @@ public Role getRole(String roleName) throws NoSuchObjectException { @Override public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, - String user_name, List group_names) throws MetaException, NoSuchObjectException, + String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { return null; @@ -604,15 +604,15 @@ public Partition getPartitionWithAuth(String catName, String dbName, String tblN } @Override - public List listPartitionNamesPs(String catName, String db_name, String tbl_name, List part_vals, - short max_parts) throws MetaException, NoSuchObjectException { + public List listPartitionNamesPs(String catName, String dbName, String tableName, List partitionValues, + short maxPartitions) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override - public List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, - List part_vals, short max_parts, String userName, List groupNames) + public List listPartitionsPsWithAuth(String catName, String dbName, String tableName, + List partitionValues, short maxPartitions, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { return Collections.emptyList(); @@ -725,13 +725,13 @@ public boolean removeMasterKey(Integer keySeq) { @Override public ColumnStatistics 
getTableColumnStatistics(String catName, String dbName, String tableName, - List colName) throws MetaException, NoSuchObjectException { + List colNames) throws MetaException, NoSuchObjectException { return null; } @Override public ColumnStatistics getTableColumnStatistics( - String catName, String dbName, String tableName, List colName, + String catName, String dbName, String tableName, List colNames, String writeIdList) throws MetaException, NoSuchObjectException { return null; @@ -798,7 +798,7 @@ public void setMetaStoreSchemaVersion(String version, String comment) throws Met @Override public boolean doesPartitionExist(String catName, String dbName, String tableName, - List partKeys, List partVals) + List partKeys, List partitionValues) throws MetaException, NoSuchObjectException { return false; } @@ -853,14 +853,30 @@ public Function getFunction(String catName, String dbName, String funcName) } @Override - public AggrStats get_aggr_stats_for(String catName, String dbName, + @Deprecated + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, + List colNames) throws MetaException, NoSuchObjectException { + return getAggrStatsFor(catName, dbName, tblName, partNames, colNames, null); + } + + @Override + @Deprecated + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, + List partNames, List colNames, + String writeIdList) + throws MetaException, NoSuchObjectException { + return getAggrStatsFor(catName, dbName, tblName, partNames, colNames, writeIdList); + } + + @Override + public AggrStats getAggrStatsFor(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException { return null; } @Override - public AggrStats get_aggr_stats_for( + public AggrStats getAggrStatsFor( String catName, String dbName, String tblName, List partNames, List colNames, String writeIdList) throws MetaException, NoSuchObjectException { @@ -868,7 +884,7 @@ public AggrStats get_aggr_stats_for( } @Override - public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { + public NotificationEventResponse getNextNotification(NotificationEventRequest request) { return null; } @@ -888,7 +904,7 @@ public CurrentNotificationEventId getCurrentNotificationEventId() { } @Override - public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) { + public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest request) { return null; } @@ -938,43 +954,43 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public List getPrimaryKeys(String catName, String db_name, String tbl_name) + public List getPrimaryKeys(String catName, String dbName, String tableName) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getForeignKeys(String catName, String parent_db_name, - String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) + public List getForeignKeys(String catName, String parentDbName, + String parentTableName, String foreignDbName, String foreignTblName) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getUniqueConstraints(String catName, String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getNotNullConstraints(String 
catName, String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getDefaultConstraints(String catName, String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getCheckConstraints(String catName, String db_name, String tbl_name) + public List getCheckConstraints(String catName, String dbName, String tblName) throws MetaException { // TODO Auto-generated method stub return null; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java index 9daff370ef..b2336649c7 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java @@ -161,34 +161,34 @@ public Table getTable(String catName, String dbName, String tableName, String wr @Override public Partition getPartition(String catName, String dbName, String tableName, - List partVals) throws NoSuchObjectException, MetaException { - return getPartitionModifier.apply(super.getPartition(catName, dbName, tableName, partVals)); + List partitionValues) throws NoSuchObjectException, MetaException { + return getPartitionModifier.apply(super.getPartition(catName, dbName, tableName, partitionValues)); } @Override - public List listPartitionNames(String catName, String dbName, String tableName, short max) + public List listPartitionNames(String catName, String dbName, String tableName, short maxPartitions) throws MetaException { - return listPartitionNamesModifier.apply(super.listPartitionNames(catName, dbName, tableName, max)); + return listPartitionNamesModifier.apply(super.listPartitionNames(catName, dbName, tableName, maxPartitions)); } @Override - public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { - return getNextNotificationModifier.apply(super.getNextNotification(rqst)); + public NotificationEventResponse getNextNotification(NotificationEventRequest request) { + return getNextNotificationModifier.apply(super.getNextNotification(request)); } @Override - public Table alterTable(String catName, String dbname, String name, Table newTable, String queryValidWriteIds) + public Table alterTable(String catName, String dbName, String tableName, Table newTable, String queryValidWriteIds) throws InvalidObjectException, MetaException { if (alterTableModifier != null) { - CallerArguments args = new CallerArguments(dbname); - args.tblName = name; + CallerArguments args = new CallerArguments(dbName); + args.tblName = tableName; Boolean success = alterTableModifier.apply(args); if ((success != null) && !success) { throw new MetaException("InjectableBehaviourObjectStore: Invalid alterTable operation on Catalog : " + catName + - " DB: " + dbname + " table: " + name); + " DB: " + dbName + " table: " + tableName); } } - return super.alterTable(catName, dbname, name, newTable, queryValidWriteIds); + return super.alterTable(catName, dbName, tableName, newTable, queryValidWriteIds); } @Override @@ -262,12 +262,12 @@ public void 
createFunction(Function func) throws InvalidObjectException, MetaExc } @Override - public boolean alterDatabase(String catalogName, String dbname, Database db) + public boolean alterDatabase(String catalogName, String dbName, Database db) throws NoSuchObjectException, MetaException { if (callerVerifier != null) { - CallerArguments args = new CallerArguments(dbname); + CallerArguments args = new CallerArguments(dbName); callerVerifier.apply(args); } - return super.alterDatabase(catalogName, dbname, db); + return super.alterDatabase(catalogName, dbName, db); } } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java index 27c5bba5f7..396226bd02 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -198,7 +197,7 @@ public void checkStats(AggrStats aggrStats) throws Exception { for (int i = 0; i < 10; i++) { partNames.add("ds=" + i); } - AggrStats aggrStats = store.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tableName, partNames, + AggrStats aggrStats = store.getAggrStatsFor(DEFAULT_CATALOG_NAME, dbName, tableName, partNames, Arrays.asList("col1")); statChecker.checkStats(aggrStats); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index bb20d9f42a..a5eff867bf 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -52,14 +52,11 @@ import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; -import jline.internal.Log; - import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; @Category(MetastoreCheckinTest.class) @@ -728,9 +725,9 @@ public void testAggrStatsRepeatedRead() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.getAggrStatsFor(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); - aggrStats = 
cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.getAggrStatsFor(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); } @@ -800,10 +797,10 @@ public void testPartitionAggrStats() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.getAggrStatsFor(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40); - aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.getAggrStatsFor(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40); } @@ -888,10 +885,10 @@ public void testPartitionAggrStatsBitVector() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.getAggrStatsFor(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5); - aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.getAggrStatsFor(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5); }
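A closing note on the recurring mechanical change in the SharedCache hunks earlier in this patch: every lock()/try pair is rewritten so the lock is acquired immediately before the try block instead of as the try's first statement. If lock() sits inside the try and the acquisition fails, control still falls through to the finally block, which then calls unlock() on a lock that was never taken — surfacing as IllegalMonitorStateException and masking the original failure. Acquiring before the try guarantees the finally-side unlock() only runs while the lock is held. A minimal sketch of the idiom the patch converges on, using a toy cache (the NameCache class and its fields are illustrative, not the real SharedCache; the SCREAMING_SNAKE name mirrors the patch's cacheLock-to-CACHE_LOCK constant rename):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class NameCache {
  // Static final lock, named as a constant per the patch's convention.
  private static final ReentrantReadWriteLock CACHE_LOCK = new ReentrantReadWriteLock();
  private static final Map<String, String> CACHE = new HashMap<>();

  public static String get(String key) {
    CACHE_LOCK.readLock().lock();   // acquire first...
    try {                           // ...then enter the guarded region
      return CACHE.get(key);
    } finally {
      CACHE_LOCK.readLock().unlock(); // runs only when the lock is truly held
    }
  }

  public static void put(String key, String value) {
    CACHE_LOCK.writeLock().lock();
    try {
      CACHE.put(key, value);
    } finally {
      CACHE_LOCK.writeLock().unlock();
    }
  }
}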
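Similarly, the LOG.debug rewrites in refreshAggrPartitionColStats and populateTableInCache replace string concatenation with SLF4J's parameterized {} form: the message is assembled only when DEBUG is actually enabled, and a Throwable passed after the placeholder arguments is logged with its stack trace. A short sketch (the LoggingSketch class and its method are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

  void example(String tableName, Exception e) {
    // Concatenation builds the message even when DEBUG is off:
    //   LOG.debug("Skipping update for table: " + tableName + "; dirty");
    // The parameterized form defers formatting until the level check passes:
    LOG.debug("Skipping aggregate stats cache update for table: {}; the list is dirty",
        tableName);
    // A trailing exception with no matching {} gets its stack trace logged:
    LOG.debug("Unable to cache partition column stats for table: {}", tableName, e);
  }
}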