diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 9eb1193a27120b5167f92daf67bf6a1c4e1d9927..1717daf1dfd82c82c156f8ceeeb90c1d710b8d10 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -19,7 +19,9 @@ package org.apache.hadoop.hive.metastore;
 
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.parseDbName;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
 
 import java.io.IOException;
@@ -127,6 +129,7 @@
   private String tokenStrForm;
   private final boolean localMetaStore;
   private final MetaStoreFilterHook filterHook;
+  private static boolean isClientFilterEnabled = false;
   private final URIResolverHook uriResolverHook;
   private final int fileMetadataBatchSize;
 
@@ -164,6 +167,7 @@ public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader, Bo
     }
     version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION;
     filterHook = loadFilterHooks();
+    isClientFilterEnabled = MetastoreConf.getBoolVar(conf, ConfVars.METASTORE_CLIENT_FILTER_ENABLED);
     uriResolverHook = loadUriResolverHook();
     fileMetadataBatchSize = MetastoreConf.getIntVar(
         conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX);
@@ -731,13 +735,13 @@ public void alterCatalog(String catalogName, Catalog newCatalog) throws TExcepti
   @Override
   public Catalog getCatalog(String catName) throws TException {
     GetCatalogResponse rsp = client.get_catalog(new GetCatalogRequest(catName));
-    return rsp == null ? null : filterHook.filterCatalog(rsp.getCatalog());
+    return rsp == null ? null : (isClientFilterEnabled ? filterHook.filterCatalog(rsp.getCatalog()) : rsp.getCatalog());
   }
 
   @Override
   public List<String> getCatalogs() throws TException {
     GetCatalogsResponse rsp = client.get_catalogs();
-    return rsp == null ? null : filterHook.filterCatalogs(rsp.getNames());
+    return rsp == null ? null : (isClientFilterEnabled ? filterHook.filterCatalogs(rsp.getNames()) : rsp.getNames());
   }
 
   @Override
@@ -808,7 +812,8 @@ public int add_partitions(List<Partition> new_parts) throws TException {
     req.setCatName(part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf));
     req.setNeedResult(needResults);
     AddPartitionsResult result = client.add_partitions_req(req);
-    return needResults ? filterHook.filterPartitions(result.getPartitions()) : null;
+    return needResults ?
+        (isClientFilterEnabled ? filterHook.filterPartitions(result.getPartitions()) : result.getPartitions()) : null;
   }
 
   @Override
@@ -1619,8 +1624,9 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException
 
   @Override
   public List<String> getDatabases(String catName, String databasePattern) throws TException {
-    return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName(
-        catName, databasePattern, conf)));
+    List<String> databases = client.get_databases(prependCatalogToDbName(
+        catName, databasePattern, conf));
+    return isClientFilterEnabled ? filterHook.filterDatabases(databases) : databases;
   }
 
   @Override
@@ -1630,7 +1636,8 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException
 
   @Override
   public List<String> getAllDatabases(String catName) throws TException {
-    return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName(catName, null, conf)));
+    List<String> databases = client.get_databases(prependCatalogToDbName(catName, null, conf));
+    return isClientFilterEnabled ? filterHook.filterDatabases(databases) : databases;
   }
 
   @Override
@@ -1644,7 +1651,8 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException
       int max_parts) throws TException {
     List<Partition> parts = client.get_partitions(prependCatalogToDbName(catName, db_name, conf),
         tbl_name, shrinkMaxtoShort(max_parts));
-    return deepCopyPartitions(filterHook.filterPartitions(parts));
+    parts = isClientFilterEnabled ? filterHook.filterPartitions(parts) : parts;
+    return deepCopyPartitions(parts);
   }
 
   @Override
@@ -1655,8 +1663,11 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in
   @Override
   public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName,
       int maxParts) throws TException {
-    return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
-        client.get_partitions_pspec(prependCatalogToDbName(catName, dbName, conf), tableName, maxParts)));
+    List<PartitionSpec> partitionSpecs =
+        client.get_partitions_pspec(prependCatalogToDbName(catName, dbName, conf), tableName, maxParts);
+    partitionSpecs = isClientFilterEnabled ? filterHook.filterPartitionSpecs(partitionSpecs) : partitionSpecs;
+
+    return PartitionSpecProxy.Factory.get(partitionSpecs);
   }
 
   @Override
@@ -1670,7 +1681,8 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri
       List<String> part_vals, int max_parts) throws TException {
     List<Partition> parts = client.get_partitions_ps(prependCatalogToDbName(catName, db_name, conf),
         tbl_name, part_vals, shrinkMaxtoShort(max_parts));
-    return deepCopyPartitions(filterHook.filterPartitions(parts));
+    parts = isClientFilterEnabled ? filterHook.filterPartitions(parts) : parts;
+    return deepCopyPartitions(parts);
   }
 
   @Override
@@ -1687,7 +1699,8 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri
       List<String> groupNames) throws TException {
     List<Partition> parts = client.get_partitions_with_auth(prependCatalogToDbName(catName,
         dbName, conf), tableName, shrinkMaxtoShort(maxParts), userName, groupNames);
-    return deepCopyPartitions(filterHook.filterPartitions(parts));
+    parts = isClientFilterEnabled ? filterHook.filterPartitions(parts) : parts;
+    return deepCopyPartitions(parts);
   }
 
   @Override
@@ -1706,7 +1719,8 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri
       throws TException {
     List<Partition> parts = client.get_partitions_ps_with_auth(prependCatalogToDbName(catName,
         dbName, conf), tableName, partialPvals, shrinkMaxtoShort(maxParts), userName, groupNames);
-    return deepCopyPartitions(filterHook.filterPartitions(parts));
+    parts = isClientFilterEnabled ? filterHook.filterPartitions(parts) : parts;
+    return deepCopyPartitions(parts);
  }
 
   @Override
@@ -1720,7 +1734,8 @@ public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, Stri
       String filter, int max_parts) throws TException {
     List<Partition> parts = client.get_partitions_by_filter(prependCatalogToDbName(
         catName, db_name, conf), tbl_name, filter, shrinkMaxtoShort(max_parts));
-    return deepCopyPartitions(filterHook.filterPartitions(parts));
+    parts = isClientFilterEnabled ? filterHook.filterPartitions(parts) : parts;
+    return deepCopyPartitions(parts);
   }
 
   @Override
@@ -1734,9 +1749,12 @@ public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_
   public PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name,
       String tbl_name, String filter, int max_parts) throws TException {
-    return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+    List<PartitionSpec> partitionSpecs =
         client.get_part_specs_by_filter(prependCatalogToDbName(catName, db_name, conf), tbl_name, filter,
-            max_parts)));
+            max_parts);
+    partitionSpecs = isClientFilterEnabled ?
+        filterHook.filterPartitionSpecs(partitionSpecs) : partitionSpecs;
+    return PartitionSpecProxy.Factory.get(partitionSpecs);
   }
 
   @Override
@@ -1772,7 +1790,10 @@ public boolean listPartitionsByExpr(String catName, String db_name, String tbl_n
       throw new IncompatibleMetastoreException(
           "Metastore doesn't support listPartitionsByExpr: " + te.getMessage());
     }
-    r.setPartitions(filterHook.filterPartitions(r.getPartitions()));
+
+    List<Partition> partitions = isClientFilterEnabled ?
+        filterHook.filterPartitions(r.getPartitions()) : r.getPartitions();
+    r.setPartitions(partitions);
     // TODO: in these methods, do we really need to deepcopy?
     deepCopyPartitions(r.getPartitions(), result);
     return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst.
@@ -1786,7 +1807,7 @@ public Database getDatabase(String name) throws TException {
   @Override
   public Database getDatabase(String catalogName, String databaseName) throws TException {
     Database d = client.get_database(prependCatalogToDbName(catalogName, databaseName, conf));
-    return deepCopy(filterHook.filterDatabase(d));
+    return deepCopy(isClientFilterEnabled ? filterHook.filterDatabase(d) : d);
   }
 
   @Override
@@ -1799,7 +1820,7 @@ public Partition getPartition(String db_name, String tbl_name, List<String> part
   public Partition getPartition(String catName, String dbName, String tblName,
       List<String> partVals) throws TException {
     Partition p = client.get_partition(prependCatalogToDbName(catName, dbName, conf), tblName, partVals);
-    return deepCopy(filterHook.filterPartition(p));
+    return deepCopy(isClientFilterEnabled ? filterHook.filterPartition(p) : p);
   }
 
   @Override
@@ -1811,9 +1832,12 @@ public Partition getPartition(String catName, String dbName, String tblName,
   @Override
   public List<Partition> getPartitionsByNames(String catName, String db_name, String tbl_name,
       List<String> part_names) throws TException {
+    // For improved performance, first check whether the given db and table are filtered out.
+    // If so, there is no need to query the partitions at all.
+    checkDbAndTableFilters(db_name, tbl_name);
     List<Partition> parts = client.get_partitions_by_names(prependCatalogToDbName(catName, db_name, conf),
         tbl_name, part_names);
-    return deepCopyPartitions(filterHook.filterPartitions(parts));
+    return deepCopyPartitions(isClientFilterEnabled ? filterHook.filterPartitions(parts) : parts);
   }
 
   @Override
@@ -1839,7 +1863,7 @@ public Partition getPartitionWithAuthInfo(String catName, String dbName, String
       List<String> groupNames) throws TException {
     Partition p = client.get_partition_with_auth(prependCatalogToDbName(catName, dbName, conf),
         tableName, pvals, userName, groupNames);
-    return deepCopy(filterHook.filterPartition(p));
+    return deepCopy(isClientFilterEnabled ? filterHook.filterPartition(p) : p);
   }
 
   @Override
@@ -1853,7 +1877,7 @@ public Table getTable(String catName, String dbName, String tableName) throws TE
     req.setCatName(catName);
     req.setCapabilities(version);
     Table t = client.get_table_req(req).getTable();
-    return deepCopy(filterHook.filterTable(t));
+    return deepCopy(isClientFilterEnabled ? filterHook.filterTable(t) : t);
   }
 
   @Override
@@ -1864,7 +1888,7 @@ public Table getTable(String catName, String dbName, String tableName,
     req.setCapabilities(version);
     req.setValidWriteIdList(validWriteIdList);
     Table t = client.get_table_req(req).getTable();
-    return deepCopy(filterHook.filterTable(t));
+    return deepCopy(isClientFilterEnabled ? filterHook.filterTable(t) : t);
   }
 
   @Override
@@ -1881,7 +1905,7 @@ public Table getTable(String catName, String dbName, String tableName,
     req.setTblNames(tableNames);
     req.setCapabilities(version);
     List<Table> tabs = client.get_table_objects_by_name_req(req).getTables();
-    return deepCopyTables(filterHook.filterTables(tabs));
+    return deepCopyTables(isClientFilterEnabled ? filterHook.filterTables(tabs) : tabs);
   }
 
   @Override
@@ -1913,9 +1937,10 @@ public void updateCreationMetadata(String catName, String dbName, String tableNa
   @Override
   public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
       int maxTables) throws TException {
-    return filterHook.filterTableNames(catName, dbName,
+    List<String> tableNames =
         client.get_table_names_by_filter(prependCatalogToDbName(catName, dbName, conf), filter,
-            shrinkMaxtoShort(maxTables)));
+            shrinkMaxtoShort(maxTables));
+    return isClientFilterEnabled ? filterHook.filterTableNames(catName, dbName, tableNames) : tableNames;
   }
 
   /**
@@ -1943,8 +1968,9 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE
   @Override
   public List<String> getTables(String catName, String dbName, String tablePattern) throws TException {
-    return filterHook.filterTableNames(catName, dbName,
-        client.get_tables(prependCatalogToDbName(catName, dbName, conf), tablePattern));
+    List<String> tables = client.get_tables(prependCatalogToDbName(catName, dbName, conf), tablePattern);
+
+    return isClientFilterEnabled ? filterHook.filterTableNames(catName, dbName, tables) : tables;
   }
 
   @Override
@@ -1960,9 +1986,10 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE
   @Override
   public List<String> getTables(String catName, String dbName, String tablePattern,
       TableType tableType) throws TException {
-    return filterHook.filterTableNames(catName, dbName,
+    List<String> tables =
         client.get_tables_by_type(prependCatalogToDbName(catName, dbName, conf), tablePattern,
-            tableType.toString()));
+            tableType.toString());
+    return isClientFilterEnabled ? filterHook.filterTableNames(catName, dbName, tables) : tables;
   }
 
   @Override
@@ -1974,8 +2001,9 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE
   public List<String> getMaterializedViewsForRewriting(String catName, String dbname)
       throws MetaException {
     try {
-      return filterHook.filterTableNames(catName, dbname,
-          client.get_materialized_views_for_rewriting(prependCatalogToDbName(catName, dbname, conf)));
+      List<String> views =
+          client.get_materialized_views_for_rewriting(prependCatalogToDbName(catName, dbname, conf));
+      return isClientFilterEnabled ? filterHook.filterTableNames(catName, dbname, views) : views;
     } catch (Exception e) {
       MetaStoreUtils.logAndThrowMetaException(e);
     }
@@ -1996,8 +2024,10 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE
   @Override
   public List<TableMeta> getTableMeta(String catName, String dbPatterns, String tablePatterns,
       List<String> tableTypes) throws TException {
-    return filterHook.filterTableMetas(catName, dbPatterns, client.get_table_meta(prependCatalogToDbName(
-        catName, dbPatterns, conf), tablePatterns, tableTypes));
+    List<TableMeta> tableMetas = client.get_table_meta(prependCatalogToDbName(
+        catName, dbPatterns, conf), tablePatterns, tableTypes);
+    return isClientFilterEnabled ?
+        filterHook.filterTableMetas(catName, dbPatterns, tableMetas) : tableMetas;
   }
 
   @Override
@@ -2012,8 +2042,9 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE
   @Override
   public List<String> getAllTables(String catName, String dbName) throws TException {
-    return filterHook.filterTableNames(catName, dbName, client.get_all_tables(
-        prependCatalogToDbName(catName, dbName, conf)));
+    List<String> tableNames = client.get_all_tables(
+        prependCatalogToDbName(catName, dbName, conf));
+    return isClientFilterEnabled ? filterHook.filterTableNames(catName, dbName, tableNames) : tableNames;
   }
 
   @Override
@@ -2027,7 +2058,8 @@ public boolean tableExists(String catName, String dbName, String tableName) thro
       GetTableRequest req = new GetTableRequest(dbName, tableName);
       req.setCatName(catName);
       req.setCapabilities(version);
-      return filterHook.filterTable(client.get_table_req(req).getTable()) != null;
+      Table table = client.get_table_req(req).getTable();
+      return (isClientFilterEnabled ? filterHook.filterTable(table) : table) != null;
     } catch (NoSuchObjectException e) {
       return false;
     }
@@ -2042,8 +2074,11 @@ public boolean tableExists(String catName, String dbName, String tableName) thro
   @Override
   public List<String> listPartitionNames(String catName, String dbName, String tableName,
       int maxParts) throws TException {
-    return filterHook.filterPartitionNames(catName, dbName, tableName,
-        client.get_partition_names(prependCatalogToDbName(catName, dbName, conf), tableName, shrinkMaxtoShort(maxParts)));
+    List<String> partNames =
+        client.get_partition_names(
+            prependCatalogToDbName(catName, dbName, conf), tableName, shrinkMaxtoShort(maxParts));
+    return isClientFilterEnabled ?
+        filterHook.filterPartitionNames(catName, dbName, tableName, partNames) : partNames;
   }
 
   @Override
@@ -2055,9 +2090,10 @@ public boolean tableExists(String catName, String dbName, String tableName) thro
   @Override
   public List<String> listPartitionNames(String catName, String db_name, String tbl_name,
       List<String> part_vals, int max_parts) throws TException {
-    return filterHook.filterPartitionNames(catName, db_name, tbl_name,
-        client.get_partition_names_ps(prependCatalogToDbName(catName, db_name, conf), tbl_name,
-            part_vals, shrinkMaxtoShort(max_parts)));
+    List<String> partNames = client.get_partition_names_ps(prependCatalogToDbName(catName, db_name, conf), tbl_name,
+        part_vals, shrinkMaxtoShort(max_parts));
+    return isClientFilterEnabled ?
+        filterHook.filterPartitionNames(catName, db_name, tbl_name, partNames) : partNames;
   }
 
   @Override
@@ -2384,7 +2420,7 @@ public Partition getPartition(String catName, String dbName, String tblName, Str
       throws TException {
     Partition p = client.get_partition_by_name(prependCatalogToDbName(catName, dbName, conf),
         tblName, name);
-    return deepCopy(filterHook.filterPartition(p));
+    return deepCopy(isClientFilterEnabled ? filterHook.filterPartition(p) : p);
   }
 
   public Partition appendPartitionByName(String dbName, String tableName, String partName)
@@ -2419,6 +2455,32 @@ private HiveMetaHook getHook(Table tbl) throws MetaException {
     return hookLoader.getHook(tbl);
   }
 
+  /**
+   * Helper method that applies the filter hook to a single database and table name. This improves
+   * performance when filtering partitions: if the db or table is itself filtered out, there is no
+   * need to fetch any partitions, and a NoSuchObjectException can be thrown immediately.
+   * @param dbName the database name
+   * @param tblName the table name contained in the database
+   * @throws NoSuchObjectException if the database or table is filtered out
+   */
+  private void checkDbAndTableFilters(final String dbName, final String tblName)
+      throws NoSuchObjectException, MetaException {
+    String[] parsedDbName = parseDbName(dbName, conf);
+    List<String> filteredDb = isClientFilterEnabled ?
+        filterHook.filterDatabases(Collections.singletonList(dbName)) :
+        Collections.singletonList(dbName);
+    if (filteredDb.isEmpty()) {
+      throw new NoSuchObjectException("Database " + dbName + " does not exist");
+    }
+
+    List<String> filteredTable = isClientFilterEnabled ?
+        filterHook.filterTableNames(parsedDbName[CAT_NAME],
+            dbName, Collections.singletonList(tblName)) : Collections.singletonList(tblName);
+    if (filteredTable.isEmpty()) {
+      throw new NoSuchObjectException("Table " + tblName + " does not exist");
+    }
+  }
+
   @Override
   public List<String> partitionNameToVals(String name) throws MetaException, TException {
     return client.partition_name_to_vals(name);
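Every client-side change above applies the same guard: consult the filter hook only when metastore.client.filter.enabled is on. A minimal sketch of that idiom factored into a helper (hypothetical, not part of the patch; ClientFilterGuard and FilterCall are assumed names):

    import org.apache.hadoop.hive.metastore.api.MetaException;

    // Hypothetical sketch: the repeated "filter only when enabled" idiom, written once.
    final class ClientFilterGuard {
      @FunctionalInterface
      interface FilterCall<T> {
        T apply(T input) throws MetaException;
      }

      // Returns the input unchanged when filtering is disabled, otherwise
      // delegates to the given filter hook method.
      static <T> T filterIfEnabled(boolean enabled, T input, FilterCall<T> filter)
          throws MetaException {
        return enabled ? filter.apply(input) : input;
      }
    }

    // Usage, mirroring getDatabases() above:
    //   List<String> databases = client.get_databases(...);
    //   return ClientFilterGuard.filterIfEnabled(isClientFilterEnabled, databases,
    //       filterHook::filterDatabases);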
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index fb0b2fe6fb9fd4b4c92a6a39f06f39a4641aaabd..c4ab86eacea844d4a64c06c5e96f4a0012bb7fd0 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -216,7 +216,9 @@ public String toString() {
       ConfVars.AGGREGATE_STATS_CACHE_MAX_FULL,
       ConfVars.AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
       ConfVars.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
-      ConfVars.FILE_METADATA_THREADS
+      ConfVars.FILE_METADATA_THREADS,
+      ConfVars.METASTORE_CLIENT_FILTER_ENABLED,
+      ConfVars.METASTORE_SERVER_FILTER_ENABLED
   };
 
   /**
@@ -657,6 +659,10 @@ public static ConfVars getMetaConf(String name) {
         "metadata being exported to the current user's home directory on HDFS."),
     METASTORE_MAX_EVENT_RESPONSE("metastore.max.event.response", "hive.metastore.max.event.response", 1000000,
         "The parameter will decide the maximum number of events that HMS will respond."),
+    METASTORE_CLIENT_FILTER_ENABLED("metastore.client.filter.enabled", "metastore.client.filter.enabled", true,
+        "Enable filtering the metadata read results at HMS client"),
+    METASTORE_SERVER_FILTER_ENABLED("metastore.server.filter.enabled", "metastore.server.filter.enabled", false,
+        "Enable filtering the metadata read results at HMS server"),
     MOVE_EXPORTED_METADATA_TO_TRASH("metastore.metadata.move.exported.metadata.to.trash",
         "hive.metadata.move.exported.metadata.to.trash", true,
         "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
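The two properties above are the only switches the patch introduces: metastore.client.filter.enabled (default true) and metastore.server.filter.enabled (default false). A small sketch of toggling them programmatically before opening a client, assumed usage that mirrors how the new test below configures itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    public class FilterTogglesExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = MetastoreConf.newMetastoreConf();
        // Move filtering from the client to the server.
        MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_CLIENT_FILTER_ENABLED, false);
        MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_SERVER_FILTER_ENABLED, true);
        HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
        System.out.println(client.getAllDatabases());
        client.close();
      }
    }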
diff --git a/standalone-metastore/metastore-server/pom.xml b/standalone-metastore/metastore-server/pom.xml
index 895abfc423f00b121ee63e40904f5b3e57aea8ed..f67ec487112299d95ee185232bf3293fb1f0b8de 100644
--- a/standalone-metastore/metastore-server/pom.xml
+++ b/standalone-metastore/metastore-server/pom.xml
@@ -239,6 +239,11 @@
       <artifactId>curator-test</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-common</artifactId>
+      <version>4.0.0-SNAPSHOT</version>
+    </dependency>
   </dependencies>
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 0a1b96dcf62d3536cab2ce074d27a6225b2d3443..b266e10d729f1e90dc76c2089022d27ef7da7423 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -30,6 +30,8 @@ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependNotNullCatToDbName;
 
 import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
@@ -73,11 +75,13 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.common.ZooKeeperHiveHelper;
 import org.apache.hadoop.hive.common.ZKDeRegisterWatcher;
 import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
 import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent;
 import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
@@ -208,6 +212,8 @@
   // embedded metastore or a remote one
   private static boolean isMetaStoreRemote = false;
 
+  private static boolean isServerFilterEnabled = false;
+
   // Used for testing to simulate method timeout.
   @VisibleForTesting
   static boolean TEST_TIMEOUT_ENABLED = false;
@@ -505,6 +511,7 @@ public Configuration getHiveConf() {
     private List<TransactionalMetaStoreEventListener> transactionalListeners;
     private List<MetaStoreEventListener> endFunctionListeners;
     private List<MetaStoreInitListener> initListeners;
+    private MetaStoreFilterHook filterHook;
     private Pattern partitionValidationPattern;
     private final boolean isInTest;
 
@@ -615,6 +622,24 @@ public void init() throws MetaException {
       }
       expressionProxy = PartFilterExprUtil.createExpressionProxy(conf);
       fileMetadataManager = new FileMetadataManager(this.getMS(), conf);
+
+      filterHook = loadFilterHooks();
+      isServerFilterEnabled = MetastoreConf.getBoolVar(conf, ConfVars.METASTORE_SERVER_FILTER_ENABLED);
+    }
+
+    private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException {
+      Class<? extends MetaStoreFilterHook> authProviderClass = MetastoreConf.
+          getClass(conf, ConfVars.FILTER_HOOK, DefaultMetaStoreFilterHookImpl.class,
+              MetaStoreFilterHook.class);
+      try {
+        Constructor<? extends MetaStoreFilterHook> constructor =
+            authProviderClass.getConstructor(Configuration.class);
+        return constructor.newInstance(conf);
+      } catch (NoSuchMethodException | IllegalStateException | InstantiationException
+          | InvocationTargetException | IllegalAccessException e) {
+        String errorMsg = "Unable to create instance of " + authProviderClass.getName() + ": ";
+        throw new IllegalStateException(errorMsg + e.getMessage(), e);
+      }
     }
 
     private static String addPrefix(String s) {
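The reflective loader above only requires that the configured hook class expose a public single-argument Configuration constructor. A hypothetical registration sketch (MyFilterHook is an assumed class name, not part of the patch):

    // Hypothetical wiring: point FILTER_HOOK at a custom implementation so the
    // loader above instantiates it. MyFilterHook must implement
    // MetaStoreFilterHook and declare a public MyFilterHook(Configuration) ctor.
    static Configuration configureCustomHook() {
      Configuration conf = MetastoreConf.newMetastoreConf();
      MetastoreConf.setClass(conf, ConfVars.FILTER_HOOK,
          MyFilterHook.class, MetaStoreFilterHook.class);
      MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_SERVER_FILTER_ENABLED, true);
      return conf;
    }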
@@ -1159,7 +1184,7 @@ public GetCatalogResponse get_catalog(GetCatalogRequest rqst)
         ex = e;
         throw e;
       } finally {
-        endFunction("get_database", cat != null, ex);
+        endFunction("get_catalog", cat != null, ex);
       }
     }
 
@@ -1376,6 +1401,10 @@ public Database get_database(final String name) throws NoSuchObjectException, Me
       try {
         String[] parsedDbName = parseDbName(name, conf);
         db = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
+        if (isServerFilterEnabled) {
+          db = filterHook.filterDatabase(db);
+        }
+
         firePreEvent(new PreReadDatabaseEvent(db, this));
       } catch (MetaException|NoSuchObjectException e) {
         ex = e;
@@ -1676,8 +1705,10 @@ public void drop_database(final String dbName, final boolean deleteData, final b
       try {
         if (parsedDbNamed[DB_NAME] == null) {
           ret = getMS().getAllDatabases(parsedDbNamed[CAT_NAME]);
+          ret = filterDbNamesIfEnabled(ret);
         } else {
           ret = getMS().getDatabases(parsedDbNamed[CAT_NAME], parsedDbNamed[DB_NAME]);
+          ret = filterDbNamesIfEnabled(ret);
         }
       } catch (MetaException e) {
         ex = e;
@@ -1693,6 +1724,7 @@ public void drop_database(final String dbName, final boolean deleteData, final b
 
     @Override
     public List<String> get_all_databases() throws MetaException {
+      // get_databases() already filters its results, so no additional filtering is needed here.
       return get_databases(MetaStoreUtils.prependCatalogToDbName(null, null, conf));
     }
 
@@ -2899,16 +2931,19 @@ private boolean isExternalTablePurge(Table table) {
     public Table get_table(final String dbname, final String name) throws MetaException,
         NoSuchObjectException {
       String[] parsedDbName = parseDbName(dbname, conf);
+
       return getTableInternal(
-          parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, null);
+        parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, null);
     }
 
     @Override
     public GetTableResult get_table_req(GetTableRequest req) throws MetaException,
         NoSuchObjectException {
       String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
-      return new GetTableResult(getTableInternal(catName, req.getDbName(), req.getTblName(),
-          req.getCapabilities(), req.getValidWriteIdList()));
+      Table t = getTableInternal(catName, req.getDbName(), req.getTblName(),
+          req.getCapabilities(), req.getValidWriteIdList());
+
+      return new GetTableResult(t);
     }
 
     private Table getTableInternal(String catName, String dbname, String name,
@@ -2929,6 +2964,10 @@ private Table getTableInternal(String catName, String dbname, String name,
               "insert-only tables", "get_table_req");
         }
         firePreEvent(new PreReadTableEvent(t, this));
+
+        if (isServerFilterEnabled) {
+          t = filterHook.filterTable(t);
+        }
       } catch (MetaException | NoSuchObjectException e) {
         ex = e;
         throw e;
@@ -3007,14 +3046,20 @@ public Table get_table_core(
     public List<Table> get_table_objects_by_name(final String dbName, final List<String> tableNames)
         throws MetaException, InvalidOperationException, UnknownDBException {
       String[] parsedDbName = parseDbName(dbName, conf);
-      return getTableObjectsInternal(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableNames, null);
+
+      List<Table> ret =
+          getTableObjectsInternal(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableNames, null);
+
+      return ret;
     }
 
     @Override
     public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throws TException {
       String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
-      return new GetTablesResult(getTableObjectsInternal(catName,
-          req.getDbName(), req.getTblNames(), req.getCapabilities()));
+      List<Table> ret = getTableObjectsInternal(catName, req.getDbName(),
+          req.getTblNames(), req.getCapabilities());
+
+      return new GetTablesResult(ret);
     }
 
     private List<Table> getTableObjectsInternal(String catName, String dbName,
@@ -3067,6 +3112,11 @@ public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throw
                 "insert-only tables", "get_table_req");
           }
         }
+
+        if (isServerFilterEnabled) {
+          tables = filterHook.filterTables(tables);
+        }
+
       } catch (MetaException | InvalidOperationException | UnknownDBException e) {
         ex = e;
         throw e;
@@ -3125,6 +3175,8 @@ private boolean doesClientHaveCapability(ClientCapabilities client, ClientCapabi
           throw new InvalidOperationException(filter + " cannot apply null filter");
         }
         tables = getMS().listTableNamesByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], filter, maxTables);
+        tables = filterTableNamesIfEnabled(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tables);
+
       } catch (MetaException | InvalidOperationException | UnknownDBException e) {
         ex = e;
         throw e;
@@ -4525,8 +4577,12 @@ public Partition get_partition(final String db_name, final String tbl_name,
       Partition ret = null;
       Exception ex = null;
       try {
+        // For improved performance, first check whether the given db and table are filtered out.
+        // If so, there is no need to query the partitions at all.
+        checkDbAndTableFilters(db_name, tbl_name);
         fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
         ret = getMS().getPartition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals);
+        ret = filterPartitionIfEnabled(ret);
       } catch (Exception e) {
         ex = e;
         throwMetaException(e);
@@ -4537,6 +4593,30 @@ public Partition get_partition(final String db_name, final String tbl_name,
     }
 
     /**
+     * Helper method that applies the filter hook to a single database and table name. This improves
+     * performance when filtering partitions: if the db or table is itself filtered out, there is no
+     * need to fetch any partitions, and a NoSuchObjectException can be thrown immediately.
+     * @param dbName the database name
+     * @param tblName the table name contained in the database
+     * @throws NoSuchObjectException if the database or table is filtered out
+     */
+    private void checkDbAndTableFilters(final String dbName, final String tblName)
+        throws NoSuchObjectException, MetaException {
+      String[] parsedDbName = parseDbName(dbName, conf);
+      List<String> filteredDb = filterDbNamesIfEnabled(Collections.singletonList(dbName));
+
+      if (filteredDb.isEmpty()) {
+        throw new NoSuchObjectException("Database " + dbName + " does not exist");
+      }
+
+      List<String> filteredTable =
+          filterTableNamesIfEnabled(parsedDbName[CAT_NAME], dbName, Collections.singletonList(tblName));
+      if (filteredTable.isEmpty()) {
+        throw new NoSuchObjectException("Table " + tblName + " does not exist");
+      }
+    }
+
+    /**
      * Fire a pre-event for read table operation, if there are any
      * pre-event listeners registered
      */
@@ -4566,8 +4646,12 @@ public Partition get_partition_with_auth(final String db_name,
       Partition ret = null;
       Exception ex = null;
       try {
+        // For improved performance, first check whether the given db and table are filtered out.
+        // If so, there is no need to query the partitions at all.
+        checkDbAndTableFilters(db_name, tbl_name);
         ret = getMS().getPartitionWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
             tbl_name, part_vals, user_name, group_names);
+        ret = filterPartitionIfEnabled(ret);
       } catch (InvalidObjectException e) {
         ex = e;
         throw new NoSuchObjectException(e.getMessage());
@@ -4589,10 +4673,14 @@ public Partition get_partition_with_auth(final String db_name,
       List<Partition> ret = null;
       Exception ex = null;
       try {
+        // For improved performance, first check whether the given db and table are filtered out.
+        // If so, there is no need to query the partitions at all.
+        checkDbAndTableFilters(db_name, tbl_name);
         checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
             tbl_name, NO_FILTER_STRING, max_parts);
         ret = getMS().getPartitions(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name,
             max_parts);
+        ret = filterPartitionsIfEnabled(ret);
       } catch (Exception e) {
         ex = e;
         throwMetaException(e);
@@ -4613,10 +4701,14 @@ public Partition get_partition_with_auth(final String db_name,
       List<Partition> ret = null;
       Exception ex = null;
       try {
+        // For improved performance, first check whether the given db and table are filtered out.
+        // If so, there is no need to query the partitions at all.
+        checkDbAndTableFilters(dbName, tblName);
         checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
             tblName, NO_FILTER_STRING, maxParts);
         ret = getMS().getPartitionsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
             tblName, maxParts, userName, groupNames);
+        ret = filterPartitionsIfEnabled(ret);
       } catch (InvalidObjectException e) {
         ex = e;
         throw new NoSuchObjectException(e.getMessage());
@@ -4752,8 +4844,15 @@ private static boolean is_partition_spec_grouping_enabled(Table table) {
       List<String> ret = null;
       Exception ex = null;
       try {
+        // For improved performance, first check whether the given db and table are filtered out.
+        // If so, there is no need to query the partitions at all.
+        checkDbAndTableFilters(db_name, tbl_name);
         ret = getMS().listPartitionNames(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name,
             max_parts);
+        if (isServerFilterEnabled) {
+          ret = filterHook
+              .filterPartitionNames(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, ret);
+        }
       } catch (MetaException e) {
         ex = e;
         throw e;
@@ -5100,6 +5199,7 @@ private void alter_table_core(String catName, String dbname, String name, Table
       String[] parsedDbName = parseDbName(dbname, conf);
       try {
         ret = getMS().getTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern);
+        ret = filterTableNamesIfEnabled(parsedDbName[CAT_NAME], dbname, ret);
       } catch (MetaException e) {
         ex = e;
         throw e;
@@ -5165,6 +5265,7 @@ private void alter_table_core(String catName, String dbname, String name, Table
       String[] parsedDbName = parseDbName(dbname, conf);
       try {
         ret = getMS().getAllTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
+        ret = filterTableNamesIfEnabled(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], ret);
       } catch (MetaException e) {
         ex = e;
         throw e;
@@ -5227,6 +5328,41 @@ private void alter_table_core(String catName, String dbname, String name, Table
       return ret;
     }
 
+    private List<String> filterDbNamesIfEnabled(List<String> dbNames) throws MetaException {
+      if (isServerFilterEnabled) {
+        return filterHook.filterDatabases(dbNames);
+      }
+
+      return dbNames;
+    }
+
+    private List<String> filterTableNamesIfEnabled(String catName, String dbName,
+        List<String> tableNames) throws MetaException {
+      if (isServerFilterEnabled) {
+        return filterHook.filterTableNames(catName, dbName, tableNames);
+      }
+
+      return tableNames;
+    }
+
+    private Partition filterPartitionIfEnabled(Partition p)
+        throws MetaException, NoSuchObjectException {
+      if (isServerFilterEnabled) {
+        return filterHook.filterPartition(p);
+      }
+
+      return p;
+    }
+
+    private List<Partition> filterPartitionsIfEnabled(List<Partition> partitions)
+        throws MetaException {
+      if (isServerFilterEnabled) {
+        return filterHook.filterPartitions(partitions);
+      }
+
+      return partitions;
+    }
+
     private StorageSchemaReader getStorageSchemaReader() throws MetaException {
       if (storageSchemaReader == null) {
         String className =
@@ -5417,6 +5553,7 @@ private Partition get_partition_by_name_core(final RawStore ms, final String cat
         throw new NoSuchObjectException(e.getMessage());
       }
       Partition p = ms.getPartition(catName, db_name, tbl_name, partVals);
+      p = filterPartitionIfEnabled(p);
 
       if (p == null) {
         throw new NoSuchObjectException(TableName.getQualified(catName, db_name, tbl_name)
@@ -5437,7 +5574,9 @@ public Partition get_partition_by_name(final String db_name, final String tbl_na
       Exception ex = null;
       try {
         ret = get_partition_by_name_core(getMS(), parsedDbName[CAT_NAME],
-            parsedDbName[DB_NAME], tbl_name, part_name); } catch (Exception e) {
+            parsedDbName[DB_NAME], tbl_name, part_name);
+        ret = filterPartitionIfEnabled(ret);
+      } catch (Exception e) {
         ex = e;
         rethrowException(e);
       } finally {
@@ -5540,9 +5679,13 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n
       List<Partition> ret = null;
       Exception ex = null;
       try {
+        // For improved performance, first check whether the given db and table are filtered out.
+        // If so, there is no need to query the partitions at all.
+        checkDbAndTableFilters(db_name, tbl_name);
         // Don't send the parsedDbName, as this method will parse itself.
         ret = get_partitions_ps_with_auth(db_name, tbl_name, part_vals,
             max_parts, null, null);
+        ret = filterPartitionsIfEnabled(ret);
       } catch (Exception e) {
         ex = e;
         rethrowException(e);
@@ -5565,8 +5708,12 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n
       List<Partition> ret = null;
       Exception ex = null;
       try {
+        // For improved performance, first check whether the given db and table are filtered out.
+        // If so, there is no need to query the partitions at all.
+        checkDbAndTableFilters(db_name, tbl_name);
         ret = getMS().listPartitionsPsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
             tbl_name, part_vals, max_parts, userName, groupNames);
+        ret = filterPartitionsIfEnabled(ret);
       } catch (InvalidObjectException e) {
         ex = e;
         throw new MetaException(e.getMessage());
@@ -5590,8 +5737,15 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n
       List<String> ret = null;
       Exception ex = null;
       try {
+        // For improved performance, first check whether the given db and table are filtered out.
+        // If so, there is no need to query the partitions at all.
+        checkDbAndTableFilters(db_name, tbl_name);
         ret = getMS().listPartitionNamesPs(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name,
             part_vals, max_parts);
+        if (isServerFilterEnabled) {
+          ret = filterHook
+              .filterPartitionNames(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, ret);
+        }
       } catch (Exception e) {
         ex = e;
         rethrowException(e);
@@ -6032,10 +6186,14 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S
       List<Partition> ret = null;
       Exception ex = null;
       try {
+        // For improved performance, first check whether the given db and table are filtered out.
+        // If so, there is no need to query the partitions at all.
+        checkDbAndTableFilters(dbName, tblName);
         checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
             tblName, filter, maxParts);
         ret = getMS().getPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName,
             filter, maxParts);
+        ret = filterPartitionsIfEnabled(ret);
       } catch (Exception e) {
         ex = e;
         rethrowException(e);
@@ -6169,8 +6327,12 @@ private int get_num_partitions_by_expr(final String catName, final String dbName
       List<Partition> ret = null;
       Exception ex = null;
       try {
+        // For improved performance, first check whether the given db and table are filtered out.
+        // If so, there is no need to query the partitions at all.
+        checkDbAndTableFilters(dbName, tblName);
         ret = getMS().getPartitionsByNames(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName,
             partNames);
+        ret = filterPartitionsIfEnabled(ret);
       } catch (Exception e) {
         ex = e;
         rethrowException(e);
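With server-side filtering enabled and a hook that blocks everything (like the DummyMetaStoreFilterHookImpl in the test below), reads now surface NoSuchObjectException on the server, and partition calls in particular fail fast on the checkDbAndTableFilters() pre-check instead of fetching partitions first. A behavior sketch under those assumptions:

    try {
      client.getPartition("somedb", "sometab", "name=value1");
    } catch (NoSuchObjectException e) {
      // Expected: the object is filtered out, either rejected by the
      // db/table pre-check or blocked by the hook itself.
    }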
+ */ + +package org.apache.hadoop.hive.metastore; + +import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.hive.metastore.api.PartitionSpec; +import org.apache.hadoop.hive.metastore.api.TableMeta; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.util.StringUtils; +import org.junit.Test; + +import com.google.common.collect.Lists; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; + + +public class TestHiveMetastoreFilterHook { + public static class DummyMetaStoreFilterHookImpl implements MetaStoreFilterHook { + private static boolean blockResults = false; + + public DummyMetaStoreFilterHookImpl(Configuration conf) { + } + + @Override + public List filterDatabases(List dbList) throws MetaException { + if (blockResults) { + return new ArrayList<>(); + } + return dbList; + } + + @Override + public Database filterDatabase(Database dataBase) throws NoSuchObjectException { + if (blockResults) { + throw new NoSuchObjectException("Blocked access"); + } + return dataBase; + } + + @Override + public List filterTableNames(String catName, String dbName, List tableList) + throws MetaException { + if (blockResults) { + return new ArrayList<>(); + } + return tableList; + } + + @Override + public Table filterTable(Table table) throws NoSuchObjectException { + if (blockResults) { + throw new NoSuchObjectException("Blocked access"); + } + return table; + } + + @Override + public List
filterTables(List
tableList) throws MetaException { + if (blockResults) { + return new ArrayList<>(); + } + return tableList; + } + + @Override + public List filterTableMetas(String catName, String dbName,List tableMetas) throws MetaException { + return tableMetas; + } + + @Override + public List filterPartitions(List partitionList) throws MetaException { + if (blockResults) { + return new ArrayList<>(); + } + return partitionList; + } + + @Override + public List filterPartitionSpecs( + List partitionSpecList) throws MetaException { + if (blockResults) { + return new ArrayList<>(); + } + return partitionSpecList; + } + + @Override + public Partition filterPartition(Partition partition) throws NoSuchObjectException { + if (blockResults) { + throw new NoSuchObjectException("Blocked access"); + } + return partition; + } + + @Override + public List filterPartitionNames(String catName, String dbName, String tblName, + List partitionNames) throws MetaException { + if (blockResults) { + return new ArrayList<>(); + } + return partitionNames; + } + } + + protected static HiveMetaStoreClient client; + protected static Configuration conf; + protected static Warehouse warehouse; + + private static final int DEFAULT_LIMIT_PARTITION_REQUEST = 100; + + private static String DBNAME1 = "testdb1"; + private static String DBNAME2 = "testdb2"; + private static final String TAB1 = "tab1"; + private static final String TAB2 = "tab2"; + + + protected HiveMetaStoreClient createClient(Configuration metaStoreConf) throws Exception { + try { + return new HiveMetaStoreClient(metaStoreConf); + } catch (Throwable e) { + System.err.println("Unable to open the metastore"); + System.err.println(StringUtils.stringifyException(e)); + throw new Exception(e); + } + } + + @BeforeClass + public static void setUp() throws Exception { + DummyMetaStoreFilterHookImpl.blockResults = true; + } + + @Before + public void setUpForTest() throws Exception { + + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetastoreConf.setClass(conf, ConfVars.FILTER_HOOK, DummyMetaStoreFilterHookImpl.class, + MetaStoreFilterHook.class); + MetastoreConf.setBoolVar(conf, ConfVars.METRICS_ENABLED, true); + conf.set("hive.key1", "value1"); + conf.set("hive.key2", "http://www.example.com"); + conf.set("hive.key3", ""); + conf.set("hive.key4", "0"); + conf.set("datanucleus.autoCreateTables", "false"); + conf.set("hive.in.test", "true"); + + MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2); + MetastoreConf.setLongVar(conf, ConfVars.LIMIT_PARTITION_REQUEST, DEFAULT_LIMIT_PARTITION_REQUEST); + MetastoreConf.setVar(conf, ConfVars.STORAGE_SCHEMA_READER_IMPL, "no.such.class"); + MetaStoreTestUtils.setConfForStandloneMode(conf); + + warehouse = new Warehouse(conf); + } + + @After + public void tearDown() throws Exception { + if (client != null) { + client.close(); + } + } + + /** + * This is called in each test after the configuration is set in each test case + * @throws Exception + */ + protected void creatEnv(Configuration conf) throws Exception { + client = createClient(conf); + + client.dropDatabase(DBNAME1, true, true, true); + client.dropDatabase(DBNAME2, true, true, true); + Database db1 = new DatabaseBuilder() + .setName(DBNAME1) + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .create(client, conf); + Database db2 = new DatabaseBuilder() + .setName(DBNAME2) + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + 
.create(client, conf); + new TableBuilder() + .setDbName(DBNAME1) + .setTableName(TAB1) + .addCol("id", "int") + .addCol("name", "string") + .create(client, conf); + Table tab2 = new TableBuilder() + .setDbName(DBNAME1) + .setTableName(TAB2) + .addCol("id", "int") + .addPartCol("name", "string") + .create(client, conf); + new PartitionBuilder() + .inTable(tab2) + .addValue("value1") + .addToTable(client, conf); + new PartitionBuilder() + .inTable(tab2) + .addValue("value2") + .addToTable(client, conf); + } + + /** + * The default configuration should be disable filtering at HMS server + * Disable the HMS client side filtering in order to see HMS server filtering behavior + * @throws Exception + */ + @Test + public void testHMSServerWithoutFilter() throws Exception { + MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_CLIENT_FILTER_ENABLED, false); + DBNAME1 = "db_testHMSServerWithoutFilter_1"; + DBNAME2 = "db_testHMSServerWithoutFilter_2"; + creatEnv(conf); + + assertNotNull(client.getTable(DBNAME1, TAB1)); + assertEquals(2, client.getTables(DBNAME1, "*").size()); + assertEquals(2, client.getAllTables(DBNAME1).size()); + assertEquals(1, client.getTables(DBNAME1, TAB2).size()); + assertEquals(0, client.getAllTables(DBNAME2).size()); + + assertNotNull(client.getDatabase(DBNAME1)); + assertEquals(2, client.getDatabases("*testHMSServerWithoutFilter*").size()); + assertEquals(1, client.getDatabases(DBNAME1).size()); + + assertNotNull(client.getPartition(DBNAME1, TAB2, "name=value1")); + assertEquals(1, client.getPartitionsByNames(DBNAME1, TAB2, Lists.newArrayList("name=value1")).size()); + } + + /** + * Enable the HMS server side filtering + * Disable the HMS client side filtering in order to see HMS server filtering behavior + * @throws Exception + */ + @Test + public void testHMSServerWithFilter() throws Exception { + MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_CLIENT_FILTER_ENABLED, false); + MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_SERVER_FILTER_ENABLED, true); + DBNAME1 = "db_testHMSServerWithFilter_1"; + DBNAME2 = "db_testHMSServerWithFilter_2"; + creatEnv(conf); + + testFilterForDb(); + testFilterForTables(); + testFilterForPartition(); + } + + /** + * Disable filtering at HMS client + * By default, the HMS server side filtering is diabled, so we can see HMS client filtering behavior + * @throws Exception + */ + @Test + public void testHMSClientWithoutFilter() throws Exception { + MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_CLIENT_FILTER_ENABLED, false); + DBNAME1 = "db_testHMSClientWithoutFilter_1"; + DBNAME2 = "db_testHMSClientWithoutFilter_2"; + creatEnv(conf); + + assertNotNull(client.getTable(DBNAME1, TAB1)); + assertEquals(2, client.getTables(DBNAME1, "*").size()); + assertEquals(2, client.getAllTables(DBNAME1).size()); + assertEquals(1, client.getTables(DBNAME1, TAB2).size()); + assertEquals(0, client.getAllTables(DBNAME2).size()); + + assertNotNull(client.getDatabase(DBNAME1)); + assertEquals(2, client.getDatabases("*testHMSClientWithoutFilter*").size()); + assertEquals(1, client.getDatabases(DBNAME1).size()); + + assertNotNull(client.getPartition(DBNAME1, TAB2, "name=value1")); + assertEquals(1, client.getPartitionsByNames(DBNAME1, TAB2, Lists.newArrayList("name=value1")).size()); + } + + /** + * By default, the HMS Client side filtering is enabled + * Disable the HMS server side filtering in order to see HMS client filtering behavior + * @throws Exception + */ + @Test + public void testHMSClientWithFilter() throws Exception { + 
MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_SERVER_FILTER_ENABLED, false); + DBNAME1 = "db_testHMSClientWithFilter_1"; + DBNAME2 = "db_testHMSClientWithFilter_2"; + creatEnv(conf); + + testFilterForDb(); + testFilterForTables(); + testFilterForPartition(); + } + + protected void testFilterForDb() throws Exception { + try { + assertNotNull(client.getDatabase(DBNAME1)); + fail("getDatabase() should fail with blocking mode"); + } catch (NoSuchObjectException e) { + // Excepted + } + + assertEquals(0, client.getDatabases("*").size()); + assertEquals(0, client.getAllDatabases().size()); + assertEquals(0, client.getDatabases(DBNAME1).size()); + } + + protected void testFilterForTables() throws Exception { + try { + client.getTable(DBNAME1, TAB1); + fail("getTable() should fail with blocking mode"); + } catch (NoSuchObjectException e) { + // Excepted + } + assertEquals(0, client.getTables(DBNAME1, "*").size()); + assertEquals(0, client.getAllTables(DBNAME1).size()); + assertEquals(0, client.getTables(DBNAME1, TAB2).size()); + } + + protected void testFilterForPartition() throws Exception { + try { + assertNotNull(client.getPartition(DBNAME1, TAB2, "name=value1")); + fail("getPartition() should fail with blocking mode"); + } catch (NoSuchObjectException e) { + // Excepted + } + + try { + client.getPartitionsByNames(DBNAME1, TAB2, + Lists.newArrayList("name=value1")).size(); + } catch (NoSuchObjectException e) { + // Excepted + } + } +}
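The new test lives in the metastore-server module, so it can presumably be run with the usual surefire filter from standalone-metastore/metastore-server (for example, mvn test -Dtest=TestHiveMetastoreFilterHook).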