diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index c0ba8673d9cd7d9c8b0994bf40e84966d8c9d288..1f05fe3ac78f2e8d45b6b4022f7576e7568bf3dc 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1597,7 +1597,8 @@ private void drop_database_core(RawStore ms, String catName,
           ConfVars.BATCH_RETRIEVE_MAX);
 
       // First pass will drop the materialized views
-      List<String> materializedViewNames = get_tables_by_type(name, ".*", TableType.MATERIALIZED_VIEW.toString());
+      List<String> materializedViewNames = get_tables_by_type_core(catName, name, ".*",
+          TableType.MATERIALIZED_VIEW.toString());
       int startIndex = 0;
       // retrieve the tables from the metastore in batches to alleviate memory constraints
       while (startIndex < materializedViewNames.size()) {
@@ -5265,7 +5266,7 @@ private void alter_table_core(String catName, String dbname, String name, Table
       try {
         ret = getMS().getTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern);
         ret = FilterUtils.filterTableNamesIfEnabled(isServerFilterEnabled, filterHook,
-            parsedDbName[CAT_NAME], dbname, ret);
+            parsedDbName[CAT_NAME], parsedDbName[DB_NAME], ret);
       } catch (MetaException e) {
         ex = e;
         throw e;
@@ -5287,7 +5288,9 @@ private void alter_table_core(String catName, String dbname, String name, Table
       Exception ex = null;
       String[] parsedDbName = parseDbName(dbname, conf);
       try {
-        ret = getMS().getTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern, TableType.valueOf(tableType));
+        ret = get_tables_by_type_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern, tableType);
+        ret = FilterUtils.filterTableNamesIfEnabled(isServerFilterEnabled, filterHook,
+            parsedDbName[CAT_NAME], parsedDbName[DB_NAME], ret);
       } catch (MetaException e) {
         ex = e;
         throw e;
@@ -5300,6 +5303,26 @@ private void alter_table_core(String catName, String dbname, String name, Table
       return ret;
     }
 
+    private List<String> get_tables_by_type_core(final String catName, final String dbname, final String pattern, final String tableType)
+        throws MetaException {
+      startFunction("get_tables_by_type_core", ": catName=" + catName + ": db=" + dbname + " pat=" + pattern + ",type=" + tableType);
+
+      List<String> ret = null;
+      Exception ex = null;
+      try {
+        ret = getMS().getTables(catName, dbname, pattern, TableType.valueOf(tableType));
+      } catch (MetaException e) {
+        ex = e;
+        throw e;
+      } catch (Exception e) {
+        ex = e;
+        throw newMetaException(e);
+      } finally {
+        endFunction("get_tables_by_type_core", ret != null, ex);
+      }
+      return ret;
+    }
+
     @Override
     public List<String> get_materialized_views_for_rewriting(final String dbname)
         throws MetaException {
@@ -5367,6 +5390,7 @@ private void alter_table_core(String catName, String dbname, String name, Table
       try {
         try {
           tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name);
+          firePreEvent(new PreReadTableEvent(tbl, this));
         } catch (NoSuchObjectException e) {
           throw new UnknownTableException(e.getMessage());
         }
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
index 49c7d88fcb082d421b454c13690de492ca9c9f95..23faa7444a8e9f9c010290539f89b9d8b44f3aa8 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
@@ -356,6 +356,7 @@ protected void testFilterForTables(boolean filterAtServer) throws Exception {
     }
 
     assertEquals(0, client.getTables(DBNAME1, "*").size());
+    assertEquals(0, client.getTables(DBNAME1, "*", TableType.MANAGED_TABLE).size());
     assertEquals(0, client.getAllTables(DBNAME1).size());
     assertEquals(0, client.getTables(DBNAME1, TAB2).size());
   }
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java
new file mode 100644
index 0000000000000000000000000000000000000000..cc471d96b32e15bf45afb9ba08996057e43f4b54
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHmsServerAuthorization.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.PreEventContext;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Test HMS server-side authorization: verify that the pre-event listener is invoked
+ * for metadata read APIs at the HMS server. The configuration changes in each test
+ * case, so the HMS client and server are created per test.
+ */
+@Category(MetastoreUnitTest.class)
+public class TestHmsServerAuthorization {
+
+  /**
+   * Pre-event listener that can be toggled to fail every call, so that tests can
+   * verify the server actually consults it.
+   */
+  public static class DummyAuthorizationListenerImpl extends MetaStorePreEventListener {
+    private static boolean throwExceptionAtCall = false;
+
+    public DummyAuthorizationListenerImpl(Configuration config) {
+      super(config);
+    }
+
+    @Override
+    public void onEvent(PreEventContext context)
+        throws MetaException, NoSuchObjectException, InvalidOperationException {
+      if (throwExceptionAtCall) {
+        throw new MetaException("Authorization fails");
+      }
+    }
+  }
+
+  protected static HiveMetaStoreClient client;
+  protected static Configuration conf;
+  protected static Warehouse warehouse;
+
+  private static final int DEFAULT_LIMIT_PARTITION_REQUEST = 100;
+
+  private static String DBNAME1 = "testdb1";
+  private static String DBNAME2 = "testdb2";
+  private static final String TAB1 = "tab1";
+  private static final String TAB2 = "tab2";
+
+  protected HiveMetaStoreClient createClient(Configuration metaStoreConf) throws Exception {
+    try {
+      return new HiveMetaStoreClient(metaStoreConf);
+    } catch (Throwable e) {
+      System.err.println("Unable to open the metastore");
+      System.err.println(StringUtils.stringifyException(e));
+      throw new Exception(e);
+    }
+  }
+
+  @Before
+  public void setUpForTest() throws Exception {
+    // make sure env setup works
+    TestHmsServerAuthorization.DummyAuthorizationListenerImpl.throwExceptionAtCall = false;
+
+    conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+    MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    MetastoreConf.setClass(conf, ConfVars.PRE_EVENT_LISTENERS, DummyAuthorizationListenerImpl.class,
+        MetaStorePreEventListener.class);
+    MetastoreConf.setBoolVar(conf, ConfVars.METRICS_ENABLED, true);
+    conf.set("hive.key1", "value1");
+    conf.set("hive.key2", "http://www.example.com");
+    conf.set("hive.key3", "");
+    conf.set("hive.key4", "0");
+    conf.set("datanucleus.autoCreateTables", "false");
+    conf.set("hive.in.test", "true");
+
+    MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2);
+    MetastoreConf.setLongVar(conf, ConfVars.LIMIT_PARTITION_REQUEST, DEFAULT_LIMIT_PARTITION_REQUEST);
+    MetastoreConf.setVar(conf, ConfVars.STORAGE_SCHEMA_READER_IMPL, "no.such.class");
+    MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_CLIENT_FILTER_ENABLED, false);
+    MetastoreConf.setBoolVar(conf, ConfVars.METASTORE_SERVER_FILTER_ENABLED, false);
+
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    warehouse = new Warehouse(conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (client != null) {
+      client.close();
+    }
+  }
+
+  /**
+   * Creates the test databases, tables and partitions. Called in each test case
+   * after its configuration has been set.
+   */
+  protected void createEnv(Configuration conf) throws Exception {
+    client = createClient(conf);
+
+    client.dropDatabase(DBNAME1, true, true, true);
+    client.dropDatabase(DBNAME2, true, true, true);
+    Database db1 = new DatabaseBuilder()
+        .setName(DBNAME1)
+        .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME)
+        .create(client, conf);
+    Database db2 = new DatabaseBuilder()
+        .setName(DBNAME2)
+        .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME)
+        .create(client, conf);
+    new TableBuilder()
+        .setDbName(DBNAME1)
+        .setTableName(TAB1)
+        .addCol("id", "int")
+        .addCol("name", "string")
+        .create(client, conf);
+    Table tab2 = new TableBuilder()
+        .setDbName(DBNAME1)
+        .setTableName(TAB2)
+        .addCol("id", "int")
+        .addPartCol("name", "string")
+        .create(client, conf);
+    new PartitionBuilder()
+        .inTable(tab2)
+        .addValue("value1")
+        .addToTable(client, conf);
+    new PartitionBuilder()
+        .inTable(tab2)
+        .addValue("value2")
+        .addToTable(client, conf);
+  }
+
+  /**
+   * Tests that the pre-event listener is called in function get_fields at the HMS server.
+   */
+  @Test
+  public void test_get_fields() throws Exception {
+    DBNAME1 = "db_test_get_fields_1";
+    DBNAME2 = "db_test_get_fields_2";
+    createEnv(conf);
+
+    // enable throwing exception, so we can check that the pre-event listener is called
+    TestHmsServerAuthorization.DummyAuthorizationListenerImpl.throwExceptionAtCall = true;
+
+    try {
+      List<FieldSchema> tableSchema = client.getFields(DBNAME1, TAB1);
+      fail("getFields() should fail when the pre-event listener throws at the server side");
+    } catch (MetaException ex) {
+      assertTrue(ex.getMessage().contains("Authorization fails"));
+    }
+  }
+}