diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreFactory.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreFactory.java
new file mode 100644
index 0000000..9af7c79
--- /dev/null
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreFactory.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.DefaultPartitionExpressionProxy;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
+import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.apache.hadoop.hive.metastore.minihms.MiniHMS;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class MetaStoreFactory {
+  private static final int DEFAULT_LIMIT_PARTITION_REQUEST = 100;
+
+  /**
+   * We would like to run the tests with 2 configurations:
+   * - Embedded - where the MetaStore runs in the same thread as the client
+   * - Remote - where the MetaStore is started in a different thread
+   * @return The list of the test configurations
+   */
+  public static List<Object[]> getMetaStores() throws Exception {
+    List<Object[]> metaStores = new ArrayList<>();
+
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    // Set some values to use for getting conf vars
+    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true);
+    MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX, 2);
+    MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.LIMIT_PARTITION_REQUEST,
+        DEFAULT_LIMIT_PARTITION_REQUEST);
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
+        DefaultPartitionExpressionProxy.class.getName());
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TASK_THREADS_ALWAYS,
+        EventCleanerTask.class.getName());
+
+    // Do this only at your own peril, and never in production code
+    conf.set("datanucleus.autoCreateTables", "false");
+
+    // Example of using cluster configuration XML files:
+    // -Dtest.hms.client.configs=/tmp/conf/core-site.xml,/tmp/conf/hive-site.xml
+    String testHMSClientConfiguration = System.getProperty("test.hms.client.configs");
+    if (testHMSClientConfiguration != null) {
+      Configuration clusterConf = new Configuration(conf);
+      // Loading the extra configuration options
+      String[] configurationFiles = testHMSClientConfiguration.split(",");
+      for(String configurationFile : configurationFiles) {
+        clusterConf.addResource(new Path(configurationFile));
+      }
+
+      // Using a MetaStore running in an existing cluster
+      AbstractMetaStoreService cluster =
+          new MiniHMS.Builder()
+              .setConf(clusterConf)
+              .setType(MiniHMS.MiniHMSType.CLUSTER)
+              .build();
+      metaStores.add(new Object[]{"Cluster", cluster});
+    }
+
+    // Create Embedded MetaStore
+    conf.set("javax.jdo.option.ConnectionURL",
+        "jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db1;create=true");
+    AbstractMetaStoreService embedded =
+        new MiniHMS.Builder()
+            .setConf(conf)
+            .setType(MiniHMS.MiniHMSType.EMBEDDED)
+            .build();
+    metaStores.add(new Object[] { "Embedded", embedded});
+
+    // Create Remote MetaStore
+    conf.set("javax.jdo.option.ConnectionURL",
+        "jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db2;create=true");
+    AbstractMetaStoreService remote =
+        new MiniHMS.Builder()
+            .setConf(conf)
+            .setType(MiniHMS.MiniHMSType.REMOTE)
+            .build();
+    metaStores.add(new Object[] { "Remote", remote});
+
+    return metaStores;
+  }
+}
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java
new file mode 100644
index 0000000..a90e7e4
--- /dev/null
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java
@@ -0,0 +1,578 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.client;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.FunctionType;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.IndexBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.apache.thrift.transport.TTransportException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+@RunWith(Parameterized.class)
+public class TestDatabases {
+  // Needed until there is a junit release with @BeforeParam, @AfterParam (junit 4.13)
+  // https://github.com/junit-team/junit4/commit/1bf8438b65858565dbb64736bfe13aae9cfc1b5a
+  // Then we should remove our own copy
+  private static Set<AbstractMetaStoreService> metaStoreServices = null;
+  private static final String DEFAULT_DATABASE = "default";
+  private final AbstractMetaStoreService metaStore;
+  private IMetaStoreClient client;
+  private Database[] testDatabases = new Database[4];
+
+  @Parameterized.Parameters(name = "{0}")
+  public static List<Object[]> getMetaStoreToTest() throws Exception {
+    List<Object[]> result = MetaStoreFactory.getMetaStores();
+    metaStoreServices = result.stream()
+        .map(test -> (AbstractMetaStoreService)test[1])
+        .collect(Collectors.toSet());
+    return result;
+  }
+
+  public TestDatabases(String name, AbstractMetaStoreService metaStore) throws Exception {
+    this.metaStore = metaStore;
+    this.metaStore.start();
+  }
+
+  // Needed until there is a junit release with @BeforeParam, @AfterParam (junit 4.13)
+  // https://github.com/junit-team/junit4/commit/1bf8438b65858565dbb64736bfe13aae9cfc1b5a
+  // Then we should move this to @AfterParam
+  @AfterClass
+  public static void stopMetaStores() throws Exception {
+    for(AbstractMetaStoreService metaStoreService : metaStoreServices) {
+      metaStoreService.stop();
+    }
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // Get new client
+    client = metaStore.getClient();
+
+    // Clean up the databases
+    for(String databaseName : client.getAllDatabases()) {
+      if (!databaseName.equals(DEFAULT_DATABASE)) {
+        client.dropDatabase(databaseName, true, true, true);
+      }
+    }
+
+    testDatabases[0] =
+        new DatabaseBuilder().setName("test_database_1").build();
+    testDatabases[1] =
+        new DatabaseBuilder().setName("test_database_to_find_1").build();
+    testDatabases[2] =
+        new DatabaseBuilder().setName("test_database_to_find_2").build();
+    testDatabases[3] =
+        new DatabaseBuilder().setName("test_database_hidden_1").build();
+
+    // Create the databases, and
reload them from the MetaStore + for(int i=0; i < testDatabases.length; i++) { + client.createDatabase(testDatabases[i]); + testDatabases[i] = client.getDatabase(testDatabases[i].getName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + /** + * This test creates and queries a database and then drops it. Good for testing the happy path. + * @throws Exception + */ + @Test + public void testCreateGetDeleteDatabase() throws Exception { + Database database = getDatabaseWithAllParametersSet(); + client.createDatabase(database); + Database createdDatabase = client.getDatabase(database.getName()); + + // The createTime will be set on the server side, so the comparison should skip it + Assert.assertEquals("Comparing databases", database, createdDatabase); + Assert.assertTrue("The directory should be created", metaStore.isPathExists( + new Path(database.getLocationUri()))); + client.dropDatabase(database.getName()); + try { + client.getDatabase(database.getName()); + Assert.fail("Expected a NoSuchObjectException to be thrown"); + } catch (NoSuchObjectException exception) { + // Expected exception + } + } + + @Test + public void testCreateDatabaseDefaultValues() throws Exception { + Database database = new Database(); + database.setName("dummy"); + + client.createDatabase(database); + Database createdDatabase = client.getDatabase(database.getName()); + + Assert.assertNull("Comparing description", createdDatabase.getDescription()); + Assert.assertEquals("Comparing location", metaStore.getWarehouseRoot() + "/" + + createdDatabase.getName() + ".db", createdDatabase.getLocationUri()); + Assert.assertEquals("Comparing parameters", new HashMap(), + createdDatabase.getParameters()); + Assert.assertNull("Comparing privileges", createdDatabase.getPrivileges()); + Assert.assertNull("Comparing owner name", createdDatabase.getOwnerName()); + Assert.assertEquals("Comparing owner type", PrincipalType.USER, createdDatabase.getOwnerType()); + } + + @Test + public void testCreateDatabaseInvalidData() throws Exception { + Database database = testDatabases[0]; + + // Missing class setting field + database.setName(null); + try { + client.createDatabase(database); + // Throwing InvalidObjectException would be more appropriate, but we do not change the API + Assert.fail("Expected an MetaException to be thrown"); + } catch (MetaException exception) { + // Expected exception + } + database.setName("test_database_1"); + + // Invalid character in new database name + database.setName("test_database_1;"); + try { + client.createDatabase(database); + // Throwing InvalidObjectException would be more appropriate, but we do not change the API + Assert.fail("Expected an InvalidObjectException to be thrown"); + } catch (InvalidObjectException exception) { + // Expected exception + } + database.setName("test_database_1"); + + // Empty new database name + database.setName(""); + try { + client.createDatabase(database); + // Throwing InvalidObjectException would be more appropriate, but we do not change the API + Assert.fail("Expected an InvalidObjectException to be thrown"); + } catch (InvalidObjectException exception) { + // Expected exception + } + database.setName("test_database_1"); + } + + @Test + public void testCreateDatabaseAlreadyExists() throws Exception { + Database database = testDatabases[0]; + try { + // Already existing database + client.createDatabase(database); + Assert.fail("Expected an 
AlreadyExistsException to be thrown"); + } catch (AlreadyExistsException exception) { + // Expected exception + } + } + + @Test + public void testDefaultDatabaseData() throws Exception { + Database database = client.getDatabase(DEFAULT_DATABASE); + Assert.assertEquals("Default database name", "default", database.getName()); + Assert.assertEquals("Default database description", "Default Hive database", + database.getDescription()); + Assert.assertEquals("Default database location", metaStore.getWarehouseRoot(), + new Path(database.getLocationUri())); + Assert.assertEquals("Default database parameters", new HashMap(), + database.getParameters()); + Assert.assertEquals("Default database owner", "public", database.getOwnerName()); + Assert.assertEquals("Default database owner type", PrincipalType.ROLE, database.getOwnerType()); + Assert.assertNull("Default database privileges", database.getPrivileges()); + } + + @Test + public void testGetDatabaseCaseInsensitive() throws Exception { + Database database = testDatabases[0]; + + // Test in upper case + Database resultUpper = client.getDatabase(database.getName().toUpperCase()); + Assert.assertEquals("Comparing databases", database, resultUpper); + + // Test in mixed case + Database resultMix = client.getDatabase("teST_dAtABase_1"); + Assert.assertEquals("Comparing databases", database, resultMix); + } + + @Test + public void testGetDatabaseNoSuchObject() throws Exception { + // No such database + try { + client.getDatabase("no_such_database"); + Assert.fail("Expected an NoSuchObjectException to be thrown"); + } catch (NoSuchObjectException exception) { + // Expected exception + } + } + + @Test + public void testGetDatabaseInvalidData() throws Exception { + // Missing database name in the query + try { + client.getDatabase(null); + // TODO: Should have a check on the server side. 
+ Assert.fail("Expected a NullPointerException or TTransportException to be thrown"); + } catch (NullPointerException exception) { + // Expected exception - Embedded MetaStore + } catch (TTransportException exception) { + // Expected exception - Remote MetaStore + } + } + + @Test + public void testDropDatabaseNoSuchObject() throws Exception { + // No such database + try { + client.dropDatabase("no_such_database"); + Assert.fail("Expected an NoSuchObjectException to be thrown"); + } catch (NoSuchObjectException exception) { + // Expected exception + } + } + + @Test + public void testDropDatabaseInvalidData() throws Exception { + // Missing database in the query + try { + client.dropDatabase(null); + // TODO: Should be checked on server side + Assert.fail("Expected an NullPointerException or TTransportException to be thrown"); + } catch (NullPointerException exception) { + // Expected exception - Embedded MetaStore + } catch (TTransportException exception) { + // Expected exception - Remote MetaStore + } + + // Check if it is possible to drop default database + try { + client.dropDatabase(DEFAULT_DATABASE); + // TODO: Should be checked on server side + Assert.fail("Expected an MetaException or TTransportException to be thrown"); + } catch (MetaException exception) { + // Expected exception - Embedded MetaStore + } catch (TTransportException exception) { + // Expected exception - Remote MetaStore + } + } + + @Test + public void testDropDatabaseCaseInsensitive() throws Exception { + Database database = testDatabases[0]; + + // Test in upper case + client.dropDatabase(database.getName().toUpperCase()); + + // Test in mixed case + client.createDatabase(database); + client.dropDatabase("TesT_DatABaSe_1"); + } + + @Test + public void testDropDatabaseDeleteData() throws Exception { + Database database = testDatabases[0]; + Path dataFile = new Path(database.getLocationUri().toString() + "/dataFile"); + metaStore.createFile(dataFile, "100"); + + // Do not delete the data + client.dropDatabase(database.getName(), false, false); + // Check that the data still exist + Assert.assertTrue("The data file should still exist", metaStore.isPathExists(dataFile)); + + // Recreate the database + client.createDatabase(database); + Assert.assertTrue("The data file should still exist", metaStore.isPathExists(dataFile)); + + // Delete the data + client.dropDatabase(database.getName(), true, false); + // Check that the data is removed + Assert.assertFalse("The data file should not exist", metaStore.isPathExists(dataFile)); + } + + @Test + public void testDropDatabaseIgnoreUnknown() throws Exception { + // No such database + try { + client.dropDatabase("no_such_database", false, false); + Assert.fail("Expected an NoSuchObjectException to be thrown"); + } catch (NoSuchObjectException exception) { + // Expected exception + } + + client.dropDatabase("no_such_database", false, true); + } + + @Test + public void testDropDatabaseCascade() throws Exception { + Database database = testDatabases[0]; + Table testTable = + new TableBuilder() + .setDbName(database.getName()) + .setTableName("test_table") + .addCol("test_col", "int") + .build(); + // FIXME: Change back if FunctionBuilder is committed: HIVE-18355 +/* + Function testFunction = + new FunctionBuilder() + .setDbName(database.getName()) + .setName("test_function") + .setClass("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper") + .build(); +*/ + Function testFunction = new Function(); + testFunction.setDbName(database.getName()); + 
testFunction.setFunctionName("test_function"); + testFunction.setClassName("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper"); + testFunction.setOwnerType(PrincipalType.USER); + testFunction.setFunctionType(FunctionType.JAVA); + + Index testIndex = + new IndexBuilder() + .setIndexName("test_index") + .setIndexTableName("test_index_table") + .setDbAndTableName(testTable) + .addCol("test_col", "int") + .build(); + Table testIndexTable = + new TableBuilder() + .setDbName(database.getName()) + .setTableName("test_index_table") + .addCol("test_col", "int") + .build(); + + // Drop database with table + client.createTable(testTable); + try { + // Without cascade + client.dropDatabase(database.getName(), true, true, false); + Assert.fail("Expected an InvalidOperationException to be thrown"); + } catch (InvalidOperationException exception) { + // Expected exception + } + // With cascade + client.dropDatabase(database.getName(), true, true, true); + + // Drop database with function + client.createDatabase(database); + client.createFunction(testFunction); + try { + // Without cascade + client.dropDatabase(database.getName(), true, true, false); + Assert.fail("Expected an InvalidOperationException to be thrown"); + } catch (InvalidOperationException exception) { + // Expected exception + } + // With cascade + client.dropDatabase(database.getName(), true, true, true); + + // Drop database with index + client.createDatabase(database); + client.createTable(testTable); + client.createIndex(testIndex, testIndexTable); + try { + // Without cascade + client.dropDatabase(database.getName(), true, true, false); + Assert.fail("Expected an InvalidOperationException to be thrown"); + } catch (InvalidOperationException exception) { + // Expected exception + } + // With cascade + // TODO: Known error, should be fixed + // client.dropDatabase(database.getName(), true, true, true); + // Need to drop index to clean up the mess + client.dropIndex(database.getName(), testTable.getTableName(), testIndex.getIndexName(), true); + } + + @Test + public void testGetAllDatabases() throws Exception { + List allDatabases = client.getAllDatabases(); + Assert.assertEquals("All databases size", 5, allDatabases.size()); + for(Database database : testDatabases) { + if (!database.getName().equals(DEFAULT_DATABASE)) { + Assert.assertTrue("Checking database names", allDatabases.contains(database.getName())); + } + } + + // Drop one database, see what remains + client.dropDatabase(testDatabases[1].getName()); + allDatabases = client.getAllDatabases(); + Assert.assertEquals("All databases size", 4, allDatabases.size()); + for(Database database : testDatabases) { + if (!database.getName().equals(DEFAULT_DATABASE) + && !database.getName().equals(testDatabases[1].getName())) { + Assert.assertTrue("Checking database names", allDatabases.contains(database.getName())); + } + } + } + + @Test + public void testGetDatabases() throws Exception { + // Find databases which name contains _to_find_ + List databases = client.getDatabases("*_to_find_*"); + Assert.assertEquals("Found databases size", 2, databases.size()); + Assert.assertTrue("Should contain", databases.contains("test_database_to_find_1")); + Assert.assertTrue("Should contain", databases.contains("test_database_to_find_2")); + + // Find databases which name contains _to_find_ or _hidden_ + databases = client.getDatabases("*_to_find_*|*_hidden_*"); + Assert.assertEquals("Found databases size", 3, databases.size()); + Assert.assertTrue("Should contain", 
databases.contains("test_database_to_find_1")); + Assert.assertTrue("Should contain", databases.contains("test_database_to_find_2")); + Assert.assertTrue("Should contain", databases.contains("test_database_hidden_1")); + + // Look for databases but do not find any + databases = client.getDatabases("*_not_such_database_*"); + Assert.assertEquals("No such databases size", 0, databases.size()); + + // Look for databases without pattern + databases = client.getDatabases(null); + Assert.assertEquals("Search databases without pattern size", 5, databases.size()); + } + + @Test + public void testGetDatabasesCaseInsensitive() throws Exception { + // Check case insensitive search + List databases = client.getDatabases("*_tO_FiND*"); + Assert.assertEquals("Found databases size", 2, databases.size()); + Assert.assertTrue("Should contain", databases.contains("test_database_to_find_1")); + Assert.assertTrue("Should contain", databases.contains("test_database_to_find_2")); + } + + @Test + public void testAlterDatabase() throws Exception { + Database originalDatabase = testDatabases[0]; + Database newDatabase = + new DatabaseBuilder() + // The database name is not changed during alter + .setName(originalDatabase.getName()) + .setOwnerType(PrincipalType.GROUP) + .setOwnerName("owner2") + .setLocation(metaStore.getWarehouseRoot() + "/database_location_2") + .setDescription("dummy description 2") + .addParam("param_key_1", "param_value_1_2") + .addParam("param_key_2_3", "param_value_2_3") + .build(); + + client.alterDatabase(originalDatabase.getName(), newDatabase); + Database alteredDatabase = client.getDatabase(newDatabase.getName()); + Assert.assertEquals("Comparing Databases", newDatabase, alteredDatabase); + } + + @Test + public void testAlterDatabaseNotNullableFields() throws Exception { + Database database = getDatabaseWithAllParametersSet(); + client.createDatabase(database); + Database originalDatabase = client.getDatabase(database.getName()); + Database newDatabase = new Database(); + newDatabase.setName("new_name"); + + client.alterDatabase(originalDatabase.getName(), newDatabase); + // The name should not be changed, so reload the db with the original name + Database alteredDatabase = client.getDatabase(originalDatabase.getName()); + Assert.assertEquals("Database name should not change", originalDatabase.getName(), + alteredDatabase.getName()); + Assert.assertEquals("Database description should not change", originalDatabase.getDescription(), + alteredDatabase.getDescription()); + Assert.assertEquals("Database location should not change", originalDatabase.getLocationUri(), + alteredDatabase.getLocationUri()); + Assert.assertEquals("Database parameters should be empty", new HashMap(), + alteredDatabase.getParameters()); + Assert.assertNull("Database owner should be empty", alteredDatabase.getOwnerName()); + Assert.assertEquals("Database owner type should not change", originalDatabase.getOwnerType(), + alteredDatabase.getOwnerType()); + Assert.assertNull("Database privileges should be empty", alteredDatabase.getPrivileges()); + } + + @Test + public void testAlterDatabaseNoSuchObject() throws Exception { + Database newDatabase = new DatabaseBuilder().setName("test_database_altered").build(); + + // No such database + try { + client.alterDatabase("no_such_database", newDatabase); + Assert.fail("Expected an NoSuchObjectException to be thrown"); + } catch (NoSuchObjectException exception) { + // Expected exception + } + } + + @Test + public void testAlterDatabaseCaseInsensitive() throws Exception { + 
Database originalDatabase = testDatabases[0]; + Database newDatabase = originalDatabase.deepCopy(); + newDatabase.setDescription("Altered database"); + + // Test in upper case + client.alterDatabase(originalDatabase.getName().toUpperCase(), newDatabase); + Database alteredDatabase = client.getDatabase(newDatabase.getName()); + Assert.assertEquals("Comparing databases", newDatabase, alteredDatabase); + + // Test in mixed case + originalDatabase = testDatabases[2]; + newDatabase = originalDatabase.deepCopy(); + newDatabase.setDescription("Altered database 2"); + client.alterDatabase("TeST_daTAbaSe_TO_FiNd_2", newDatabase); + alteredDatabase = client.getDatabase(newDatabase.getName()); + Assert.assertEquals("Comparing databases", newDatabase, alteredDatabase); + } + + private Database getDatabaseWithAllParametersSet() throws Exception { + return new DatabaseBuilder() + .setName("dummy") + .setOwnerType(PrincipalType.ROLE) + .setOwnerName("owner") + .setLocation(metaStore.getWarehouseRoot() + "/database_location") + .setDescription("dummy description") + .addParam("param_key_1", "param_value_1") + .addParam("param_key_2", "param_value_2") + .build(); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java new file mode 100644 index 0000000..ed071f8 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.minihms; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.TrashPolicy; +import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; + +import java.io.IOException; +import java.util.Map; + +/** + * The tests should use this abstract class to access the MetaStore services. + * This abstract class ensures, that the same tests could be run against the different MetaStore + * configurations. 
+ */
+public abstract class AbstractMetaStoreService {
+  protected Configuration configuration;
+  private Warehouse warehouse;
+  private FileSystem warehouseRootFs;
+  private Path trashDir;
+
+  public AbstractMetaStoreService(Configuration configuration) {
+    this.configuration = new Configuration(configuration);
+  }
+
+  /**
+   * Starts the MetaStoreService. Be aware that, since the current MetaStore does not implement a
+   * clean shutdown, the MetaStoreService can be started only once per test.
+   *
+   * @throws Exception if any Exception occurs
+   */
+  public void start() throws Exception {
+    warehouse = new Warehouse(configuration);
+    warehouseRootFs = warehouse.getFs(warehouse.getWhRoot());
+    TrashPolicy trashPolicy = TrashPolicy.getInstance(configuration, warehouseRootFs);
+    trashDir = trashPolicy.getCurrentTrashDir();
+  }
+
+  /**
+   * Starts the service, adding extra configuration to the default settings. Be aware that, since
+   * the current MetaStore does not implement a clean shutdown, the MetaStoreService can be
+   * started only once per test.
+   *
+   * @param confOverlay The extra parameters which should be set before starting the service
+   * @throws Exception if any Exception occurs
+   */
+  public void start(Map<MetastoreConf.ConfVars, String> confOverlay) throws Exception {
+    // Set confOverlay parameters
+    for (Map.Entry<MetastoreConf.ConfVars, String> entry : confOverlay.entrySet()) {
+      MetastoreConf.setVar(configuration, entry.getKey(), entry.getValue());
+    }
+    // Start the service
+    start();
+  }
+
+  /**
+   * Returns the MetaStoreClient for this MetaStoreService.
+   *
+   * @return The client connected to this service
+   * @throws MetaException if any Exception occurs during client configuration
+   */
+  public IMetaStoreClient getClient() throws MetaException {
+    return new HiveMetaStoreClient(configuration);
+  }
+
+  /**
+   * Returns the MetaStore Warehouse root directory name.
+   *
+   * @return The warehouse root directory
+   * @throws MetaException IO failure
+   */
+  public Path getWarehouseRoot() throws MetaException {
+    return warehouse.getWhRoot();
+  }
+
+  /**
+   * Checks if a path exists.
+   *
+   * @param path The path to check
+   * @return true if the path exists
+   * @throws IOException IO failure
+   */
+  public boolean isPathExists(Path path) throws IOException {
+    return warehouseRootFs.exists(path);
+  }
+
+  /**
+   * Checks if a path exists in the trash directory.
+   *
+   * @param path The path to check
+   * @return True if the path exists
+   * @throws IOException IO failure
+   */
+  public boolean isPathExistsInTrash(Path path) throws IOException {
+    Path pathInTrash = new Path(trashDir.toUri().getScheme(), trashDir.toUri().getAuthority(),
+        trashDir.toUri().getPath() + path.toUri().getPath());
+    return isPathExists(pathInTrash);
+  }
+
+  /**
+   * Creates a file on the given path.
+   *
+   * @param path Destination path
+   * @param content The content of the file
+   * @throws IOException IO failure
+   */
+  public void createFile(Path path, String content) throws IOException {
+    FSDataOutputStream outputStream = warehouseRootFs.create(path);
+    outputStream.write(content.getBytes());
+    outputStream.close();
+  }
+
+  /**
+   * Cleans the warehouse and the trash dirs in preparation for the tests.
+   *
+   * @throws MetaException IO failure
+   */
+  public void cleanWarehouseDirs() throws MetaException {
+    warehouse.deleteDir(getWarehouseRoot(), true, true);
+    warehouse.deleteDir(trashDir, true, true);
+  }
+
+  /**
+   * Stops the MetaStoreService. Once the MetaStore implements a clean shutdown, this method will
+   * call shutdown on MetaStore.
Currently this does nothing :( + */ + public void stop() { + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/ClusterMetaStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/ClusterMetaStore.java new file mode 100644 index 0000000..e6aa447 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/ClusterMetaStore.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.minihms; + +import org.apache.hadoop.conf.Configuration; + +/** + * The MetaStore representation when the tests are running against a cluster. + */ +public class ClusterMetaStore extends AbstractMetaStoreService { + public ClusterMetaStore(Configuration configuration) { + super(configuration); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/EmbeddedMetaStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/EmbeddedMetaStore.java new file mode 100644 index 0000000..207eb9d --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/EmbeddedMetaStore.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.minihms; + + +import org.apache.hadoop.conf.Configuration; + +/** + * The MetaStore representation when the tests are running against an embedded MetaStore (in + * the same thread as the client). 
+ */ +public class EmbeddedMetaStore extends AbstractMetaStoreService { + public EmbeddedMetaStore(Configuration configuration) { + super(configuration); + } +} \ No newline at end of file diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/MiniHMS.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/MiniHMS.java new file mode 100644 index 0000000..52a78e9 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/MiniHMS.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.minihms; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; + +/** + * Mini HMS implementation, which can be used to run tests against different HMS configurations. + * Currently it supports 3 types: + * - EMBEDDED - MetaStore running in embedded mode + * - REMOTE - MetaStore running in the same process but in a dedicated thread and accessed + * through the Thrift interface + * - CLUSTER - In this case the MiniHMS is only a wrapper around the HMS running on a cluster, + * so the same tests could be run against a real cluster + */ +public class MiniHMS { + public enum MiniHMSType { + EMBEDDED, + REMOTE, + CLUSTER + } + + public static class Builder { + private Configuration metaStoreConf = MetastoreConf.newMetastoreConf(); + private MiniHMSType miniHMSType = MiniHMSType.EMBEDDED; + + public Builder() { + } + + public Builder setConf(Configuration conf) { + this.metaStoreConf = new Configuration(conf); + return this; + } + + public Builder setType(MiniHMSType type) { + this.miniHMSType = type; + return this; + } + + public AbstractMetaStoreService build() throws Exception { + switch (miniHMSType) { + case REMOTE: + return new RemoteMetaStore(metaStoreConf); + case EMBEDDED: + return new EmbeddedMetaStore(metaStoreConf); + case CLUSTER: + return new ClusterMetaStore(metaStoreConf); + default: + throw new IllegalArgumentException("Unexpected miniHMSType: " + miniHMSType); + } + } + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/RemoteMetaStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/RemoteMetaStore.java new file mode 100644 index 0000000..7f99575 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/RemoteMetaStore.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.minihms;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+
+/**
+ * The MetaStore representation when the tests are running against a MetaStore which is running
+ * in a dedicated thread and is accessed through the Thrift interface.
+ */
+public class RemoteMetaStore extends AbstractMetaStoreService {
+
+  public RemoteMetaStore(Configuration configuration) {
+    super(configuration);
+  }
+
+  public void start() throws Exception {
+    // Configure a free port for the remote MetaStore Thrift interface
+    int port = MetaStoreTestUtils.findFreePort();
+    MetastoreConf.setVar(configuration, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setBoolVar(configuration, MetastoreConf.ConfVars.EXECUTE_SET_UGI, false);
+    MetaStoreTestUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), configuration);
+    super.start();
+  }
+}
\ No newline at end of file
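
Usage note for further test classes built on this infrastructure: the factory supplies (name, service) pairs and the JUnit Parameterized runner instantiates the test once per MetaStore flavor, exactly as TestDatabases does above. Below is a minimal sketch of a new test wired the same way; the class name TestSmoke and the single assertion are illustrative only, while the factory, service, and client calls are the ones introduced in this patch.

package org.apache.hadoop.hive.metastore.client;

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.util.List;

@RunWith(Parameterized.class)
public class TestSmoke {
  private final AbstractMetaStoreService metaStore;
  private IMetaStoreClient client;

  // One run per configuration returned by the factory (Embedded, Remote, optionally Cluster)
  @Parameterized.Parameters(name = "{0}")
  public static List<Object[]> getMetaStoreToTest() throws Exception {
    return MetaStoreFactory.getMetaStores();
  }

  public TestSmoke(String name, AbstractMetaStoreService metaStore) throws Exception {
    this.metaStore = metaStore;
    this.metaStore.start();
  }

  @Before
  public void setUp() throws Exception {
    client = metaStore.getClient();
  }

  @After
  public void tearDown() throws Exception {
    if (client != null) {
      client.close();
    }
  }

  @Test
  public void defaultDatabaseIsVisible() throws Exception {
    // The default database always exists, so this should pass against every MetaStore type
    Assert.assertTrue(client.getAllDatabases().contains("default"));
  }
}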
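
The MiniHMS builder can also be used directly when a test needs just one MetaStore flavor rather than the whole parameterized matrix. A rough sketch follows, reusing the imports shown in MetaStoreFactory above; the Derby connection URL is illustrative and the surrounding test method is omitted.

Configuration conf = MetastoreConf.newMetastoreConf();
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
    DefaultPartitionExpressionProxy.class.getName());
conf.set("javax.jdo.option.ConnectionURL",
    "jdbc:derby:memory:junit_metastore_db_example;create=true");

// Build and start an embedded MetaStore; REMOTE or CLUSTER work the same way
AbstractMetaStoreService service = new MiniHMS.Builder()
    .setConf(conf)
    .setType(MiniHMS.MiniHMSType.EMBEDDED)
    .build();
service.start();

IMetaStoreClient client = service.getClient();
try {
  // Any IMetaStoreClient call can be exercised here
  Assert.assertTrue(client.getAllDatabases().contains("default"));
} finally {
  client.close();
  service.stop();
}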