commit d9d1bb0f9a3bf3538fa3407c29b931e4e9ea716b
Author: Alan Gates
Date:   Thu Oct 26 09:49:19 2017 -0700

    HIVE-17982 Move metastore specific itests

diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
new file mode 100644
index 0000000000..62bd94ab8e
--- /dev/null
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.thrift.TException;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class TestAcidTableSetup {
+  private static final Logger LOG = LoggerFactory.getLogger(TestAcidTableSetup.class);
+  protected static HiveMetaStoreClient client;
+  protected static Configuration conf;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = MetastoreConf.newMetastoreConf();
+
+    MetastoreConf.setClass(conf, ConfVars.EXPRESSION_PROXY_CLASS,
+        DefaultPartitionExpressionProxy.class, PartitionExpressionProxy.class);
+    client = new HiveMetaStoreClient(conf);
+  }
+
+  @Test
+  public void testTransactionalValidation() throws Throwable {
+    String dbName = "acidDb";
+    silentDropDatabase(dbName);
+    Database db = new Database();
+    db.setName(dbName);
+    client.createDatabase(db);
+    String tblName = "acidTable";
+    Map<String, String> fields = new HashMap<>();
+    fields.put("name", ColumnType.STRING_TYPE_NAME);
+    fields.put("income", ColumnType.INT_TYPE_NAME);
+
+    Type type = createType("Person1", fields);
+
+    Map<String, String> params = new HashMap<>();
+    params.put("transactional", "");
+
+    /// CREATE TABLE scenarios
+
+    // Fail - No "transactional" property is specified
+    try {
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true': acidDb.acidTable",
+          e.getMessage());
+    }
+
+    // Fail - "transactional" property is set to an invalid value
+    try {
+      params.clear();
+      params.put("transactional", "foobar");
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true': acidDb.acidTable",
+          e.getMessage());
+    }
+
+    // Fail - "transactional" is set to true, but the table is not bucketed
+    try {
+      params.clear();
+      params.put("transactional", "true");
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("The table must be stored using an ACID compliant format (such as ORC): acidDb.acidTable",
+          e.getMessage());
+    }
+
+    List<String> bucketCols = new ArrayList<>();
+    bucketCols.add("income");
+    // Fail - "transactional" is set to true, and the table is bucketed, but doesn't use ORC
+    try {
+      params.clear();
+      params.put("transactional", "true");
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .setBucketCols(bucketCols)
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("The table must be stored using an ACID compliant format (such as ORC): acidDb.acidTable",
+          e.getMessage());
+    }
+
+    // Succeed - "transactional" is set to true, and the table is bucketed, and uses ORC
+    params.clear();
+    params.put("transactional", "true");
+    Table t = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tblName)
+        .setTableParams(params)
+        .setCols(type.getFields())
+        .setBucketCols(bucketCols)
+        .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
+        .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
+        .build();
+    client.createTable(t);
+    assertTrue("CREATE TABLE should succeed",
+        "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
+
+    /// ALTER TABLE scenarios
+
+    // Fail - trying to set "transactional" to "false" is not allowed
+    try {
+      params.clear();
+      params.put("transactional", "false");
+      t = new Table();
+      t.setParameters(params);
+      t.setDbName(dbName);
+      t.setTableName(tblName);
+      client.alter_table(dbName, tblName, t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("TBLPROPERTIES with 'transactional'='true' cannot be unset: acidDb.acidTable", e.getMessage());
+    }
+
+    // Fail - trying to set "transactional" to "true" but doesn't satisfy bucketing and Input/OutputFormat requirement
+    try {
+      tblName += "1";
+      params.clear();
+      t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setCols(type.getFields())
+          .setInputFormat("org.apache.hadoop.mapred.FileInputFormat")
+          .build();
+      client.createTable(t);
+      params.put("transactional", "true");
+      t.setParameters(params);
+      client.alter_table(dbName, tblName, t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("The table must be stored using an ACID compliant format (such as ORC): acidDb.acidTable1",
+          e.getMessage());
+    }
+
+    // Succeed - trying to set "transactional" to "true", and satisfies bucketing and Input/OutputFormat requirement
+    tblName += "2";
+    params.clear();
+    t = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tblName)
+        .setCols(type.getFields())
+        .setNumBuckets(1)
+        .setBucketCols(bucketCols)
+        .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
+        .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
+        .build();
+    client.createTable(t);
+    params.put("transactional", "true");
+    t.setParameters(params);
+    client.alter_table(dbName, tblName, t);
+    assertTrue("ALTER TABLE should succeed",
+        "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
+  }
+
+  private static void silentDropDatabase(String dbName) throws TException {
+    try {
+      for (String tableName : client.getTables(dbName, "*")) {
+        client.dropTable(dbName, tableName);
+      }
+      client.dropDatabase(dbName);
+    } catch (NoSuchObjectException|InvalidOperationException e) {
+      // NOP
+    }
+  }
+
+  private Type createType(String typeName, Map<String, String> fields) throws Throwable {
+    Type typ1 = new Type();
+    typ1.setName(typeName);
+    typ1.setFields(new ArrayList<>(fields.size()));
+    for(String fieldName : fields.keySet()) {
+      typ1.getFields().add(
+          new FieldSchema(fieldName, fields.get(fieldName), ""));
+    }
+    client.createType(typ1);
+    return typ1;
+  }
+}
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
deleted file mode 100644
index e9dabee81d..0000000000
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.hadoop.hive.metastore;
-
-import java.io.IOException;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory;
-
-public class TestAdminUser extends TestCase{
-
-  public void testCreateAdminNAddUser() throws IOException, Throwable {
-    HiveConf conf = new HiveConf();
-    conf.setVar(ConfVars.USERS_IN_ADMIN_ROLE, "adminuser");
-    conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER,SQLStdHiveAuthorizerFactory.class.getName());
-    RawStore rawStore = new HMSHandler("testcreateroot", conf).getMS();
-    Role adminRole = rawStore.getRole(HiveMetaStore.ADMIN);
-    assertTrue(adminRole.getOwnerName().equals(HiveMetaStore.ADMIN));
-    assertEquals(rawStore.listPrincipalGlobalGrants(HiveMetaStore.ADMIN, PrincipalType.ROLE)
-        .get(0).getGrantInfo().getPrivilege(),"All");
-    assertEquals(rawStore.listRoles("adminuser", PrincipalType.USER).get(0).
-        getRoleName(),HiveMetaStore.ADMIN);
-  }
-}
\ No newline at end of file
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
deleted file mode 100644
index 7b3a896f89..0000000000
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.ql.CommandNeedRetryException;
-import org.apache.hadoop.hive.ql.DriverFactory;
-import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.thrift.TException;
-
-public class TestMarkPartition extends TestCase{
-
-  protected HiveConf hiveConf;
-  private IDriver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-    System.setProperty("hive.metastore.event.clean.freq", "2");
-    System.setProperty("hive.metastore.event.expiry.duration", "5");
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-
-  }
-
-  public void testMarkingPartitionSet() throws CommandNeedRetryException, MetaException,
-      TException, NoSuchObjectException, UnknownDBException, UnknownTableException,
-      InvalidPartitionException, UnknownPartitionException, InterruptedException {
-    HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
-    driver = DriverFactory.newDriver(hiveConf);
-    driver.run("drop database if exists hive2215 cascade");
-    driver.run("create database hive2215");
-    driver.run("use hive2215");
-    driver.run("drop table if exists tmptbl");
-    driver.run("create table tmptbl (a string) partitioned by (b string)");
-    driver.run("alter table tmptbl add partition (b='2011')");
-    Map<String, String> kvs = new HashMap<String, String>();
-    kvs.put("b", "'2011'");
-    msc.markPartitionForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-    assert msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-    Thread.sleep(10000);
-    assert !msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-
-    kvs.put("b", "'2012'");
-    assert !msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-    try{
-      msc.markPartitionForEvent("hive2215", "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
-      assert false;
-    } catch(Exception e){
-      assert e instanceof UnknownTableException;
-    }
-    try{
-      msc.isPartitionMarkedForEvent("hive2215", "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
-      assert false;
-    } catch(Exception e){
-      assert e instanceof UnknownTableException;
-    }
-    kvs.put("a", "'2012'");
-    try{
-      msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-      assert false;
-    } catch(Exception e){
-      assert e instanceof InvalidPartitionException;
-    }
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    driver.run("drop database if exists hive2215 cascade");
-    super.tearDown();
-  }
-
-}
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
deleted file mode 100644
index cc2c5f9a93..0000000000
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.events.ListenerEvent;
-import org.apache.hadoop.hive.ql.DriverFactory;
-import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-import junit.framework.TestCase;
-
-/**
- * Ensure that the status of MetaStore events depend on the RawStore's commit status.
- */
-public class TestMetaStoreEventListenerOnlyOnCommit extends TestCase {
-
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private IDriver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-
-    DummyRawStoreControlledCommit.setCommitSucceed(true);
-
-    System.setProperty(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
-        DummyListener.class.getName());
-    System.setProperty(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname,
-        DummyRawStoreControlledCommit.class.getName());
-
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = DriverFactory.newDriver(hiveConf);
-
-    DummyListener.notifyList.clear();
-  }
-
-  public void testEventStatus() throws Exception {
-    int listSize = 0;
-    List<ListenerEvent> notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-
-    driver.run("CREATE DATABASE tmpDb");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertTrue(DummyListener.getLastEvent().getStatus());
-
-    driver.run("CREATE TABLE unittest_TestMetaStoreEventListenerOnlyOnCommit (id INT) " +
-        "PARTITIONED BY (ds STRING)");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertTrue(DummyListener.getLastEvent().getStatus());
-
-    driver.run("ALTER TABLE unittest_TestMetaStoreEventListenerOnlyOnCommit " +
-        "ADD PARTITION(ds='foo1')");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertTrue(DummyListener.getLastEvent().getStatus());
-
-    DummyRawStoreControlledCommit.setCommitSucceed(false);
-
-    driver.run("ALTER TABLE unittest_TestMetaStoreEventListenerOnlyOnCommit " +
-        "ADD PARTITION(ds='foo2')");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertFalse(DummyListener.getLastEvent().getStatus());
-
-  }
-}
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
deleted file mode 100644
index 025cc4032d..0000000000
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.DriverFactory;
-import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-import junit.framework.TestCase;
-
-/**
- * TestMetaStoreInitListener. Test case for
- * {@link org.apache.hadoop.hive.metastore.MetaStoreInitListener}
- */
-public class TestMetaStoreInitListener extends TestCase {
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private IDriver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-    System.setProperty("hive.metastore.init.hooks",
-        DummyMetaStoreInitListener.class.getName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = DriverFactory.newDriver(hiveConf);
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  public void testMetaStoreInitListener() throws Exception {
-    // DummyMataStoreInitListener's onInit will be called at HMSHandler
-    // initialization, and set this to true
-    assertTrue(DummyMetaStoreInitListener.wasCalled);
-  }
-
-}
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
deleted file mode 100644
index c7c35f3756..0000000000
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- *
- * TestRemoteHiveMetaStoreIpAddress.
- *
- * Test which checks that the remote Hive metastore stores the proper IP address using
- * IpAddressListener
- */
-public class TestRemoteHiveMetaStoreIpAddress extends TestCase {
-  private static boolean isServerStarted = false;
-  private static HiveConf hiveConf;
-  private static HiveMetaStoreClient msc;
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    hiveConf = new HiveConf(this.getClass());
-
-    if (isServerStarted) {
-      assertNotNull("Unable to connect to the MetaStore server", msc);
-      return;
-    }
-
-    System.setProperty(ConfVars.METASTORE_EVENT_LISTENERS.varname,
-        IpAddressListener.class.getName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-    System.out.println("Started MetaStore Server on port " + port);
-    isServerStarted = true;
-
-    // This is default case with setugi off for both client and server
-    createClient(port);
-  }
-
-  public void testIpAddress() throws Exception {
-    try {
-
-      Database db = new Database();
-      db.setName("testIpAddressIp");
-      msc.createDatabase(db);
-      msc.dropDatabase(db.getName());
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testIpAddress() failed.");
-      throw e;
-    }
-  }
-
-  protected void createClient(int port) throws Exception {
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    msc = new HiveMetaStoreClient(hiveConf);
-  }
-}
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
deleted file mode 100644
index dfd80bc235..0000000000
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import junit.framework.Assert;
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat;
-import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-
-/**
- * TestRetryingHMSHandler. Test case for
- * {@link org.apache.hadoop.hive.metastore.RetryingHMSHandler}
- */
-public class TestRetryingHMSHandler extends TestCase {
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-    System.setProperty("hive.metastore.pre.event.listeners",
-        AlternateFailurePreListener.class.getName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.setIntVar(HiveConf.ConfVars.HMSHANDLERATTEMPTS, 2);
-    hiveConf.setTimeVar(HiveConf.ConfVars.HMSHANDLERINTERVAL, 0, TimeUnit.MILLISECONDS);
-    hiveConf.setBoolVar(HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF, false);
-    msc = new HiveMetaStoreClient(hiveConf);
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  // Create a database and a table in that database. Because the AlternateFailurePreListener is
-  // being used each attempt to create something should require two calls by the RetryingHMSHandler
-  public void testRetryingHMSHandler() throws Exception {
-    String dbName = "hive4159";
-    String tblName = "tmptbl";
-
-    Database db = new Database();
-    db.setName(dbName);
-    msc.createDatabase(db);
-
-    Assert.assertEquals(2, AlternateFailurePreListener.getCallCount());
-
-    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
-    cols.add(new FieldSchema("c1", serdeConstants.STRING_TYPE_NAME, ""));
-    cols.add(new FieldSchema("c2", serdeConstants.INT_TYPE_NAME, ""));
-
-    Map<String, String> params = new HashMap<String, String>();
-    params.put("test_param_1", "Use this for comments etc");
-
-    Map<String, String> serdParams = new HashMap<String, String>();
-    serdParams.put(serdeConstants.SERIALIZATION_FORMAT, "1");
-
-    StorageDescriptor sd = new StorageDescriptor();
-
-    sd.setCols(cols);
-    sd.setCompressed(false);
-    sd.setNumBuckets(1);
-    sd.setParameters(params);
-    sd.setBucketCols(new ArrayList<String>(2));
-    sd.getBucketCols().add("name");
-    sd.setSerdeInfo(new SerDeInfo());
-    sd.getSerdeInfo().setName(tblName);
-    sd.getSerdeInfo().setParameters(serdParams);
-    sd.getSerdeInfo().getParameters()
-        .put(serdeConstants.SERIALIZATION_FORMAT, "1");
-    sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
-    sd.setInputFormat(HiveInputFormat.class.getName());
-    sd.setOutputFormat(HiveOutputFormat.class.getName());
-    sd.setSortCols(new ArrayList<Order>());
-
-    Table tbl = new Table();
-    tbl.setDbName(dbName);
-    tbl.setTableName(tblName);
-    tbl.setSd(sd);
-    tbl.setLastAccessTime(0);
-
-    msc.createTable(tbl);
-
-    Assert.assertEquals(4, AlternateFailurePreListener.getCallCount());
-  }
-
-}
diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml
index cce3282165..d87863e156 100644
--- standalone-metastore/pom.xml
+++ standalone-metastore/pom.xml
@@ -44,6 +44,7 @@
     <test.tmp.dir>${project.build.directory}/tmp</test.tmp.dir>
     <test.warehouse.dir>${project.build.directory}/warehouse</test.warehouse.dir>
     <test.warehouse.scheme>file://</test.warehouse.scheme>
+    <test.forkcount>1</test.forkcount>
 
     <ant.contrib.version>1.0b3</ant.contrib.version>
@@ -506,6 +507,7 @@
           <configuration>
             <redirectTestOutputToFile>true</redirectTestOutputToFile>
             <reuseForks>false</reuseForks>
+            <forkCount>${test.forkcount}</forkCount>
            <argLine>-Xmx2048m</argLine>
             <failIfNoTests>false</failIfNoTests>
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
index 6c8b1d80a9..50fc186a16 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
@@ -73,6 +73,11 @@ public IndexBuilder setIndexParams(Map<String, String> indexParams) {
     return this;
   }
 
+  public IndexBuilder addIndexParam(String key, String value) {
+    indexParams.put(key, value);
+    return this;
+  }
+
   public IndexBuilder setIndexName(String indexName) {
     this.indexName = indexName;
     return this;
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
index 265625f95c..38e5a8fcb9 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
@@ -53,9 +53,10 @@ public PartitionBuilder setTableName(String tableName) {
     return this;
   }
 
-  public PartitionBuilder setDbAndTableName(Table table) {
+  public PartitionBuilder fromTable(Table table) {
     this.dbName = table.getDbName();
     this.tableName = table.getTableName();
+    setCols(table.getSd().getCols());
     return this;
   }
 
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
index 1d457a6818..69acf3cfff 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
@@ -46,6 +48,8 @@ public TableBuilder() {
     tableParams = new HashMap<>();
     createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000);
     retention = 0;
+    partCols = new ArrayList<>();
+    type = TableType.MANAGED_TABLE.name();
     super.setChild(this);
   }
 
@@ -90,7 +94,6 @@ public TableBuilder setPartCols(List<FieldSchema> partCols) {
   }
 
   public TableBuilder addPartCol(String name, String type, String comment) {
-    if (partCols == null) partCols = new ArrayList<>();
     partCols.add(new FieldSchema(name, type, comment));
     return this;
   }
@@ -135,6 +138,13 @@ public TableBuilder setTemporary(boolean temporary) {
     return this;
   }
 
+  public TableBuilder fromIndex(Index index) {
+    dbName = index.getDbName();
+    tableName = index.getIndexTableName();
+    setCols(index.getSd().getCols());
+    return this;
+  }
+
   public Table build() throws MetaException {
     if (dbName == null || tableName == null) {
       throw new MetaException("You must set the database and table name");
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index 2e43dc85ea..64b063e19f 100644
--- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -485,7 +485,7 @@ public static ConfVars getMetaConf(String name) {
         "hive.metastore.hbase.file.metadata.threads", 1,
         "Number of threads to use to read file metadata in background to cache it."),
     FILTER_HOOK("metastore.filter.hook", "hive.metastore.filter.hook",
-        "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl",
+        org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl.class.getName(),
         "Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager" +
         "is set to instance of HiveAuthorizerFactory, then this value is ignored."),
     FS_HANDLER_CLS("metastore.fs.handler.class", "hive.metastore.fs.handler.class",
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
similarity index 95%
rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
index 51be50426f..cc0bd776b6 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -18,18 +18,12 @@
 
 package org.apache.hadoop.hive.metastore;
 
-import java.lang.Exception;
 import java.lang.Override;
-import java.lang.RuntimeException;
-import java.lang.StackTraceElement;
 import java.sql.Array;
 import java.sql.Blob;
 import java.sql.CallableStatement;
 import java.sql.Clob;
-import java.sql.Connection;
 import java.sql.DatabaseMetaData;
-import java.sql.DriverManager;
-import java.sql.DriverPropertyInfo;
 import java.sql.NClob;
 import java.sql.PreparedStatement;
 import java.sql.SQLClientInfoException;
@@ -45,20 +39,6 @@
 import java.util.logging.Logger;
 import java.util.Properties;
 
-import javax.jdo.JDOCanRetryException;
-
-import junit.framework.TestCase;
-import org.junit.Test;
-
-import org.apache.derby.jdbc.EmbeddedDriver;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.ObjectStore;
-
-import org.apache.hadoop.hive.metastore.TestObjectStoreInitRetry;
-
-
 /**
  * Fake derby driver - companion class to enable testing by TestObjectStoreInitRetry
  */
@@ -421,4 +401,4 @@ public Logger getParentLogger() throws SQLFeatureNotSupportedException {
 
   }
 
-};
+}
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
index 380f3a1fd0..d4820b3bd3 100644
--- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
@@ -26,6 +26,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
 import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.slf4j.Logger;
@@ -51,6 +52,7 @@ public static int startMetaStore(Configuration conf) throws Exception {
     return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf);
   }
 
+
   public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception {
     MetaStoreTestUtils.startMetaStore(port, bridge, null);
   }
@@ -91,19 +93,22 @@ public static int startMetaStoreWithRetry() throws Exception {
 
   public static int startMetaStoreWithRetry(final HadoopThriftAuthBridge bridge,
       Configuration conf) throws Exception {
-    Exception metaStoreException = null;
-    int metaStorePort = 0;
+    int metaStorePort = findFreePort();
+    startMetaStoreWithRetry(metaStorePort, bridge, conf);
+    return metaStorePort;
+  }
 
+  private static void startMetaStoreWithRetry(int port, HadoopThriftAuthBridge bridge,
+      Configuration conf) throws Exception {
+    Exception metaStoreException = null;
     for (int tryCount = 0; tryCount < MetaStoreTestUtils.RETRY_COUNT; tryCount++) {
       try {
-        metaStorePort = MetaStoreTestUtils.findFreePort();
-        MetaStoreTestUtils.startMetaStore(metaStorePort, bridge, conf);
-        return metaStorePort;
+        MetaStoreTestUtils.startMetaStore(port, bridge, conf);
+        return;
       } catch (ConnectException ce) {
         metaStoreException = ce;
       }
     }
-
     throw metaStoreException;
   }
 
@@ -198,11 +203,20 @@ public static int findFreePortExcepting(int portToExclude) throws IOException {
   /**
    * Setup a configuration file for standalone mode.  There are a few config variables that have
    * defaults that require parts of Hive that aren't present in standalone mode.  This method
-   * sets them to something that will work without the rest of Hive.
+   * sets them to something that will work without the rest of Hive.  It only changes them if
+   * they have not already been set, to avoid clobbering intentional changes.
    * @param conf Configuration object
    */
   public static void setConfForStandloneMode(Configuration conf) {
-    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TASK_THREADS_ALWAYS,
-        EventCleanerTask.class.getName());
+    if (MetastoreConf.getVar(conf, ConfVars.TASK_THREADS_ALWAYS).equals(
+        ConfVars.TASK_THREADS_ALWAYS.getDefaultVal())) {
+      MetastoreConf.setVar(conf, ConfVars.TASK_THREADS_ALWAYS,
+          EventCleanerTask.class.getName());
+    }
+    if (MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS).equals(
+        ConfVars.EXPRESSION_PROXY_CLASS.getDefaultVal())) {
+      MetastoreConf.setClass(conf, ConfVars.EXPRESSION_PROXY_CLASS,
+          DefaultPartitionExpressionProxy.class, PartitionExpressionProxy.class);
+    }
   }
 }
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
new file mode 100644
index 0000000000..3b541d22b9
--- /dev/null
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestAdminUser {
+
+  @Test
+  public void testCreateAdminNAddUser() throws MetaException, NoSuchObjectException {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setVar(conf, ConfVars.USERS_IN_ADMIN_ROLE, "adminuser");
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    RawStore rawStore = new HMSHandler("testcreateroot", conf).getMS();
+    Role adminRole = rawStore.getRole(HiveMetaStore.ADMIN);
+    Assert.assertTrue(adminRole.getOwnerName().equals(HiveMetaStore.ADMIN));
+    Assert.assertEquals(rawStore.listPrincipalGlobalGrants(HiveMetaStore.ADMIN, PrincipalType.ROLE)
+        .get(0).getGrantInfo().getPrivilege(),"All");
+    Assert.assertEquals(rawStore.listRoles("adminuser", PrincipalType.USER).get(0).
+        getRoleName(),HiveMetaStore.ADMIN);
+  }
+}
\ No newline at end of file
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
similarity index 73%
rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
index 462768d3bd..72758df7e7 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -19,32 +19,26 @@
 package org.apache.hadoop.hive.metastore;
 
 import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Before;
 
 public class TestEmbeddedHiveMetaStore extends TestHiveMetaStore {
 
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    warehouse = new Warehouse(hiveConf);
+  @Before
+  public void openWarehouse() throws Exception {
+    warehouse = new Warehouse(conf);
     client = createClient();
   }
 
-  @Override
-  protected void tearDown() throws Exception {
-    try {
-      super.tearDown();
-      client.close();
-    } catch (Throwable e) {
-      System.err.println("Unable to close metastore");
-      System.err.println(StringUtils.stringifyException(e));
-      throw new Exception(e);
-    }
+  @After
+  public void tearDown() throws Exception {
+    client.close();
   }
 
   @Override
   protected HiveMetaStoreClient createClient() throws Exception {
     try {
-      return new HiveMetaStoreClient(hiveConf);
+      return new HiveMetaStoreClient(conf);
     } catch (Throwable e) {
       System.err.println("Unable to open the metastore");
       System.err.println(StringUtils.stringifyException(e));
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
similarity index 73%
rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
index 2382582b46..21046363fb 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
+++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -25,10 +25,6 @@
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.UtilsForTest;
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -36,9 +32,13 @@
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionSpec;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.DriverFactory;
-import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.IndexBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -51,7 +51,7 @@
   private static final Logger LOG = LoggerFactory.getLogger(TestFilterHooks.class);
 
   public static class DummyMetaStoreFilterHookImpl extends DefaultMetaStoreFilterHookImpl {
-    public static boolean blockResults = false;
+    private static boolean blockResults = false;
 
     public DummyMetaStoreFilterHookImpl(Configuration conf) {
       super(conf);
@@ -60,7 +60,7 @@ public DummyMetaStoreFilterHookImpl(Configuration conf) {
     @Override
     public List<String> filterDatabases(List<String> dbList) throws MetaException {
       if (blockResults) {
-        return new ArrayList<String>();
+        return new ArrayList<>();
       }
       return super.filterDatabases(dbList);
     }
@@ -76,7 +76,7 @@ public Database filterDatabase(Database dataBase) throws NoSuchObjectException {
     @Override
     public List<String> filterTableNames(String dbName, List<String> tableList) throws MetaException {
       if (blockResults) {
-        return new ArrayList<String>();
+        return new ArrayList<>();
       }
       return super.filterTableNames(dbName, tableList);
     }
@@ -92,7 +92,7 @@ public Table filterTable(Table table) throws NoSuchObjectException {
     @Override
     public List<Table> filterTables(List<Table> tableList) throws MetaException {
       if (blockResults) {
-        return new ArrayList<Table>();
+        return new ArrayList<>();
       }
       return super.filterTables(tableList);
     }
@@ -100,7 +100,7 @@ public Table filterTable(Table table) throws NoSuchObjectException {
     @Override
     public List<Partition> filterPartitions(List<Partition> partitionList) throws MetaException {
       if (blockResults) {
-        return new ArrayList<Partition>();
+        return new ArrayList<>();
       }
       return super.filterPartitions(partitionList);
     }
@@ -109,7 +109,7 @@ public Table filterTable(Table table) throws NoSuchObjectException {
     @Override
     public List<PartitionSpec> filterPartitionSpecs(
         List<PartitionSpec> partitionSpecList) throws MetaException {
       if (blockResults) {
-        return new ArrayList<PartitionSpec>();
+        return new ArrayList<>();
       }
       return super.filterPartitionSpecs(partitionSpecList);
     }
@@ -126,7 +126,7 @@ public Partition filterPartition(Partition partition) throws NoSuchObjectException {
     public List<String> filterPartitionNames(String dbName, String tblName,
         List<String> partitionNames) throws MetaException {
       if (blockResults) {
-        return new ArrayList<String>();
+        return new ArrayList<>();
       }
       return super.filterPartitionNames(dbName, tblName, partitionNames);
     }
@@ -143,7 +143,7 @@ public Index filterIndex(Index index) throws NoSuchObjectException {
     public List<String> filterIndexNames(String dbName, String tblName,
         List<String> indexList) throws MetaException {
       if (blockResults) {
-        return new ArrayList<String>();
+        return new ArrayList<>();
      }
       return super.filterIndexNames(dbName, tblName, indexList);
     }
@@ -151,7 +151,7 @@ public Index filterIndex(Index index) throws NoSuchObjectException {
     @Override
     public List<Index> filterIndexes(List<Index> indexeList) throws MetaException {
       if (blockResults) {
-        return new ArrayList<Index>();
+        return new ArrayList<>();
       }
       return super.filterIndexes(indexeList);
     }
@@ -162,47 +162,69 @@ public Index filterIndex(Index index) throws NoSuchObjectException {
   private static final String TAB1 = "tab1";
   private static final String TAB2 = "tab2";
   private static final String INDEX1 = "idx1";
-  private static HiveConf hiveConf;
+  private static Configuration conf;
   private static HiveMetaStoreClient msc;
-  private static IDriver driver;
 
   @BeforeClass
   public static void setUp() throws Exception {
     DummyMetaStoreFilterHookImpl.blockResults = false;
-    hiveConf = new HiveConf(TestFilterHooks.class);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    hiveConf.setVar(ConfVars.METASTORE_FILTER_HOOK, DummyMetaStoreFilterHookImpl.class.getName());
-    UtilsForTest.setNewDerbyDbLocation(hiveConf, TestFilterHooks.class.getSimpleName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry(hiveConf);
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = DriverFactory.newDriver(hiveConf);
-
-    driver.run("drop database if exists " + DBNAME1 + " cascade");
-    driver.run("drop database if exists " + DBNAME2 + " cascade");
-    driver.run("create database " + DBNAME1);
-    driver.run("create database " + DBNAME2);
-    driver.run("use " + DBNAME1);
+ TAB1 + " (id int, name string)"); - driver.run("create table " + TAB2 + " (id int) partitioned by (name string)"); - driver.run("ALTER TABLE " + TAB2 + " ADD PARTITION (name='value1')"); - driver.run("ALTER TABLE " + TAB2 + " ADD PARTITION (name='value2')"); - driver.run("CREATE INDEX " + INDEX1 + " on table " + TAB1 + "(id) AS 'COMPACT' WITH DEFERRED REBUILD"); + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetastoreConf.setClass(conf, ConfVars.FILTER_HOOK, DummyMetaStoreFilterHookImpl.class, + MetaStoreFilterHook.class); + MetaStoreTestUtils.setConfForStandloneMode(conf); + int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + + msc = new HiveMetaStoreClient(conf); + + msc.dropDatabase(DBNAME1, true, true, true); + msc.dropDatabase(DBNAME2, true, true, true); + Database db1 = new DatabaseBuilder() + .setName(DBNAME1) + .build(); + msc.createDatabase(db1); + Database db2 = new DatabaseBuilder() + .setName(DBNAME2) + .build(); + msc.createDatabase(db2); + Table tab1 = new TableBuilder() + .setDbName(DBNAME1) + .setTableName(TAB1) + .addCol("id", "int") + .addCol("name", "string") + .build(); + msc.createTable(tab1); + Table tab2 = new TableBuilder() + .setDbName(DBNAME1) + .setTableName(TAB2) + .addCol("id", "int") + .addPartCol("name", "string") + .build(); + msc.createTable(tab2); + Partition part1 = new PartitionBuilder() + .fromTable(tab2) + .addValue("value1") + .build(); + msc.add_partition(part1); + Partition part2 = new PartitionBuilder() + .fromTable(tab2) + .addValue("value2") + .build(); + msc.add_partition(part2); + Index index = new IndexBuilder() + .setDbAndTableName(tab1) + .setIndexName(INDEX1) + .setDeferredRebuild(true) + .addCol("id", "int") + .build(); + msc.createIndex(index, new TableBuilder().fromIndex(index).build()); } @AfterClass public static void tearDown() throws Exception { - DummyMetaStoreFilterHookImpl.blockResults = false; - driver.run("drop database if exists " + DBNAME1 + " cascade"); - driver.run("drop database if exists " + DBNAME2 + " cascade"); - driver.close(); - driver.destroy(); msc.close(); } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java similarity index 72% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 0aa1d4e16a..2599ab103e 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import java.lang.reflect.Field; import java.io.IOException; import java.sql.Connection; +import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.SQLException; import java.util.ArrayList; @@ -31,28 +32,30 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Properties; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; - -import junit.framework.TestCase; - +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.utils.FileUtils; +import org.apache.hadoop.hive.metastore.utils.SecurityUtils; import org.datanucleus.api.jdo.JDOPersistenceManager; import org.datanucleus.api.jdo.JDOPersistenceManagerFactory; +import org.junit.Assert; +import org.junit.Before; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hive.common.FileUtils; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -68,13 +71,11 @@ import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.ResourceType; import org.apache.hadoop.hive.metastore.api.ResourceUri; import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; import org.apache.hadoop.hive.metastore.api.Table; @@ -82,24 +83,23 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.io.HiveInputFormat; -import org.apache.hadoop.hive.ql.io.HiveOutputFormat; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.util.StringUtils; import org.apache.thrift.TException; -import org.junit.Assert; import org.junit.Test; import com.google.common.collect.Lists; -public abstract class TestHiveMetaStore 
extends TestCase { +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public abstract class TestHiveMetaStore { private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStore.class); protected static HiveMetaStoreClient client; - protected static HiveConf hiveConf; + protected static Configuration conf; protected static Warehouse warehouse; protected static boolean isThriftClient = false; @@ -110,31 +110,32 @@ protected abstract HiveMetaStoreClient createClient() throws Exception; - @Override - protected void setUp() throws Exception { - hiveConf = new HiveConf(this.getClass()); - warehouse = new Warehouse(hiveConf); + @Before + public void setUp() throws Exception { + conf = MetastoreConf.newMetastoreConf(); + warehouse = new Warehouse(conf); // set some values to use for getting conf. vars - hiveConf.set("hive.metastore.metrics.enabled","true"); - hiveConf.set("hive.key1", "value1"); - hiveConf.set("hive.key2", "http://www.example.com"); - hiveConf.set("hive.key3", ""); - hiveConf.set("hive.key4", "0"); - hiveConf.set("datanucleus.autoCreateTables", "false"); - - hiveConf.setIntVar(ConfVars.METASTORE_BATCH_RETRIEVE_MAX, 2); - hiveConf.setIntVar(ConfVars.METASTORE_LIMIT_PARTITION_REQUEST, DEFAULT_LIMIT_PARTITION_REQUEST); + MetastoreConf.setBoolVar(conf, ConfVars.METRICS_ENABLED, true); + conf.set("hive.key1", "value1"); + conf.set("hive.key2", "http://www.example.com"); + conf.set("hive.key3", ""); + conf.set("hive.key4", "0"); + conf.set("datanucleus.autoCreateTables", "false"); + + MetaStoreTestUtils.setConfForStandloneMode(conf); + MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2); + MetastoreConf.setLongVar(conf, ConfVars.LIMIT_PARTITION_REQUEST, DEFAULT_LIMIT_PARTITION_REQUEST); + MetastoreConf.setVar(conf, ConfVars.STORAGE_SCHEMA_READER_IMPL, "no.such.class"); } + @Test public void testNameMethods() { - Map spec = new LinkedHashMap(); + Map spec = new LinkedHashMap<>(); spec.put("ds", "2008-07-01 14:13:12"); spec.put("hr", "14"); - List vals = new ArrayList(); - for(String v : spec.values()) { - vals.add(v); - } + List vals = new ArrayList<>(); + vals.addAll(spec.values()); String partName = "ds=2008-07-01 14%3A13%3A12/hr=14"; try { @@ -150,7 +151,7 @@ public void testNameMethods() { Map emptySpec = client.partitionNameToSpec(""); assertTrue("Spec should be empty", emptySpec.size() == 0); } catch (Exception e) { - assert(false); + fail(); } } @@ -158,13 +159,13 @@ public void testNameMethods() { * tests create table and partition and tries to drop the table without * droppping the partition * - * @throws Exception */ + @Test public void testPartition() throws Exception { - partitionTester(client, hiveConf); + partitionTester(client, conf); } - public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf) + private static void partitionTester(HiveMetaStoreClient client, Configuration conf) throws Exception { try { String dbName = "compdb"; @@ -182,56 +183,33 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf client.createDatabase(db); db = client.getDatabase(dbName); Path dbPath = new Path(db.getLocationUri()); - FileSystem fs = FileSystem.get(dbPath.toUri(), hiveConf); + FileSystem fs = FileSystem.get(dbPath.toUri(), conf); client.dropType(typeName); Type typ1 = new Type(); 
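// Aside: the @Before method above is the configuration pattern this whole patch
// moves to -- MetastoreConf with typed ConfVars instead of HiveConf string keys,
// and fixtures built through the client-side builders rather than Driver DDL.
// A minimal, self-contained sketch of that pattern, using only classes that
// appear in this diff (the database and table names are illustrative):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

public class MetastoreConfSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = MetastoreConf.newMetastoreConf();
    // Typed setters replace raw keys such as "hive.metastore.batch.retrieve.max".
    MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2);
    MetastoreConf.setBoolVar(conf, ConfVars.METRICS_ENABLED, true);
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // Fixtures go through the metastore client directly, no Driver involved.
      client.createDatabase(new DatabaseBuilder().setName("demo_db").build());
      client.createTable(new TableBuilder()
          .setDbName("demo_db")
          .setTableName("demo_tab")
          .addCol("id", "int")
          .addCol("name", "string")
          .build());
    } finally {
      client.close();
    }
  }
}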
typ1.setName(typeName); - typ1.setFields(new ArrayList(2)); + typ1.setFields(new ArrayList<>(2)); typ1.getFields().add( - new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); + new FieldSchema("name", ColumnType.STRING_TYPE_NAME, "")); typ1.getFields().add( - new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); + new FieldSchema("income", ColumnType.INT_TYPE_NAME, "")); client.createType(typ1); - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(typ1.getFields()); - sd.setCompressed(false); - sd.setNumBuckets(1); - sd.setParameters(new HashMap()); - sd.getParameters().put("test_param_1", "Use this for comments etc"); - sd.setBucketCols(new ArrayList(2)); - sd.getBucketCols().add("name"); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters() - .put(serdeConstants.SERIALIZATION_FORMAT, "1"); - sd.setSortCols(new ArrayList()); - sd.setStoredAsSubDirectories(false); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); - - //skewed information - SkewedInfo skewInfor = new SkewedInfo(); - skewInfor.setSkewedColNames(Arrays.asList("name")); - List skv = Arrays.asList("1"); - skewInfor.setSkewedColValues(Arrays.asList(skv)); - Map, String> scvlm = new HashMap, String>(); - scvlm.put(skv, "location1"); - skewInfor.setSkewedColValueLocationMaps(scvlm); - sd.setSkewedInfo(skewInfor); - - tbl.setPartitionKeys(new ArrayList(2)); - tbl.getPartitionKeys().add( - new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, "")); - tbl.getPartitionKeys().add( - new FieldSchema("hr", serdeConstants.STRING_TYPE_NAME, "")); + List skewedColValue = Collections.singletonList("1"); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .setCols(typ1.getFields()) + .setNumBuckets(1) + .addBucketCol("name") + .addTableParam("test_param_1", "Use this for comments etc") + .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1") + .addSkewedColName("name") + .setSkewedColValues(Collections.singletonList(skewedColValue)) + .setSkewedColValueLocationMaps(Collections.singletonMap(skewedColValue, "location1")) + .addPartCol("ds", ColumnType.STRING_TYPE_NAME) + .addPartCol("hr", ColumnType.STRING_TYPE_NAME) + .build(); client.createTable(tbl); @@ -306,9 +284,9 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf assertTrue("Partitions are not the same", part.equals(part_get)); // Test partition listing with a partial spec - ds is specified but hr is not - List partialVals = new ArrayList(); + List partialVals = new ArrayList<>(); partialVals.add(vals.get(0)); - Set parts = new HashSet(); + Set parts = new HashSet<>(); parts.add(part); parts.add(part2); @@ -317,7 +295,7 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf assertTrue("Should have returned 2 partitions", partial.size() == 2); assertTrue("Not all parts returned", partial.containsAll(parts)); - Set partNames = new HashSet(); + Set partNames = new HashSet<>(); partNames.add(partName); partNames.add(part2Name); List partialNames = client.listPartitionNames(dbName, tblName, partialVals, @@ -374,7 +352,7 @@ public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf // Test append_partition_by_name 
client.appendPartition(dbName, tblName, partName); Partition part5 = client.getPartition(dbName, tblName, part.getValues()); - assertTrue("Append partition by name failed", part5.getValues().equals(vals));; + assertTrue("Append partition by name failed", part5.getValues().equals(vals)); Path part5Path = new Path(part5.getSd().getLocation()); assertTrue(fs.exists(part5Path)); @@ -399,7 +377,7 @@ public void testPartition() throws Exception { Exception savedException; // add_partitions(empty list) : ok, normal operation - client.add_partitions(new ArrayList()); + client.add_partitions(new ArrayList<>()); // add_partitions(1,2,3) : ok, normal operation Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1"); @@ -459,7 +437,7 @@ public void testPartition() throws Exception { // recreate table as external, drop partition and it should // still exist - tbl.setParameters(new HashMap()); + tbl.setParameters(new HashMap<>()); tbl.getParameters().put("EXTERNAL", "TRUE"); client.createTable(tbl); retp = client.add_partition(part); @@ -482,8 +460,7 @@ public void testPartition() throws Exception { private static void verifyPartitionsPublished(HiveMetaStoreClient client, String dbName, String tblName, List partialSpec, - List expectedPartitions) - throws NoSuchObjectException, MetaException, TException { + List expectedPartitions) throws TException { // Test partition listing with a partial spec List mpartial = client.listPartitions(dbName, tblName, partialSpec, @@ -495,8 +472,8 @@ private static void verifyPartitionsPublished(HiveMetaStoreClient client, } private static List makeVals(String ds, String id) { - List vals4 = new ArrayList(2); - vals4 = new ArrayList(2); + List vals4 = new ArrayList<>(2); + vals4 = new ArrayList<>(2); vals4.add(ds); vals4.add(id); return vals4; @@ -508,7 +485,7 @@ private static Partition makePartitionObject(String dbName, String tblName, part4.setDbName(dbName); part4.setTableName(tblName); part4.setValues(ptnVals); - part4.setParameters(new HashMap()); + part4.setParameters(new HashMap<>()); part4.setSd(tbl.getSd().deepCopy()); part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy()); part4.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix); @@ -516,6 +493,7 @@ private static Partition makePartitionObject(String dbName, String tblName, return part4; } + @Test public void testListPartitions() throws Throwable { // create a table with multiple partitions String dbName = "compdb"; @@ -524,7 +502,7 @@ public void testListPartitions() throws Throwable { cleanUp(dbName, tblName, typeName); - List> values = new ArrayList>(); + List> values = new ArrayList<>(); values.add(makeVals("2008-07-01 14:13:12", "14")); values.add(makeVals("2008-07-01 14:13:12", "15")); values.add(makeVals("2008-07-02 14:13:12", "15")); @@ -554,6 +532,7 @@ } + @Test public void testListPartitionsWihtLimitEnabled() throws Throwable { // create a table with multiple partitions String dbName = "compdb"; @@ -563,7 +542,7 @@ public void testListPartitionsWihtLimitEnabled() throws Throwable { cleanUp(dbName, tblName, typeName); // Create too many partitions, just enough to validate over limit requests - List> values = new ArrayList>(); + List> values = new ArrayList<>(); for (int i=0; i<DEFAULT_LIMIT_PARTITION_REQUEST + 1; i++) { values.add(makeVals("2008-07-01 14:13:12", Integer.toString(i))); } - List> values = new ArrayList>(); + List> values = new ArrayList<>(); values.add(makeVals("2008-07-01 14:13:12", "14")); 
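// Aside: the makePartitionObject(...) helper kept above encodes the convention
// that PartitionBuilder (used earlier in this patch) automates: a Partition
// carries its own StorageDescriptor, so the helper deep-copies the table's SD
// to inherit columns, serde and formats, then rewrites only the location.
// A self-contained sketch of that convention (method and class names are
// illustrative; the Thrift types are the ones used throughout this diff):
import java.util.HashMap;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;

public class PartitionFromTableSketch {
  public static Partition partitionFor(Table tbl, List<String> vals, String suffix) {
    Partition part = new Partition();
    part.setDbName(tbl.getDbName());
    part.setTableName(tbl.getTableName());
    part.setValues(vals);                   // one value per partition key, in order
    part.setParameters(new HashMap<>());
    part.setSd(tbl.getSd().deepCopy());     // inherit columns, serde and formats
    part.getSd().setLocation(tbl.getSd().getLocation() + suffix);
    return part;
  }
}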
values.add(makeVals("2008-07-01 14:13:12", "15")); values.add(makeVals("2008-07-02 14:13:12", "15")); @@ -615,38 +595,39 @@ public void testAlterTableCascade() throws Throwable { createMultiPartitionTableSchema(dbName, tblName, typeName, values); Table tbl = client.getTable(dbName, tblName); List cols = tbl.getSd().getCols(); - cols.add(new FieldSchema("new_col", serdeConstants.STRING_TYPE_NAME, "")); + cols.add(new FieldSchema("new_col", ColumnType.STRING_TYPE_NAME, "")); tbl.getSd().setCols(cols); //add new column with cascade option client.alter_table(dbName, tblName, tbl, true); // Table tbl2 = client.getTable(dbName, tblName); - Assert.assertEquals("Unexpected number of cols", 3, tbl2.getSd().getCols().size()); - Assert.assertEquals("Unexpected column name", "new_col", tbl2.getSd().getCols().get(2).getName()); + assertEquals("Unexpected number of cols", 3, tbl2.getSd().getCols().size()); + assertEquals("Unexpected column name", "new_col", tbl2.getSd().getCols().get(2).getName()); //get a partition List pvalues = new ArrayList<>(2); pvalues.add("2008-07-01 14:13:12"); pvalues.add("14"); Partition partition = client.getPartition(dbName, tblName, pvalues); - Assert.assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size()); - Assert.assertEquals("Unexpected column name", "new_col", partition.getSd().getCols().get(2).getName()); + assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size()); + assertEquals("Unexpected column name", "new_col", partition.getSd().getCols().get(2).getName()); //add another column cols = tbl.getSd().getCols(); - cols.add(new FieldSchema("new_col2", serdeConstants.STRING_TYPE_NAME, "")); + cols.add(new FieldSchema("new_col2", ColumnType.STRING_TYPE_NAME, "")); tbl.getSd().setCols(cols); //add new column with no cascade option client.alter_table(dbName, tblName, tbl, false); tbl2 = client.getTable(dbName, tblName); - Assert.assertEquals("Unexpected number of cols", 4, tbl2.getSd().getCols().size()); - Assert.assertEquals("Unexpected column name", "new_col2", tbl2.getSd().getCols().get(3).getName()); + assertEquals("Unexpected number of cols", 4, tbl2.getSd().getCols().size()); + assertEquals("Unexpected column name", "new_col2", tbl2.getSd().getCols().get(3).getName()); //get partition, this partition should not have the newly added column since cascade option //was false partition = client.getPartition(dbName, tblName, pvalues); - Assert.assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size()); + assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size()); } + @Test public void testListPartitionNames() throws Throwable { // create a table with multiple partitions String dbName = "compdb"; @@ -655,7 +636,7 @@ public void testListPartitionNames() throws Throwable { cleanUp(dbName, tblName, typeName); - List> values = new ArrayList>(); + List> values = new ArrayList<>(); values.add(makeVals("2008-07-01 14:13:12", "14")); values.add(makeVals("2008-07-01 14:13:12", "15")); values.add(makeVals("2008-07-02 14:13:12", "15")); @@ -688,6 +669,7 @@ public void testListPartitionNames() throws Throwable { } + @Test public void testDropTable() throws Throwable { // create a table with multiple partitions String dbName = "compdb"; @@ -696,7 +678,7 @@ public void testDropTable() throws Throwable { cleanUp(dbName, tblName, typeName); - List> values = new ArrayList>(); + List> values = new ArrayList<>(); values.add(makeVals("2008-07-01 14:13:12", "14")); values.add(makeVals("2008-07-01 
14:13:12", "15")); values.add(makeVals("2008-07-02 14:13:12", "15")); @@ -719,6 +701,7 @@ public void testDropTable() throws Throwable { } + @Test public void testAlterViewParititon() throws Throwable { String dbName = "compdb"; String tblName = "comptbl"; @@ -731,27 +714,12 @@ public void testAlterViewParititon() throws Throwable { db.setDescription("Alter Partition Test database"); client.createDatabase(db); - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); - - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(cols); - sd.setCompressed(false); - sd.setParameters(new HashMap()); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters() - .put(serdeConstants.SERIALIZATION_FORMAT, "1"); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); - sd.setSortCols(new ArrayList()); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addCol("name", ColumnType.STRING_TYPE_NAME) + .addCol("income", ColumnType.INT_TYPE_NAME) + .build(); client.createTable(tbl); @@ -763,11 +731,11 @@ public void testAlterViewParititon() throws Throwable { tbl = client.getTable(dbName, tblName); } - ArrayList viewCols = new ArrayList(1); - viewCols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); + ArrayList viewCols = new ArrayList<>(1); + viewCols.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, "")); - ArrayList viewPartitionCols = new ArrayList(1); - viewPartitionCols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); + ArrayList viewPartitionCols = new ArrayList<>(1); + viewPartitionCols.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, "")); Table view = new Table(); view.setDbName(dbName); @@ -782,9 +750,9 @@ public void testAlterViewParititon() throws Throwable { view.setSd(viewSd); viewSd.setCols(viewCols); viewSd.setCompressed(false); - viewSd.setParameters(new HashMap()); + viewSd.setParameters(new HashMap<>()); viewSd.setSerdeInfo(new SerDeInfo()); - viewSd.getSerdeInfo().setParameters(new HashMap()); + viewSd.getSerdeInfo().setParameters(new HashMap<>()); client.createTable(view); @@ -796,14 +764,14 @@ public void testAlterViewParititon() throws Throwable { view = client.getTable(dbName, viewName); } - List vals = new ArrayList(1); + List vals = new ArrayList<>(1); vals.add("abc"); Partition part = new Partition(); part.setDbName(dbName); part.setTableName(viewName); part.setValues(vals); - part.setParameters(new HashMap()); + part.setParameters(new HashMap<>()); client.add_partition(part); @@ -824,12 +792,13 @@ public void testAlterViewParititon() throws Throwable { client.dropDatabase(dbName); } + @Test public void testAlterPartition() throws Throwable { try { String dbName = "compdb"; String tblName = "comptbl"; - List vals = new ArrayList(2); + List vals = new ArrayList<>(2); vals.add("2008-07-01"); vals.add("14"); @@ -840,37 +809,17 @@ public void testAlterPartition() throws Throwable { db.setDescription("Alter Partition Test database"); client.createDatabase(db); - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("name", 
serdeConstants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); - - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(cols); - sd.setCompressed(false); - sd.setNumBuckets(1); - sd.setParameters(new HashMap()); - sd.getParameters().put("test_param_1", "Use this for comments etc"); - sd.setBucketCols(new ArrayList(2)); - sd.getBucketCols().add("name"); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters() - .put(serdeConstants.SERIALIZATION_FORMAT, "1"); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); - sd.setSortCols(new ArrayList()); - - tbl.setPartitionKeys(new ArrayList(2)); - tbl.getPartitionKeys().add( - new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, "")); - tbl.getPartitionKeys().add( - new FieldSchema("hr", serdeConstants.INT_TYPE_NAME, "")); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addCol("name", ColumnType.STRING_TYPE_NAME) + .addCol("income", ColumnType.INT_TYPE_NAME) + .addTableParam("test_param_1", "Use this for comments etc") + .addBucketCol("name") + .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1") + .addPartCol("ds", ColumnType.STRING_TYPE_NAME) + .addPartCol("hr", ColumnType.INT_TYPE_NAME) + .build(); client.createTable(tbl); @@ -886,7 +835,7 @@ public void testAlterPartition() throws Throwable { part.setDbName(dbName); part.setTableName(tblName); part.setValues(vals); - part.setParameters(new HashMap()); + part.setParameters(new HashMap<>()); part.setSd(tbl.getSd()); part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo()); part.getSd().setLocation(tbl.getSd().getLocation() + "/part1"); @@ -918,16 +867,17 @@ public void testAlterPartition() throws Throwable { } } + @Test public void testRenamePartition() throws Throwable { try { String dbName = "compdb1"; String tblName = "comptbl1"; - List vals = new ArrayList(2); + List vals = new ArrayList<>(2); vals.add("2011-07-11"); vals.add("8"); String part_path = "/ds=2011-07-11/hr=8"; - List tmp_vals = new ArrayList(2); + List tmp_vals = new ArrayList<>(2); tmp_vals.add("tmp_2011-07-11"); tmp_vals.add("-8"); String part2_path = "/ds=tmp_2011-07-11/hr=-8"; @@ -939,37 +889,14 @@ public void testRenamePartition() throws Throwable { db.setDescription("Rename Partition Test database"); client.createDatabase(db); - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); - - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(cols); - sd.setCompressed(false); - sd.setNumBuckets(1); - sd.setParameters(new HashMap()); - sd.getParameters().put("test_param_1", "Use this for comments etc"); - sd.setBucketCols(new ArrayList(2)); - sd.getBucketCols().add("name"); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters() - .put(serdeConstants.SERIALIZATION_FORMAT, "1"); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - 
sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); - sd.setSortCols(new ArrayList()); - - tbl.setPartitionKeys(new ArrayList(2)); - tbl.getPartitionKeys().add( - new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, "")); - tbl.getPartitionKeys().add( - new FieldSchema("hr", serdeConstants.INT_TYPE_NAME, "")); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addCol("name", ColumnType.STRING_TYPE_NAME) + .addCol("income", ColumnType.INT_TYPE_NAME) + .addPartCol("ds", ColumnType.STRING_TYPE_NAME) + .addPartCol("hr", ColumnType.INT_TYPE_NAME) + .build(); client.createTable(tbl); @@ -985,7 +912,7 @@ public void testRenamePartition() throws Throwable { part.setDbName(dbName); part.setTableName(tblName); part.setValues(vals); - part.setParameters(new HashMap()); + part.setParameters(new HashMap<>()); part.setSd(tbl.getSd().deepCopy()); part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo()); part.getSd().setLocation(tbl.getSd().getLocation() + "/part1"); @@ -1051,16 +978,18 @@ public void testRenamePartition() throws Throwable { } } + @Test public void testDatabase() throws Throwable { try { // clear up any existing databases silentDropDatabase(TEST_DB1_NAME); silentDropDatabase(TEST_DB2_NAME); - Database db = new Database(); - db.setName(TEST_DB1_NAME); - db.setOwnerName(SessionState.getUserFromAuthenticator()); - db.setOwnerType(PrincipalType.USER); + Database db = new DatabaseBuilder() + .setName(TEST_DB1_NAME) + .setOwnerName(SecurityUtils.getUser()) + .build(); + Assert.assertEquals(SecurityUtils.getUser(), db.getOwnerName()); client.createDatabase(db); db = client.getDatabase(TEST_DB1_NAME); @@ -1069,7 +998,7 @@ public void testDatabase() throws Throwable { TEST_DB1_NAME, db.getName()); assertEquals("location of the returned db is different from that of inserted db", warehouse.getDatabasePath(db).toString(), db.getLocationUri()); - assertEquals(db.getOwnerName(), SessionState.getUserFromAuthenticator()); + assertEquals(db.getOwnerName(), SecurityUtils.getUser()); assertEquals(db.getOwnerType(), PrincipalType.USER); Database db2 = new Database(); db2.setName(TEST_DB2_NAME); @@ -1098,6 +1027,7 @@ public void testDatabase() throws Throwable { } } + @Test public void testDatabaseLocationWithPermissionProblems() throws Exception { // Note: The following test will fail if you are running this test as root. 
Setting @@ -1114,10 +1044,10 @@ public void testDatabaseLocationWithPermissionProblems() throws Exception { Database db = new Database(); db.setName(TEST_DB1_NAME); String dbLocation = - HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test/_testDB_create_"; - FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), hiveConf); + MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test/_testDB_create_"; + FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf); fs.mkdirs( - new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test"), + new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"), new FsPermission((short) 0)); db.setLocationUri(dbLocation); @@ -1137,14 +1067,15 @@ public void testDatabaseLocationWithPermissionProblems() throws Exception { } } - fs.setPermission(new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test"), + fs.setPermission(new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"), new FsPermission((short) 755)); - fs.delete(new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test"), true); + fs.delete(new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"), true); } assertTrue("Database creation succeeded even with permission problem", createFailed); } + @Test public void testDatabaseLocation() throws Throwable { try { // clear up any existing databases @@ -1153,7 +1084,7 @@ public void testDatabaseLocation() throws Throwable { Database db = new Database(); db.setName(TEST_DB1_NAME); String dbLocation = - HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/_testDB_create_"; + MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_create_"; db.setLocationUri(dbLocation); client.createDatabase(db); @@ -1178,8 +1109,8 @@ public void testDatabaseLocation() throws Throwable { db = new Database(); db.setName(TEST_DB1_NAME); dbLocation = - HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/_testDB_file_"; - FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), hiveConf); + MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_file_"; + FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf); fs.createNewFile(new Path(dbLocation)); fs.deleteOnExit(new Path(dbLocation)); db.setLocationUri(dbLocation); @@ -1210,25 +1141,26 @@ public void testDatabaseLocation() throws Throwable { } + @Test public void testSimpleTypeApi() throws Exception { try { - client.dropType(serdeConstants.INT_TYPE_NAME); + client.dropType(ColumnType.INT_TYPE_NAME); Type typ1 = new Type(); - typ1.setName(serdeConstants.INT_TYPE_NAME); + typ1.setName(ColumnType.INT_TYPE_NAME); boolean ret = client.createType(typ1); assertTrue("Unable to create type", ret); - Type typ1_2 = client.getType(serdeConstants.INT_TYPE_NAME); + Type typ1_2 = client.getType(ColumnType.INT_TYPE_NAME); assertNotNull(typ1_2); assertEquals(typ1.getName(), typ1_2.getName()); - ret = client.dropType(serdeConstants.INT_TYPE_NAME); + ret = client.dropType(ColumnType.INT_TYPE_NAME); assertTrue("unable to drop type integer", ret); boolean exceptionThrown = false; try { - client.getType(serdeConstants.INT_TYPE_NAME); + client.getType(ColumnType.INT_TYPE_NAME); } catch (NoSuchObjectException e) { exceptionThrown = true; } @@ -1241,17 +1173,18 @@ public void testSimpleTypeApi() throws Exception { } // TODO:pc need to enhance this with complex fields and getType_all function + @Test public void testComplexTypeApi() throws Exception { try { 
client.dropType("Person"); Type typ1 = new Type(); typ1.setName("Person"); - typ1.setFields(new ArrayList(2)); + typ1.setFields(new ArrayList<>(2)); typ1.getFields().add( - new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); + new FieldSchema("name", ColumnType.STRING_TYPE_NAME, "")); typ1.getFields().add( - new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); + new FieldSchema("income", ColumnType.INT_TYPE_NAME, "")); boolean ret = client.createType(typ1); assertTrue("Unable to create type", ret); @@ -1266,9 +1199,9 @@ public void testComplexTypeApi() throws Exception { Type fam = new Type(); fam.setName("Family"); - fam.setFields(new ArrayList(2)); + fam.setFields(new ArrayList<>(2)); fam.getFields().add( - new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); + new FieldSchema("name", ColumnType.STRING_TYPE_NAME, "")); fam.getFields().add( new FieldSchema("members", ColumnType.getListType(typ1.getName()), "")); @@ -1303,6 +1236,7 @@ public void testComplexTypeApi() throws Exception { } } + @Test public void testSimpleTable() throws Exception { try { String dbName = "simpdb"; @@ -1320,36 +1254,21 @@ public void testSimpleTable() throws Exception { client.dropType(typeName); Type typ1 = new Type(); typ1.setName(typeName); - typ1.setFields(new ArrayList(2)); + typ1.setFields(new ArrayList<>(2)); typ1.getFields().add( - new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); + new FieldSchema("name", ColumnType.STRING_TYPE_NAME, "")); typ1.getFields().add( - new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); + new FieldSchema("income", ColumnType.INT_TYPE_NAME, "")); client.createType(typ1); - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(typ1.getFields()); - sd.setCompressed(false); - sd.setNumBuckets(1); - sd.setParameters(new HashMap()); - sd.getParameters().put("test_param_1", "Use this for comments etc"); - sd.setBucketCols(new ArrayList(2)); - sd.getBucketCols().add("name"); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters().put( - org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1"); - sd.getSerdeInfo().setSerializationLib( - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setInputFormat(HiveOutputFormat.class.getName()); - - tbl.setPartitionKeys(new ArrayList()); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .setCols(typ1.getFields()) + .setNumBuckets(1) + .addBucketCol("name") + .addStorageDescriptorParam("test_param_1", "Use this for comments etc") + .build(); client.createTable(tbl); @@ -1370,12 +1289,11 @@ public void testSimpleTable() throws Exception { assertEquals(tbl2.getSd().getNumBuckets(), 1); assertEquals(tbl2.getSd().getLocation(), tbl.getSd().getLocation()); assertNotNull(tbl2.getSd().getSerdeInfo()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters().put( - org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1"); + tbl.getSd().getSerdeInfo().setParameters(new HashMap<>()); + tbl.getSd().getSerdeInfo().getParameters().put(ColumnType.SERIALIZATION_FORMAT, "1"); tbl2.setTableName(tblName2); - tbl2.setParameters(new HashMap()); + tbl2.setParameters(new HashMap<>()); tbl2.getParameters().put("EXTERNAL", "TRUE"); 
tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2"); @@ -1438,7 +1356,7 @@ public void testSimpleTable() throws Exception { || (tbl2.getPartitionKeys().size() == 0)); //test get_table_objects_by_name functionality - ArrayList tableNames = new ArrayList(); + ArrayList tableNames = new ArrayList<>(); tableNames.add(tblName2); tableNames.add(tblName); tableNames.add(tblName2); @@ -1490,7 +1408,7 @@ public void testSimpleTable() throws Exception { assertNotNull(udbe); assertTrue("DB not found", udbe.getMessage().contains("is null or empty")); - FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf); + FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), conf); client.dropTable(dbName, tblName); assertFalse(fs.exists(new Path(tbl.getSd().getLocation()))); @@ -1508,6 +1426,7 @@ public void testSimpleTable() throws Exception { // Tests that in the absence of stats for partitions, and/or absence of columns // to get stats for, the metastore does not break. See HIVE-12083 for motivation. + @Test public void testStatsFastTrivial() throws Throwable { String dbName = "tstatsfast"; String tblName = "t1"; @@ -1517,7 +1436,7 @@ public void testStatsFastTrivial() throws Throwable { cleanUp(dbName,tblName,typeName); - List> values = new ArrayList>(); + List> values = new ArrayList<>(); values.add(makeVals("2008-07-01 14:13:12", "14")); values.add(makeVals("2008-07-01 14:13:12", "15")); values.add(makeVals("2008-07-02 14:13:12", "15")); @@ -1525,10 +1444,10 @@ public void testStatsFastTrivial() throws Throwable { createMultiPartitionTableSchema(dbName, tblName, typeName, values); - List emptyColNames = new ArrayList(); - List emptyPartNames = new ArrayList(); + List emptyColNames = new ArrayList<>(); + List emptyPartNames = new ArrayList<>(); - List colNames = new ArrayList(); + List colNames = new ArrayList<>(); colNames.add("name"); colNames.add("income"); List partNames = client.listPartitionNames(dbName,tblName,(short)-1); @@ -1568,6 +1487,7 @@ public void testStatsFastTrivial() throws Throwable { } + @Test public void testColumnStatistics() throws Throwable { String dbName = "columnstatstestdb"; @@ -1594,7 +1514,7 @@ public void testColumnStatistics() throws Throwable { String[] colType = new String[] {"double", "string"}; boolean isTblLevel = true; String partName = null; - List statsObjs = new ArrayList(); + List statsObjs = new ArrayList<>(); ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); statsDesc.setDbName(dbName); @@ -1647,8 +1567,8 @@ public void testColumnStatistics() throws Throwable { // compare stats obj to ensure what we get is what we wrote assertNotNull(colStats2); assertEquals(colStats2.getColName(), colName[0]); - assertEquals(colStats2.getStatsData().getDoubleStats().getLowValue(), lowValue); - assertEquals(colStats2.getStatsData().getDoubleStats().getHighValue(), highValue); + assertEquals(colStats2.getStatsData().getDoubleStats().getLowValue(), lowValue, 0.01); + assertEquals(colStats2.getStatsData().getDoubleStats().getHighValue(), highValue, 0.01); assertEquals(colStats2.getStatsData().getDoubleStats().getNumNulls(), numNulls); assertEquals(colStats2.getStatsData().getDoubleStats().getNumDVs(), numDVs); @@ -1674,7 +1594,7 @@ public void testColumnStatistics() throws Throwable { // create a table with multiple partitions cleanUp(dbName, tblName, typeName); - List> values = new ArrayList>(); + List> values = new ArrayList<>(); values.add(makeVals("2008-07-01 14:13:12", "14")); 
values.add(makeVals("2008-07-01 14:13:12", "15")); values.add(makeVals("2008-07-02 14:13:12", "15")); @@ -1708,7 +1628,7 @@ public void testColumnStatistics() throws Throwable { assertEquals(colStats.getStatsDesc().getPartName(), partName); assertEquals(colStats2.getColName(), colName[1]); assertEquals(colStats2.getStatsData().getStringStats().getMaxColLen(), maxColLen); - assertEquals(colStats2.getStatsData().getStringStats().getAvgColLen(), avgColLen); + assertEquals(colStats2.getStatsData().getStringStats().getAvgColLen(), avgColLen, 0.01); assertEquals(colStats2.getStatsData().getStringStats().getNumNulls(), numNulls); assertEquals(colStats2.getStatsData().getStringStats().getNumDVs(), numDVs); @@ -1730,61 +1650,30 @@ public void testColumnStatistics() throws Throwable { } } - static class ClassNotFoundSerde extends LazySimpleSerDe { - - public ClassNotFoundSerde() throws Exception { - } - - @Override - public void initialize(Configuration job, Properties tbl) throws SerDeException { - super.initialize(job, tbl); - throw new NoClassDefFoundError(); - } - - } - - public void testGetSchemaWithNoClassDefFoundError() throws Exception { - try { - String dbName = "testDb"; - String tblName = "testTable"; - - client.dropTable(dbName, tblName); - silentDropDatabase(dbName); - - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); - - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); + @Test(expected = MetaException.class) + public void testGetSchemaWithNoClassDefFoundError() throws TException { + String dbName = "testDb"; + String tblName = "testTable"; - ArrayList cols = new ArrayList(1); - cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); + client.dropTable(dbName, tblName); + silentDropDatabase(dbName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(cols); - SerDeInfo serdeInfo = new SerDeInfo(); - sd.setSerdeInfo(serdeInfo); - serdeInfo.setSerializationLib(ClassNotFoundSerde.class.getName()); + Database db = new Database(); + db.setName(dbName); + client.createDatabase(db); - client.createTable(tbl); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addCol("name", ColumnType.STRING_TYPE_NAME, "") + .setSerdeLib("no.such.class") + .build(); + client.createTable(tbl); - Boolean MetaExceptionCaught = false; - try { - client.getSchema(dbName, tblName); - } catch (MetaException me) { - MetaExceptionCaught = true; - } - assertTrue("MetaException is expected to be caught for throwing NoClassDefFoundError", MetaExceptionCaught); - } catch (Throwable e) { - System.err.println(StringUtils.stringifyException(e)); - System.err.println("testGetSchemaWithNoClassDefFoundError() failed."); - throw e; - } + client.getSchema(dbName, tblName); } + @Test public void testAlterTable() throws Exception { String dbName = "alterdb"; String invTblName = "alter-tbl"; @@ -1798,31 +1687,16 @@ public void testAlterTable() throws Exception { db.setName(dbName); client.createDatabase(db); - ArrayList invCols = new ArrayList(2); - invCols.add(new FieldSchema("n-ame", serdeConstants.STRING_TYPE_NAME, "")); - invCols.add(new FieldSchema("in.come", serdeConstants.INT_TYPE_NAME, "")); - - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(invTblName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(invCols); - sd.setCompressed(false); - sd.setNumBuckets(1); - sd.setParameters(new HashMap()); - sd.getParameters().put("test_param_1", 
"Use this for comments etc"); - sd.setBucketCols(new ArrayList(2)); - sd.getBucketCols().add("name"); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters().put( - org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1"); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); - + ArrayList invCols = new ArrayList<>(2); + invCols.add(new FieldSchema("n-ame", ColumnType.STRING_TYPE_NAME, "")); + invCols.add(new FieldSchema("in.come", ColumnType.INT_TYPE_NAME, "")); + + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(invTblName) + .setCols(invCols) + .build(); + boolean failed = false; try { client.createTable(tbl); @@ -1835,8 +1709,8 @@ public void testAlterTable() throws Exception { } // create an invalid table which has wrong column type - ArrayList invColsInvType = new ArrayList(2); - invColsInvType.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); + ArrayList invColsInvType = new ArrayList<>(2); + invColsInvType.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, "")); invColsInvType.add(new FieldSchema("income", "xyz", "")); tbl.setTableName(tblName); tbl.getSd().setCols(invColsInvType); @@ -1851,9 +1725,9 @@ public void testAlterTable() throws Exception { false); } - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); + ArrayList cols = new ArrayList<>(2); + cols.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, "")); + cols.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, "")); // create a valid table tbl.setTableName(tblName); @@ -1920,7 +1794,7 @@ public void testAlterTable() throws Exception { assertEquals("Alter table didn't succeed. 
Num buckets is different ", tbl2.getSd().getNumBuckets(), tbl3.getSd().getNumBuckets()); // check that data has moved - FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf); + FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), conf); assertFalse("old table location still exists", fs.exists(new Path(tbl .getSd().getLocation()))); assertTrue("data did not move to new location", fs.exists(new Path(tbl3 @@ -1950,6 +1824,7 @@ public void testAlterTable() throws Exception { } } + @Test public void testComplexTable() throws Exception { String dbName = "compdb"; @@ -1966,42 +1841,23 @@ public void testComplexTable() throws Exception { client.dropType(typeName); Type typ1 = new Type(); typ1.setName(typeName); - typ1.setFields(new ArrayList(2)); + typ1.setFields(new ArrayList<>(2)); typ1.getFields().add( - new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); + new FieldSchema("name", ColumnType.STRING_TYPE_NAME, "")); typ1.getFields().add( - new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); + new FieldSchema("income", ColumnType.INT_TYPE_NAME, "")); client.createType(typ1); - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(typ1.getFields()); - sd.setCompressed(false); - sd.setNumBuckets(1); - sd.setParameters(new HashMap()); - sd.getParameters().put("test_param_1", "Use this for comments etc"); - sd.setBucketCols(new ArrayList(2)); - sd.getBucketCols().add("name"); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters().put( - org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "9"); - sd.getSerdeInfo().setSerializationLib( - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); - - tbl.setPartitionKeys(new ArrayList(2)); - tbl.getPartitionKeys().add( - new FieldSchema("ds", - org.apache.hadoop.hive.serde.serdeConstants.DATE_TYPE_NAME, "")); - tbl.getPartitionKeys().add( - new FieldSchema("hr", - org.apache.hadoop.hive.serde.serdeConstants.INT_TYPE_NAME, "")); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .setCols(typ1.getFields()) + .addPartCol("ds", ColumnType.DATE_TYPE_NAME) + .addPartCol("hr", ColumnType.INT_TYPE_NAME) + .setNumBuckets(1) + .addBucketCol("name") + .addStorageDescriptorParam("test_param_1","Use this for comments etc") + .build(); client.createTable(tbl); @@ -2019,9 +1875,9 @@ public void testComplexTable() throws Exception { assertNotNull(tbl2.getPartitionKeys()); assertEquals(2, tbl2.getPartitionKeys().size()); - assertEquals(serdeConstants.DATE_TYPE_NAME, tbl2.getPartitionKeys().get(0) + assertEquals(ColumnType.DATE_TYPE_NAME, tbl2.getPartitionKeys().get(0) .getType()); - assertEquals(serdeConstants.INT_TYPE_NAME, tbl2.getPartitionKeys().get(1) + assertEquals(ColumnType.INT_TYPE_NAME, tbl2.getPartitionKeys().get(1) .getType()); assertEquals("ds", tbl2.getPartitionKeys().get(0).getName()); assertEquals("hr", tbl2.getPartitionKeys().get(1).getName()); @@ -2055,6 +1911,7 @@ public void testComplexTable() throws Exception { } } + @Test public void testTableDatabase() throws Exception { String dbName = "testDb"; String tblName_1 = "testTbl_1"; @@ -2066,33 +1923,18 @@ public void testTableDatabase() throws 
Exception { Database db = new Database(); db.setName(dbName); String dbLocation = - HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "_testDB_table_create_"; + MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "_testDB_table_create_"; db.setLocationUri(dbLocation); client.createDatabase(db); db = client.getDatabase(dbName); - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName_1); - - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); - - StorageDescriptor sd = new StorageDescriptor(); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.setParameters(new HashMap()); - sd.getSerdeInfo().getParameters().put( - org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "9"); - sd.getSerdeInfo().setSerializationLib( - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); - - tbl.setSd(sd); - tbl.getSd().setCols(cols); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName_1) + .addCol("name", ColumnType.STRING_TYPE_NAME) + .addCol("income", ColumnType.INT_TYPE_NAME) + .build(); + client.createTable(tbl); tbl = client.getTable(dbName, tblName_1); @@ -2111,6 +1953,7 @@ public void testTableDatabase() throws Exception { } + @Test public void testGetConfigValue() { String val = "value"; @@ -2123,12 +1966,9 @@ public void testGetConfigValue() { assertEquals(client.getConfigValue("hive.key4", val), "0"); assertEquals(client.getConfigValue("hive.key5", val), val); assertEquals(client.getConfigValue(null, val), val); - } catch (ConfigValSecurityException e) { - e.printStackTrace(); - assert (false); } catch (TException e) { e.printStackTrace(); - assert (false); + fail(); } } @@ -2140,35 +1980,34 @@ public void testGetConfigValue() { threwException = true; } catch (TException e) { e.printStackTrace(); - assert (false); + fail(); } assert (threwException); } private static void adjust(HiveMetaStoreClient client, Partition part, - String dbName, String tblName) - throws NoSuchObjectException, MetaException, TException { + String dbName, String tblName) throws TException { Partition part_get = client.getPartition(dbName, tblName, part.getValues()); part.setCreateTime(part_get.getCreateTime()); part.putToParameters(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME, Long.toString(part_get.getCreateTime())); } - private static void silentDropDatabase(String dbName) throws MetaException, TException { + private static void silentDropDatabase(String dbName) throws TException { try { for (String tableName : client.getTables(dbName, "*")) { client.dropTable(dbName, tableName); } client.dropDatabase(dbName); - } catch (NoSuchObjectException e) { - } catch (InvalidOperationException e) { + } catch (NoSuchObjectException|InvalidOperationException e) { + // NOP } } /** * Tests for list partition by filter functionality. 
- * @throws Exception */ + @Test public void testPartitionFilter() throws Exception { String dbName = "filterdb"; String tblName = "filtertbl"; @@ -2179,36 +2018,15 @@ public void testPartitionFilter() throws Exception { db.setName(dbName); client.createDatabase(db); - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("c1", serdeConstants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("c2", serdeConstants.INT_TYPE_NAME, "")); - - ArrayList partCols = new ArrayList(3); - partCols.add(new FieldSchema("p1", serdeConstants.STRING_TYPE_NAME, "")); - partCols.add(new FieldSchema("p2", serdeConstants.STRING_TYPE_NAME, "")); - partCols.add(new FieldSchema("p3", serdeConstants.INT_TYPE_NAME, "")); - - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(cols); - sd.setCompressed(false); - sd.setNumBuckets(1); - sd.setParameters(new HashMap()); - sd.setBucketCols(new ArrayList()); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters() - .put(serdeConstants.SERIALIZATION_FORMAT, "1"); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); - sd.setSortCols(new ArrayList()); - - tbl.setPartitionKeys(partCols); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addCol("c1", ColumnType.STRING_TYPE_NAME) + .addCol("c2", ColumnType.INT_TYPE_NAME) + .addPartCol("p1", ColumnType.STRING_TYPE_NAME) + .addPartCol("p2", ColumnType.STRING_TYPE_NAME) + .addPartCol("p3", ColumnType.INT_TYPE_NAME) + .build(); client.createTable(tbl); tbl = client.getTable(dbName, tblName); @@ -2355,17 +2173,17 @@ public void testPartitionFilter() throws Exception { /** * Test filtering on table with single partition - * @throws Exception */ + @Test public void testFilterSinglePartition() throws Exception { String dbName = "filterdb"; String tblName = "filtertbl"; - List vals = new ArrayList(1); + List vals = new ArrayList<>(1); vals.add("p11"); - List vals2 = new ArrayList(1); + List vals2 = new ArrayList<>(1); vals2.add("p12"); - List vals3 = new ArrayList(1); + List vals3 = new ArrayList<>(1); vals3.add("p13"); silentDropDatabase(dbName); @@ -2374,34 +2192,13 @@ public void testFilterSinglePartition() throws Exception { db.setName(dbName); client.createDatabase(db); - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("c1", serdeConstants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("c2", serdeConstants.INT_TYPE_NAME, "")); - - ArrayList partCols = new ArrayList(1); - partCols.add(new FieldSchema("p1", serdeConstants.STRING_TYPE_NAME, "")); - - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(cols); - sd.setCompressed(false); - sd.setNumBuckets(1); - sd.setParameters(new HashMap()); - sd.setBucketCols(new ArrayList()); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters() - .put(serdeConstants.SERIALIZATION_FORMAT, "1"); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); 
- sd.setSortCols(new ArrayList()); - - tbl.setPartitionKeys(partCols); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addCol("c1", ColumnType.STRING_TYPE_NAME) + .addCol("c2", ColumnType.INT_TYPE_NAME) + .addPartCol("p1", ColumnType.STRING_TYPE_NAME) + .build(); client.createTable(tbl); tbl = client.getTable(dbName, tblName); @@ -2425,19 +2222,19 @@ public void testFilterSinglePartition() throws Exception { /** * Test filtering based on the value of the last partition - * @throws Exception */ + @Test public void testFilterLastPartition() throws Exception { String dbName = "filterdb"; String tblName = "filtertbl"; - List vals = new ArrayList(2); + List vals = new ArrayList<>(2); vals.add("p11"); vals.add("p21"); - List vals2 = new ArrayList(2); + List vals2 = new ArrayList<>(2); vals2.add("p11"); vals2.add("p22"); - List vals3 = new ArrayList(2); + List vals3 = new ArrayList<>(2); vals3.add("p12"); vals3.add("p21"); @@ -2445,23 +2242,15 @@ public void testFilterLastPartition() throws Exception { createDb(dbName); - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("c1", serdeConstants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("c2", serdeConstants.INT_TYPE_NAME, "")); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addCol("c1", ColumnType.STRING_TYPE_NAME) + .addCol("c2", ColumnType.INT_TYPE_NAME) + .addPartCol("p1", ColumnType.STRING_TYPE_NAME) + .addPartCol("p2", ColumnType.STRING_TYPE_NAME) + .build(); - ArrayList partCols = new ArrayList(2); - partCols.add(new FieldSchema("p1", serdeConstants.STRING_TYPE_NAME, "")); - partCols.add(new FieldSchema("p2", serdeConstants.STRING_TYPE_NAME, "")); - - Map serdParams = new HashMap(); - serdParams.put(serdeConstants.SERIALIZATION_FORMAT, "1"); - StorageDescriptor sd = createStorageDescriptor(tblName, partCols, null, serdParams); - - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - tbl.setSd(sd); - tbl.setPartitionKeys(partCols); client.createTable(tbl); tbl = client.getTable(dbName, tblName); @@ -2490,8 +2279,7 @@ public void testFilterLastPartition() throws Exception { } private void checkFilter(HiveMetaStoreClient client, String dbName, - String tblName, String filter, int expectedCount) - throws MetaException, NoSuchObjectException, TException { + String tblName, String filter, int expectedCount) throws TException { LOG.debug("Testing filter: " + filter); List partitions = client.listPartitionsByFilter(dbName, tblName, filter, (short) -1); @@ -2501,14 +2289,13 @@ private void checkFilter(HiveMetaStoreClient client, String dbName, } private void add_partition(HiveMetaStoreClient client, Table table, - List vals, String location) throws InvalidObjectException, - AlreadyExistsException, MetaException, TException { + List vals, String location) throws TException { Partition part = new Partition(); part.setDbName(table.getDbName()); part.setTableName(table.getTableName()); part.setValues(vals); - part.setParameters(new HashMap()); + part.setParameters(new HashMap<>()); part.setSd(table.getSd().deepCopy()); part.getSd().setSerdeInfo(table.getSd().getSerdeInfo()); part.getSd().setLocation(table.getSd().getLocation() + location); @@ -2521,6 +2308,7 @@ private void add_partition(HiveMetaStoreClient client, Table table, * actually test multithreading, but does verify that the proxy * at least works correctly. 
*/ + @Test public void testSynchronized() throws Exception { int currentNumberOfDbs = client.getAllDatabases().size(); @@ -2530,6 +2318,7 @@ public void testSynchronized() throws Exception { assertEquals(currentNumberOfDbs, databases.size()); } + @Test public void testTableFilter() throws Exception { try { String dbName = "testTableFilter"; @@ -2670,30 +2459,24 @@ public void testTableFilter() throws Exception { private Table createTableForTestFilter(String dbName, String tableName, String owner, int lastAccessTime, boolean hasSecondParam) throws Exception { - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); - - Map params = new HashMap(); - params.put("sd_param_1", "Use this for comments etc"); - - Map serdParams = new HashMap(); - serdParams.put(serdeConstants.SERIALIZATION_FORMAT, "1"); - - StorageDescriptor sd = createStorageDescriptor(tableName, cols, params, serdParams); - - Map partitionKeys = new HashMap(); - partitionKeys.put("ds", serdeConstants.STRING_TYPE_NAME); - partitionKeys.put("hr", serdeConstants.INT_TYPE_NAME); - - Map tableParams = new HashMap(); + Map tableParams = new HashMap<>(); tableParams.put("test_param_1", "hi"); if(hasSecondParam) { tableParams.put("test_param_2", "50"); } - Table tbl = createTable(dbName, tableName, owner, tableParams, - partitionKeys, sd, lastAccessTime); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tableName) + .addCol("name", ColumnType.STRING_TYPE_NAME) + .addCol("income", ColumnType.INT_TYPE_NAME) + .addPartCol("ds", ColumnType.STRING_TYPE_NAME) + .addPartCol("hr", ColumnType.INT_TYPE_NAME) + .setTableParams(tableParams) + .setOwner(owner) + .setLastAccessTime(lastAccessTime) + .build(); + client.createTable(tbl); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -2708,8 +2491,8 @@ private Table createTableForTestFilter(String dbName, String tableName, String o * Verify that if another client, either a metastore Thrift server or a Hive CLI instance * renames a table recently created by this instance, and hence potentially in its cache, the * current instance still sees the change. 
- * @throws Exception */ + @Test public void testConcurrentMetastores() throws Exception { String dbName = "concurrentdb"; String tblName = "concurrenttbl"; @@ -2720,19 +2503,13 @@ public void testConcurrentMetastores() throws Exception { createDb(dbName); - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("c1", serdeConstants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("c2", serdeConstants.INT_TYPE_NAME, "")); - - Map params = new HashMap(); - params.put("test_param_1", "Use this for comments etc"); - - Map serdParams = new HashMap(); - serdParams.put(serdeConstants.SERIALIZATION_FORMAT, "1"); - - StorageDescriptor sd = createStorageDescriptor(tblName, cols, params, serdParams); - - createTable(dbName, tblName, null, null, null, sd, 0); + Table tbl1 = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addCol("c1", ColumnType.STRING_TYPE_NAME) + .addCol("c2", ColumnType.INT_TYPE_NAME) + .build(); + client.createTable(tbl1); // get the table from the client, verify the name is correct Table tbl2 = client.getTable(dbName, tblName); @@ -2757,6 +2534,7 @@ public void testConcurrentMetastores() throws Exception { } } + @Test public void testSimpleFunction() throws Exception { String dbName = "test_db"; String funcName = "test_func"; @@ -2835,6 +2613,7 @@ public void testSimpleFunction() throws Exception { } } + @Test public void testFunctionWithResources() throws Exception { String dbName = "test_db2"; String funcName = "test_func"; @@ -2843,7 +2622,7 @@ public void testFunctionWithResources() throws Exception { PrincipalType ownerType = PrincipalType.USER; int createTime = (int) (System.currentTimeMillis() / 1000); FunctionType funcType = FunctionType.JAVA; - List resList = new ArrayList(); + List resList = new ArrayList<>(); resList.add(new ResourceUri(ResourceType.JAR, "hdfs:///tmp/jar1.jar")); resList.add(new ResourceUri(ResourceType.FILE, "hdfs:///tmp/file1.txt")); resList.add(new ResourceUri(ResourceType.ARCHIVE, "hdfs:///tmp/archive1.tgz")); @@ -2889,31 +2668,14 @@ public void testFunctionWithResources() throws Exception { * Unfortunately, derby cannot be run in two different JVMs simultaneously, but the only way * to rename without having it put in this client's cache is to run a metastore in a separate JVM, * so this simulation is required. 
- * @param oldTableName - * @param newTableName - * @throws SQLException */ private void updateTableNameInDB(String oldTableName, String newTableName) throws SQLException { - String connectionStr = HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTORECONNECTURLKEY); - int interval= 1; - int attempts = 1; - + String connectionStr = MetastoreConf.getVar(conf, ConfVars.CONNECTURLKEY); - Utilities.SQLCommand execUpdate = new Utilities.SQLCommand() { - @Override - public Void run(PreparedStatement stmt) throws SQLException { - stmt.executeUpdate(); - return null; - } - }; - - Connection conn = Utilities.connectWithRetry(connectionStr, interval, attempts); - - PreparedStatement updateStmt = Utilities.prepareWithRetry(conn, - "UPDATE TBLS SET tbl_name = '" + newTableName + "' WHERE tbl_name = '" + oldTableName + "'", - interval, attempts); - - Utilities.executeWithRetry(execUpdate, updateStmt, interval, attempts); + Connection conn = DriverManager.getConnection(connectionStr); + PreparedStatement stmt = conn.prepareStatement("UPDATE TBLS SET tbl_name = '" + + newTableName + "' WHERE tbl_name = '" + oldTableName + "'"); + stmt.executeUpdate(); } private void cleanUp(String dbName, String tableName, String typeName) throws Exception { @@ -2939,7 +2701,7 @@ private Database createDb(String dbName) throws Exception { private Type createType(String typeName, Map fields) throws Throwable { Type typ1 = new Type(); typ1.setName(typeName); - typ1.setFields(new ArrayList(fields.size())); + typ1.setFields(new ArrayList<>(fields.size())); for(String fieldName : fields.keySet()) { typ1.getFields().add( new FieldSchema(fieldName, fields.get(fieldName), "")); @@ -2952,208 +2714,22 @@ private Type createType(String typeName, Map fields) throws Thro * Creates a simple table under specified database * @param dbName the database name that the table will be created under * @param tableName the table name to be created - * @throws Exception */ - private void createTable(String dbName, String tableName) - throws Exception { - List columns = new ArrayList(); - columns.add(new FieldSchema("foo", "string", "")); - columns.add(new FieldSchema("bar", "string", "")); - Map serdParams = new HashMap(); - serdParams.put(serdeConstants.SERIALIZATION_FORMAT, "1"); - - StorageDescriptor sd = createStorageDescriptor(tableName, columns, null, serdParams); - - createTable(dbName, tableName, null, null, null, sd, 0); - } - - @Test - public void testTransactionalValidation() throws Throwable { - String dbName = "acidDb"; - silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); - String tblName = "acidTable"; - String owner = "acid"; - Map fields = new HashMap(); - fields.put("name", serdeConstants.STRING_TYPE_NAME); - fields.put("income", serdeConstants.INT_TYPE_NAME); - - Type type = createType("Person", fields); - - Map params = new HashMap(); - params.put("transactional", ""); - - Map serdParams = new HashMap(); - serdParams.put(serdeConstants.SERIALIZATION_FORMAT, "1"); - StorageDescriptor sd = createStorageDescriptor(tblName, type.getFields(), params, serdParams); - sd.setNumBuckets(0); - sd.unsetBucketCols(); - - /// CREATE TABLE scenarios - - // Fail - No "transactional" property is specified - try { - Table t = createTable(dbName, tblName, owner, params, null, sd, 0); - Assert.assertTrue("Expected exception", false); - } catch (MetaException e) { - Assert.assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true': acidDb.acidTable", 
e.getMessage()); - } - - // Fail - "transactional" property is set to an invalid value - try { - params.clear(); - params.put("transactional", "foobar"); - Table t = createTable(dbName, tblName, owner, params, null, sd, 0); - Assert.assertTrue("Expected exception", false); - } catch (MetaException e) { - Assert.assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true': acidDb.acidTable", e.getMessage()); - } - - // Fail - "transactional" is set to true, but the table is not bucketed - try { - params.clear(); - params.put("transactional", "true"); - Table t = createTable(dbName, tblName, owner, params, null, sd, 0); - Assert.assertTrue("Expected exception", false); - } catch (MetaException e) { - Assert.assertEquals("The table must be stored using an ACID compliant format (such as ORC): acidDb.acidTable", e.getMessage()); - } - - // Fail - "transactional" is set to true, and the table is bucketed, but doesn't use ORC - try { - params.clear(); - params.put("transactional", "true"); - List bucketCols = new ArrayList(); - bucketCols.add("income"); - sd.setBucketCols(bucketCols); - Table t = createTable(dbName, tblName, owner, params, null, sd, 0); - Assert.assertTrue("Expected exception", false); - } catch (MetaException e) { - Assert.assertEquals("The table must be stored using an ACID compliant format (such as ORC): acidDb.acidTable", e.getMessage()); - } - - // Succeed - "transactional" is set to true, and the table is bucketed, and uses ORC - params.clear(); - params.put("transactional", "true"); - List bucketCols = new ArrayList(); - bucketCols.add("income"); - sd.setBucketCols(bucketCols); - sd.setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"); - sd.setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"); - Table t = createTable(dbName, tblName, owner, params, null, sd, 0); - Assert.assertTrue("CREATE TABLE should succeed", "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL))); - - /// ALTER TABLE scenarios - - // Fail - trying to set "transactional" to "false" is not allowed - try { - params.clear(); - params.put("transactional", "false"); - t.setParameters(params); - client.alter_table(dbName, tblName, t); - Assert.assertTrue("Expected exception", false); - } catch (MetaException e) { - Assert.assertEquals("TBLPROPERTIES with 'transactional'='true' cannot be unset: aciddb.acidtable", e.getMessage()); - } - - // Fail - trying to set "transactional" to "true" but doesn't satisfy bucketing and Input/OutputFormat requirement - try { - tblName += "1"; - params.clear(); - sd.unsetBucketCols(); - sd.setInputFormat("org.apache.hadoop.mapred.FileInputFormat"); - t = createTable(dbName, tblName, owner, params, null, sd, 0); - params.put("transactional", "true"); - t.setParameters(params); - client.alter_table(dbName, tblName, t); - Assert.assertTrue("Expected exception", false); - } catch (MetaException e) { - Assert.assertEquals("The table must be stored using an ACID compliant format (such as ORC): aciddb.acidtable1", e.getMessage()); - } - - // Succeed - trying to set "transactional" to "true", and satisfies bucketing and Input/OutputFormat requirement - tblName += "2"; - params.clear(); - sd.setNumBuckets(1); - sd.setBucketCols(bucketCols); - sd.setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"); - t = createTable(dbName, tblName, owner, params, null, sd, 0); - params.put("transactional", "true"); - t.setParameters(params); - t.setPartitionKeys(Collections.EMPTY_LIST); - 
client.alter_table(dbName, tblName, t); - Assert.assertTrue("ALTER TABLE should succeed", "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL))); - } - - private Table createTable(String dbName, String tblName, String owner, - Map tableParams, Map partitionKeys, - StorageDescriptor sd, int lastAccessTime) throws Exception { - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - if(tableParams != null) { - tbl.setParameters(tableParams); - } - - if(owner != null) { - tbl.setOwner(owner); - } - - if(partitionKeys != null) { - tbl.setPartitionKeys(new ArrayList(partitionKeys.size())); - for(String key : partitionKeys.keySet()) { - tbl.getPartitionKeys().add( - new FieldSchema(key, partitionKeys.get(key), "")); - } - } - - tbl.setSd(sd); - tbl.setLastAccessTime(lastAccessTime); - tbl.setTableType(TableType.MANAGED_TABLE.toString()); - - client.createTable(tbl); - - if (isThriftClient) { - // the createTable() above does not update the location in the 'tbl' - // object when the client is a thrift client and ALTER TABLE relies - // on the location being present in the 'tbl' object - so get the table - // from the metastore - tbl = client.getTable(dbName, tblName); - } - - return tbl; - } - - private StorageDescriptor createStorageDescriptor(String tableName, - List cols, Map params, Map serdParams) { - StorageDescriptor sd = new StorageDescriptor(); - - sd.setCols(cols); - sd.setCompressed(false); - sd.setNumBuckets(1); - sd.setParameters(params); - sd.setBucketCols(new ArrayList(2)); - sd.getBucketCols().add("name"); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tableName); - sd.getSerdeInfo().setParameters(serdParams); - sd.getSerdeInfo().getParameters() - .put(serdeConstants.SERIALIZATION_FORMAT, "1"); - sd.setSortCols(new ArrayList()); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); - - return sd; + private void createTable(String dbName, String tableName) throws TException { + Table t = new TableBuilder() + .setDbName(dbName) + .setTableName(tableName) + .addCol("foo", "string") + .addCol("bar", "string") + .build(); + client.createTable(t); } private List createPartitions(String dbName, Table tbl, List> values) throws Throwable { int i = 1; - List partitions = new ArrayList(); + List partitions = new ArrayList<>(); for(List vals : values) { Partition part = makePartitionObject(dbName, tbl.getTableName(), vals, tbl, "/part"+i); i++; @@ -3175,29 +2751,22 @@ private StorageDescriptor createStorageDescriptor(String tableName, } private void createMultiPartitionTableSchema(String dbName, String tblName, - String typeName, List> values) - throws Throwable, MetaException, TException, NoSuchObjectException { + String typeName, List> values) throws Throwable { createDb(dbName); - Map fields = new HashMap(); - fields.put("name", serdeConstants.STRING_TYPE_NAME); - fields.put("income", serdeConstants.INT_TYPE_NAME); - - Type typ1 = createType(typeName, fields); - - Map partitionKeys = new HashMap(); - partitionKeys.put("ds", serdeConstants.STRING_TYPE_NAME); - partitionKeys.put("hr", serdeConstants.STRING_TYPE_NAME); - - Map params = new HashMap(); - params.put("test_param_1", "Use this for comments etc"); - - Map serdParams = new HashMap(); - serdParams.put(serdeConstants.SERIALIZATION_FORMAT, "1"); - - StorageDescriptor sd = createStorageDescriptor(tblName, typ1.getFields(), params, 
serdParams); - - Table tbl = createTable(dbName, tblName, null, null, partitionKeys, sd, 0); + Map fields = new HashMap<>(); + fields.put("name", ColumnType.STRING_TYPE_NAME); + fields.put("income", ColumnType.INT_TYPE_NAME); + + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addCol("name", ColumnType.STRING_TYPE_NAME) + .addCol("income", ColumnType.INT_TYPE_NAME) + .addPartCol("ds", ColumnType.STRING_TYPE_NAME) + .addPartCol("hr", ColumnType.STRING_TYPE_NAME) + .build(); + client.createTable(tbl); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -3211,7 +2780,7 @@ private void createMultiPartitionTableSchema(String dbName, String tblName, } @Test - public void testDBOwner() throws NoSuchObjectException, MetaException, TException { + public void testDBOwner() throws TException { Database db = client.getDatabase(Warehouse.DEFAULT_DATABASE_NAME); assertEquals(db.getOwnerName(), HiveMetaStore.PUBLIC); assertEquals(db.getOwnerType(), PrincipalType.ROLE); @@ -3219,12 +2788,9 @@ public void testDBOwner() throws NoSuchObjectException, MetaException, TExceptio /** * Test changing owner and owner type of a database - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException */ @Test - public void testDBOwnerChange() throws NoSuchObjectException, MetaException, TException { + public void testDBOwnerChange() throws TException { final String dbName = "alterDbOwner"; final String user1 = "user1"; final String user2 = "user2"; @@ -3252,7 +2818,6 @@ public void testDBOwnerChange() throws NoSuchObjectException, MetaException, TEx /** * Test table objects can be retrieved in batches - * @throws Exception */ @Test public void testGetTableObjects() throws Exception { @@ -3285,8 +2850,8 @@ public void testGetTableObjects() throws Exception { @Test public void testDBLocationChange() throws IOException, TException { final String dbName = "alterDbLocation"; - String defaultUri = HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/default_location.db"; - String newUri = HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/new_location.db"; + String defaultUri = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/default_location.db"; + String newUri = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/new_location.db"; Database db = new Database(); db.setName(dbName); @@ -3311,7 +2876,7 @@ public void testDBLocationChange() throws IOException, TException { } private void checkDbOwnerType(String dbName, String ownerName, PrincipalType ownerType) - throws NoSuchObjectException, MetaException, TException { + throws TException { Database db = client.getDatabase(dbName); assertEquals("Owner name", ownerName, db.getOwnerName()); assertEquals("Owner type", ownerType, db.getOwnerType()); @@ -3326,11 +2891,13 @@ private void createFunction(String dbName, String funcName, String className, client.createFunction(func); } + @Test public void testRetriableClientWithConnLifetime() throws Exception { - HiveConf conf = new HiveConf(hiveConf); - conf.setLong(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME.name(), 60); - long timeout = 65 * 1000; // Lets use a timeout more than the socket lifetime to simulate a reconnect + Configuration conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setTimeVar(conf, ConfVars.CLIENT_SOCKET_LIFETIME, 4, TimeUnit.SECONDS); + MetaStoreTestUtils.setConfForStandloneMode(conf); + long timeout = 5 * 1000; // Lets use a timeout more than the socket lifetime to 
simulate a reconnect // Test a normal retriable client IMetaStoreClient client = RetryingMetaStoreClient.getProxy(conf, getHookLoader(), HiveMetaStoreClient.class.getName()); @@ -3344,27 +2911,28 @@ public void testRetriableClientWithConnLifetime() throws Exception { client.close(); } + @Test public void testJDOPersistanceManagerCleanup() throws Exception { if (isThriftClient == false) { return; } int numObjectsBeforeClose = getJDOPersistanceManagerCacheSize(); - HiveMetaStoreClient closingClient = new HiveMetaStoreClient(hiveConf); + HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf); closingClient.getAllDatabases(); closingClient.close(); Thread.sleep(5 * 1000); // give HMS time to handle close request int numObjectsAfterClose = getJDOPersistanceManagerCacheSize(); - Assert.assertTrue(numObjectsBeforeClose == numObjectsAfterClose); + assertTrue(numObjectsBeforeClose == numObjectsAfterClose); - HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(hiveConf); + HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(conf); nonClosingClient.getAllDatabases(); // Drop connection without calling close. HMS thread deleteContext // will trigger cleanup nonClosingClient.getTTransport().close(); Thread.sleep(5 * 1000); int numObjectsAfterDroppedConnection = getJDOPersistanceManagerCacheSize(); - Assert.assertTrue(numObjectsAfterClose == numObjectsAfterDroppedConnection); + assertTrue(numObjectsAfterClose == numObjectsAfterDroppedConnection); } private static int getJDOPersistanceManagerCacheSize() { @@ -3404,6 +2972,7 @@ public HiveMetaHook getHook( return hookLoader; } + @Test public void testValidateTableCols() throws Throwable { try { @@ -3417,26 +2986,12 @@ public void testValidateTableCols() throws Throwable { db.setDescription("Validate Table Columns test"); client.createDatabase(db); - ArrayList cols = new ArrayList(2); - cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, "")); - cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, "")); - - Table tbl = new Table(); - tbl.setDbName(dbName); - tbl.setTableName(tblName); - StorageDescriptor sd = new StorageDescriptor(); - tbl.setSd(sd); - sd.setCols(cols); - sd.setCompressed(false); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tbl.getTableName()); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters() - .put(serdeConstants.SERIALIZATION_FORMAT, "1"); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); - sd.setSortCols(new ArrayList()); + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addCol("name", ColumnType.STRING_TYPE_NAME) + .addCol("income", ColumnType.INT_TYPE_NAME) + .build(); client.createTable(tbl); if (isThriftClient) { @@ -3470,11 +3025,13 @@ public void testValidateTableCols() throws Throwable { } } + @Test public void testGetMetastoreUuid() throws Throwable { String uuid = client.getMetastoreDbUuid(); assertNotNull(uuid); } + @Test public void testGetUUIDInParallel() throws Exception { int numThreads = 5; int parallelCalls = 10; @@ -3485,7 +3042,7 @@ public void testGetUUIDInParallel() throws Exception { futures.add(executorService.submit(new Callable>() { @Override public List call() throws Exception { - HiveMetaStoreClient testClient = new HiveMetaStoreClient(hiveConf); + HiveMetaStoreClient testClient = new HiveMetaStoreClient(conf); List uuids = new 
ArrayList<>(10); for (int i = 0; i < numAPICallsPerThread; i++) { String uuid = testClient.getMetastoreDbUuid(); @@ -3497,7 +3054,7 @@ public void testGetUUIDInParallel() throws Exception { } String firstUUID = null; - List allUuids = new ArrayList(); + List allUuids = new ArrayList<>(); for (Future> future : futures) { for (String uuid : future.get()) { if (firstUUID == null) { diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java similarity index 96% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java index a19cc86744..d4cedb030f 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,19 +17,17 @@ */ package org.apache.hadoop.hive.metastore; -import junit.framework.Assert; - +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; import org.apache.hadoop.hive.metastore.api.LockResponse; import org.apache.hadoop.hive.metastore.api.LockState; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -49,15 +47,9 @@ */ public class TestHiveMetaStoreTxns { - private final HiveConf conf = new HiveConf(); + private final Configuration conf = MetastoreConf.newMetastoreConf(); private IMetaStoreClient client; - public TestHiveMetaStoreTxns() throws Exception { - TxnDbUtil.setConfValues(conf); - LogManager.getRootLogger().setLevel(Level.DEBUG); - tearDown(); - } - @Test public void testTxns() throws Exception { List tids = client.openTxns("me", 3).getTxn_ids(); @@ -259,6 +251,8 @@ public void stringifyValidTxns() throws Exception { @Before public void setUp() throws Exception { + MetaStoreTestUtils.setConfForStandloneMode(conf); + TxnDbUtil.setConfValues(conf); TxnDbUtil.prepDb(conf); client = new HiveMetaStoreClient(conf); } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java similarity index 64% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java index c29a34dc37..fd7524726b 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java +++ 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,17 +23,15 @@ import java.util.List; import java.util.Map; -import junit.framework.TestCase; - -import org.apache.hadoop.hive.cli.CliSessionState; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; import org.apache.hadoop.hive.metastore.events.AlterTableEvent; import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; @@ -43,98 +41,68 @@ import org.apache.hadoop.hive.metastore.events.DropTableEvent; import org.apache.hadoop.hive.metastore.events.ListenerEvent; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; -import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; -import org.apache.hadoop.hive.ql.io.HiveInputFormat; -import org.apache.hadoop.hive.ql.io.HiveOutputFormat; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; /** * TestHiveMetaStoreWithEnvironmentContext. 
Test case for _with_environment_context * calls in {@link org.apache.hadoop.hive.metastore.HiveMetaStore} */ -public class TestHiveMetaStoreWithEnvironmentContext extends TestCase { +public class TestHiveMetaStoreWithEnvironmentContext { - private HiveConf hiveConf; + private Configuration conf; private HiveMetaStoreClient msc; private EnvironmentContext envContext; private final Database db = new Database(); - private Table table = new Table(); - private final Partition partition = new Partition(); + private Table table; + private Partition partition; private static final String dbName = "hive3252"; private static final String tblName = "tmptbl"; private static final String renamed = "tmptbl2"; - @Override - protected void setUp() throws Exception { - super.setUp(); - + @Before + public void setUp() throws Exception { System.setProperty("hive.metastore.event.listeners", DummyListener.class.getName()); - int port = MetaStoreTestUtils.startMetaStoreWithRetry(); - - hiveConf = new HiveConf(this.getClass()); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - SessionState.start(new CliSessionState(hiveConf)); - msc = new HiveMetaStoreClient(hiveConf); + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetaStoreTestUtils.setConfForStandloneMode(conf); + int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + msc = new HiveMetaStoreClient(conf); msc.dropDatabase(dbName, true, true); - Map envProperties = new HashMap(); + Map envProperties = new HashMap<>(); envProperties.put("hadoop.job.ugi", "test_user"); envContext = new EnvironmentContext(envProperties); db.setName(dbName); - Map tableParams = new HashMap(); - tableParams.put("a", "string"); - List partitionKeys = new ArrayList(); - partitionKeys.add(new FieldSchema("b", "string", "")); - - List cols = new ArrayList(); - cols.add(new FieldSchema("a", "string", "")); - cols.add(new FieldSchema("b", "string", "")); - StorageDescriptor sd = new StorageDescriptor(); - sd.setCols(cols); - sd.setCompressed(false); - sd.setParameters(tableParams); - sd.setSerdeInfo(new SerDeInfo()); - sd.getSerdeInfo().setName(tblName); - sd.getSerdeInfo().setParameters(new HashMap()); - sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1"); - sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); - sd.setInputFormat(HiveInputFormat.class.getName()); - sd.setOutputFormat(HiveOutputFormat.class.getName()); - - table.setDbName(dbName); - table.setTableName(tblName); - table.setParameters(tableParams); - table.setPartitionKeys(partitionKeys); - table.setSd(sd); - - List partValues = new ArrayList(); - partValues.add("2011"); - partition.setDbName(dbName); - partition.setTableName(tblName); - partition.setValues(partValues); - partition.setSd(table.getSd().deepCopy()); - partition.getSd().setSerdeInfo(table.getSd().getSerdeInfo().deepCopy()); + table = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addTableParam("a", "string") + 
.addPartCol("b", "string") + .addCol("a", "string") + .addCol("b", "string") + .build(); - DummyListener.notifyList.clear(); - } - @Override - protected void tearDown() throws Exception { - super.tearDown(); + partition = new PartitionBuilder() + .fromTable(table) + .addValue("2011") + .build(); + + DummyListener.notifyList.clear(); } + @Test public void testEnvironmentContext() throws Exception { int listSize = 0; @@ -163,7 +131,7 @@ public void testEnvironmentContext() throws Exception { assert partEvent.getStatus(); assertEquals(envContext, partEvent.getEnvironmentContext()); - List partVals = new ArrayList(); + List partVals = new ArrayList<>(); partVals.add("2012"); msc.appendPartition(dbName, tblName, partVals, envContext); listSize++; @@ -185,7 +153,7 @@ public void testEnvironmentContext() throws Exception { listSize++; assertEquals(notifyList.size(), listSize); - List dropPartVals = new ArrayList(); + List dropPartVals = new ArrayList<>(); dropPartVals.add("2011"); msc.dropPartition(dbName, tblName, dropPartVals, envContext); listSize++; diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java new file mode 100644 index 0000000000..6854a93f32 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.thrift.TException; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class TestMarkPartition { + + protected Configuration conf; + + @Before + public void setUp() throws Exception { + + System.setProperty("hive.metastore.event.clean.freq", "1s"); + System.setProperty("hive.metastore.event.expiry.duration", "2s"); + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetaStoreTestUtils.setConfForStandloneMode(conf); + + } + + @Test + public void testMarkingPartitionSet() throws TException, InterruptedException { + HiveMetaStoreClient msc = new HiveMetaStoreClient(conf); + + final String dbName = "hive2215"; + msc.dropDatabase(dbName, true, true, true); + Database db = new DatabaseBuilder() + .setName(dbName) + .build(); + msc.createDatabase(db); + + final String tableName = "tmptbl"; + msc.dropTable(dbName, tableName, true, true); + Table table = new TableBuilder() + .setDbName(dbName) + .setTableName(tableName) + .addCol("a", "string") + .addPartCol("b", "string") + .build(); + msc.createTable(table); + + Partition part = new PartitionBuilder() + .fromTable(table) + .addValue("2011") + .build(); + msc.add_partition(part); + Map kvs = new HashMap<>(); + kvs.put("b", "'2011'"); + msc.markPartitionForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE); + Assert.assertTrue(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE)); + Thread.sleep(3000); + Assert.assertFalse(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE)); + + kvs.put("b", "'2012'"); + Assert.assertFalse(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE)); + try { + msc.markPartitionForEvent(dbName, "tmptbl2", kvs, PartitionEventType.LOAD_DONE); + Assert.fail("Expected UnknownTableException"); + } catch (UnknownTableException e) { + // All good + } catch(Exception e){ + Assert.fail("Expected UnknownTableException"); + } + try{ + msc.isPartitionMarkedForEvent(dbName, "tmptbl2", kvs, PartitionEventType.LOAD_DONE); + Assert.fail("Expected UnknownTableException"); + } catch (UnknownTableException e) { + // All good + } catch(Exception e){ + Assert.fail("Expected UnknownTableException, received " + e.getClass().getName()); + } + kvs.put("a", "'2012'"); + try { + msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE); + Assert.fail("Expected InvalidPartitionException"); + } catch (InvalidPartitionException e) { + // All good + } catch(Exception e){ + Assert.fail("Expected InvalidPartitionException, received " + e.getClass().getName()); + } + } + +} diff --git 
itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java similarity index 59% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java index c541193658..ac1cc4c936 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,15 +18,19 @@ package org.apache.hadoop.hive.metastore; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.junit.Before; public class TestMarkPartitionRemote extends TestMarkPartition { - @Override - protected void setUp() throws Exception { - super.setUp(); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + MetaStoreTestUtils.startMetaStore()); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + @Before + public void startServer() throws Exception { + int port = MetaStoreTestUtils.findFreePort(); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetaStoreTestUtils.setConfForStandloneMode(conf); + MetaStoreTestUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf); } } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java similarity index 64% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java index 1ca18b96a4..25e3a955d9 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,67 +18,65 @@ package org.apache.hadoop.hive.metastore; - - -import org.apache.hadoop.hive.cli.CliSessionState; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.ql.DriverFactory; -import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.session.SessionState; - -import junit.framework.TestCase; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; /** * TestMetaStoreEventListener. Test case for * {@link org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener} */ -public class TestMetaStoreEndFunctionListener extends TestCase { - private HiveConf hiveConf; +public class TestMetaStoreEndFunctionListener { + private Configuration conf; private HiveMetaStoreClient msc; - private IDriver driver; - - @Override - protected void setUp() throws Exception { - super.setUp(); + @Before + public void setUp() throws Exception { System.setProperty("hive.metastore.event.listeners", DummyListener.class.getName()); System.setProperty("hive.metastore.pre.event.listeners", DummyPreListener.class.getName()); System.setProperty("hive.metastore.end.function.listeners", DummyEndFunctionListener.class.getName()); - int port = MetaStoreTestUtils.startMetaStoreWithRetry(); - hiveConf = new HiveConf(this.getClass()); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - SessionState.start(new CliSessionState(hiveConf)); - msc = new HiveMetaStoreClient(hiveConf); - driver = DriverFactory.newDriver(hiveConf); - } - - @Override - protected void tearDown() throws Exception { - super.tearDown(); + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetaStoreTestUtils.setConfForStandloneMode(conf); + int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + msc = new HiveMetaStoreClient(conf); } + @Test public void testEndFunctionListener() throws Exception { /* Objective here is to ensure that when exceptions are thrown in HiveMetaStore in API methods * they bubble up and are stored in the MetaStoreEndFunctionContext objects */ String dbName = "hive3524"; String tblName = "tmptbl"; - int listSize = 0; + int listSize; - driver.run("create database " + dbName); + Database db = new DatabaseBuilder() + .setName(dbName) + .build(); + 
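+      // For reference, a minimal end-function listener in the shape this test relies on.
+      // This is a hedged sketch assuming the MetaStoreEndFunctionListener contract that
+      // DummyEndFunctionListener appears to implement (a Configuration-taking constructor
+      // plus an onEndFunction callback run as each HiveMetaStore API method completes):
+      //
+      //   public class RecordingEndFunctionListener extends MetaStoreEndFunctionListener {
+      //     public RecordingEndFunctionListener(Configuration config) { super(config); }
+      //     @Override
+      //     public void onEndFunction(String functionName, MetaStoreEndFunctionContext context) {
+      //       // context.getException() and context.getInputTableName() hold the failure
+      //       // details that the assertions below read back via DummyEndFunctionListener.
+      //     }
+      //   }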
msc.createDatabase(db); try { msc.getDatabase("UnknownDB"); - } - catch (Exception e) { + } catch (Exception e) { + // All good } listSize = DummyEndFunctionListener.funcNameList.size(); String func_name = DummyEndFunctionListener.funcNameList.get(listSize-1); @@ -90,13 +88,18 @@ public void testEndFunctionListener() throws Exception { assertTrue((e instanceof NoSuchObjectException)); assertEquals(context.getInputTableName(), null); - driver.run("use " + dbName); - driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName)); - String tableName = "Unknown"; + String unknownTable = "UnknownTable"; + Table table = new TableBuilder() + .setDbName(db) + .setTableName(tblName) + .addCol("a", "string") + .addPartCol("b", "string") + .build(); + msc.createTable(table); try { - msc.getTable(dbName, tableName); - } - catch (Exception e1) { + msc.getTable(dbName, unknownTable); + } catch (Exception e1) { + // All good } listSize = DummyEndFunctionListener.funcNameList.size(); func_name = DummyEndFunctionListener.funcNameList.get(listSize-1); @@ -106,12 +109,12 @@ public void testEndFunctionListener() throws Exception { e = context.getException(); assertTrue((e!=null)); assertTrue((e instanceof NoSuchObjectException)); - assertEquals(context.getInputTableName(), tableName); + assertEquals(context.getInputTableName(), unknownTable); try { msc.getPartition("hive3524", tblName, "b=2012"); - } - catch (Exception e2) { + } catch (Exception e2) { + // All good } listSize = DummyEndFunctionListener.funcNameList.size(); func_name = DummyEndFunctionListener.funcNameList.get(listSize-1); @@ -123,9 +126,9 @@ public void testEndFunctionListener() throws Exception { assertTrue((e instanceof NoSuchObjectException)); assertEquals(context.getInputTableName(), tblName); try { - driver.run("drop table Unknown"); - } - catch (Exception e4) { + msc.dropTable(dbName, unknownTable); + } catch (Exception e4) { + // All good } listSize = DummyEndFunctionListener.funcNameList.size(); func_name = DummyEndFunctionListener.funcNameList.get(listSize-1); @@ -135,7 +138,7 @@ public void testEndFunctionListener() throws Exception { e = context.getException(); assertTrue((e!=null)); assertTrue((e instanceof NoSuchObjectException)); - assertEquals(context.getInputTableName(), "Unknown"); + assertEquals(context.getInputTableName(), "UnknownTable"); } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java similarity index 80% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java index 358e5d18eb..1508ee5dcf 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,13 +26,19 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.hive.cli.CliSessionState; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionEventType; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.IndexBuilder; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.events.AddIndexEvent; import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; import org.apache.hadoop.hive.metastore.events.AlterIndexEvent; @@ -60,10 +66,15 @@ import org.apache.hadoop.hive.metastore.events.PreDropTableEvent; import org.apache.hadoop.hive.metastore.events.PreEventContext; import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent; -import org.apache.hadoop.hive.ql.DriverFactory; -import org.apache.hadoop.hive.ql.IDriver; -import org.apache.hadoop.hive.ql.processors.SetProcessor; -import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertTrue; import com.google.common.collect.Lists; @@ -74,54 +85,39 @@ * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener} and * {@link org.apache.hadoop.hive.metastore.MetaStorePreEventListener} */ -public class TestMetaStoreEventListener extends TestCase { - private HiveConf hiveConf; +public class TestMetaStoreEventListener { + private Configuration conf; private HiveMetaStoreClient msc; - private IDriver driver; private static final String dbName = "hive2038"; private static final String tblName = "tmptbl"; private static final String renamed = "tmptbl2"; - private static final String metaConfKey = "hive.metastore.partition.name.whitelist.pattern"; + private static final String metaConfKey = "metastore.partition.name.whitelist.pattern"; private static final String metaConfVal = ""; - @Override - protected void setUp() throws Exception { - - super.setUp(); - + @Before + public void setUp() throws Exception { System.setProperty("hive.metastore.event.listeners", DummyListener.class.getName()); System.setProperty("hive.metastore.pre.event.listeners", DummyPreListener.class.getName()); - hiveConf = new HiveConf(this.getClass()); + conf = MetastoreConf.newMetastoreConf(); - hiveConf.setVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN, metaConfVal); - int port = MetaStoreTestUtils.startMetaStoreWithRetry(hiveConf); + MetastoreConf.setVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN, metaConfVal); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetastoreConf.setBoolVar(conf, 
ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetaStoreTestUtils.setConfForStandloneMode(conf); + int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, - "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"); - SessionState.start(new CliSessionState(hiveConf)); - msc = new HiveMetaStoreClient(hiveConf); - driver = DriverFactory.newDriver(hiveConf); - - driver.run("drop database if exists " + dbName + " cascade"); + msc = new HiveMetaStoreClient(conf); + msc.dropDatabase(dbName, true, true, true); DummyListener.notifyList.clear(); DummyPreListener.notifyList.clear(); } - @Override - protected void tearDown() throws Exception { - super.tearDown(); - } - private void validateCreateDb(Database expectedDb, Database actualDb) { assertEquals(expectedDb.getName(), actualDb.getName()); assertEquals(expectedDb.getLocationUri(), actualDb.getLocationUri()); @@ -226,6 +222,7 @@ private void validateDropIndex(Index expectedIndex, Index actualIndex) { validateIndex(expectedIndex, actualIndex); } + @Test public void testListener() throws Exception { int listSize = 0; @@ -234,21 +231,28 @@ public void testListener() throws Exception { assertEquals(notifyList.size(), listSize); assertEquals(preNotifyList.size(), listSize); - driver.run("create database " + dbName); + Database db = new DatabaseBuilder() + .setName(dbName) + .build(); + msc.createDatabase(db); listSize++; PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(preNotifyList.size() - 1)); - Database db = msc.getDatabase(dbName); + db = msc.getDatabase(dbName); assertEquals(listSize, notifyList.size()); assertEquals(listSize + 1, preNotifyList.size()); validateCreateDb(db, preDbEvent.getDatabase()); CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1)); - assert dbEvent.getStatus(); + Assert.assertTrue(dbEvent.getStatus()); validateCreateDb(db, dbEvent.getDatabase()); - - driver.run("use " + dbName); - driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName)); + Table table = new TableBuilder() + .setDbName(db) + .setTableName(tblName) + .addCol("a", "string") + .addPartCol("b", "string") + .build(); + msc.createTable(table); PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1)); listSize++; Table tbl = msc.getTable(dbName, tblName); @@ -256,33 +260,45 @@ public void testListener() throws Exception { assertEquals(notifyList.size(), listSize); CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1)); - assert tblEvent.getStatus(); + Assert.assertTrue(tblEvent.getStatus()); validateCreateTable(tbl, tblEvent.getTable()); - driver.run("create index tmptbl_i on table tmptbl(a) as 'compact' " + - "WITH DEFERRED REBUILD IDXPROPERTIES ('prop1'='val1', 'prop2'='val2')"); + String indexName = "tmptbl_i"; + Index index = new IndexBuilder() + .setDbAndTableName(table) + .setIndexName(indexName) + .addCol("a", "string") + .setDeferredRebuild(true) + .addIndexParam("prop1", "val1") 
+ .addIndexParam("prop2", "val2") + .build(); + Table indexTable = new TableBuilder() + .fromIndex(index) + .build(); + msc.createIndex(index, indexTable); listSize += 2; // creates index table internally assertEquals(notifyList.size(), listSize); AddIndexEvent addIndexEvent = (AddIndexEvent)notifyList.get(listSize - 1); - assert addIndexEvent.getStatus(); - PreAddIndexEvent preAddIndexEvent = (PreAddIndexEvent)(preNotifyList.get(preNotifyList.size() - 3)); + Assert.assertTrue(addIndexEvent.getStatus()); + PreAddIndexEvent preAddIndexEvent = (PreAddIndexEvent)(preNotifyList.get(preNotifyList.size() - 2)); - Index oldIndex = msc.getIndex(dbName, "tmptbl", "tmptbl_i"); + Index oldIndex = msc.getIndex(dbName, tblName, indexName); validateAddIndex(oldIndex, addIndexEvent.getIndex()); validateAddIndex(oldIndex, preAddIndexEvent.getIndex()); - driver.run("alter index tmptbl_i on tmptbl set IDXPROPERTIES " + - "('prop1'='val1_new', 'prop3'='val3')"); + Index alteredIndex = new Index(oldIndex); + alteredIndex.getParameters().put("prop3", "val3"); + msc.alter_index(dbName, tblName, indexName, alteredIndex); listSize++; assertEquals(notifyList.size(), listSize); - Index newIndex = msc.getIndex(dbName, "tmptbl", "tmptbl_i"); + Index newIndex = msc.getIndex(dbName, tblName, indexName); AlterIndexEvent alterIndexEvent = (AlterIndexEvent) notifyList.get(listSize - 1); - assert alterIndexEvent.getStatus(); + Assert.assertTrue(alterIndexEvent.getStatus()); validateAlterIndex(oldIndex, alterIndexEvent.getOldIndex(), newIndex, alterIndexEvent.getNewIndex()); @@ -290,25 +306,29 @@ public void testListener() throws Exception { validateAlterIndex(oldIndex, preAlterIndexEvent.getOldIndex(), newIndex, preAlterIndexEvent.getNewIndex()); - driver.run("drop index tmptbl_i on tmptbl"); + msc.dropIndex(dbName, tblName, indexName, true); listSize++; assertEquals(notifyList.size(), listSize); DropIndexEvent dropIndexEvent = (DropIndexEvent) notifyList.get(listSize - 1); - assert dropIndexEvent.getStatus(); + Assert.assertTrue(dropIndexEvent.getStatus()); validateDropIndex(newIndex, dropIndexEvent.getIndex()); PreDropIndexEvent preDropIndexEvent = (PreDropIndexEvent) (preNotifyList.get(preNotifyList.size() - 1)); validateDropIndex(newIndex, preDropIndexEvent.getIndex()); - driver.run("alter table tmptbl add partition (b='2011')"); + Partition part = new PartitionBuilder() + .fromTable(table) + .addValue("2011") + .build(); + msc.add_partition(part); listSize++; assertEquals(notifyList.size(), listSize); PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1)); AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1)); - assert partEvent.getStatus(); - Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011"); + Assert.assertTrue(partEvent.getStatus()); + part = msc.getPartition("hive2038", "tmptbl", "b=2011"); Partition partAdded = partEvent.getPartitionIterator().next(); validateAddPartition(part, partAdded); validateTableInAddPartition(tbl, partEvent.getTable()); @@ -316,8 +336,8 @@ public void testListener() throws Exception { // Test adding multiple partitions in a single partition-set, atomically. 
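+    // The atomic add itself happens in context lines not shown in this hunk; with the
+    // HiveMetaStoreClient API it presumably comes down to a single call over the three
+    // partitions constructed below, e.g.
+    //
+    //   hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3));
+    //
+    // one RPC in which either all partitions are added or none is, which is what
+    // "atomically" means here.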
int currentTime = (int)System.currentTimeMillis(); - HiveMetaStoreClient hmsClient = new HiveMetaStoreClient(hiveConf); - Table table = hmsClient.getTable(dbName, "tmptbl"); + HiveMetaStoreClient hmsClient = new HiveMetaStoreClient(conf); + table = hmsClient.getTable(dbName, "tmptbl"); Partition partition1 = new Partition(Arrays.asList("20110101"), dbName, "tmptbl", currentTime, currentTime, table.getSd(), table.getParameters()); Partition partition2 = new Partition(Arrays.asList("20110102"), dbName, "tmptbl", currentTime, @@ -334,7 +354,8 @@ public void testListener() throws Exception { assertEquals("Unexpected partition value.", partition2.getValues(), multiParts.get(1).getValues()); assertEquals("Unexpected partition value.", partition3.getValues(), multiParts.get(2).getValues()); - driver.run(String.format("alter table %s touch partition (%s)", tblName, "b='2011'")); + part.setLastAccessTime((int)(System.currentTimeMillis()/1000)); + msc.alter_partition(dbName, tblName, part); listSize++; assertEquals(notifyList.size(), listSize); PreAlterPartitionEvent preAlterPartEvent = @@ -345,7 +366,7 @@ public void testListener() throws Exception { Partition origP = msc.getPartition(dbName, tblName, "b=2011"); AlterPartitionEvent alterPartEvent = (AlterPartitionEvent)notifyList.get(listSize - 1); - assert alterPartEvent.getStatus(); + Assert.assertTrue(alterPartEvent.getStatus()); validateAlterPartition(origP, origP, alterPartEvent.getOldPartition().getDbName(), alterPartEvent.getOldPartition().getTableName(), alterPartEvent.getOldPartition().getValues(), alterPartEvent.getNewPartition()); @@ -355,7 +376,7 @@ public void testListener() throws Exception { preAlterPartEvent.getTableName(), preAlterPartEvent.getNewPartition().getValues(), preAlterPartEvent.getNewPartition()); - List part_vals = new ArrayList(); + List part_vals = new ArrayList<>(); part_vals.add("c=2012"); int preEventListSize; preEventListSize = preNotifyList.size() + 1; @@ -374,25 +395,31 @@ public void testListener() throws Exception { (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1)); validateAddPartition(newPart, preAppendPartEvent.getPartitions().get(0)); - driver.run(String.format("alter table %s rename to %s", tblName, renamed)); + Table renamedTable = new Table(table); + renamedTable.setTableName(renamed); + msc.alter_table(dbName, tblName, renamedTable); listSize++; assertEquals(notifyList.size(), listSize); PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1); - Table renamedTable = msc.getTable(dbName, renamed); + renamedTable = msc.getTable(dbName, renamed); AlterTableEvent alterTableE = (AlterTableEvent) notifyList.get(listSize-1); - assert alterTableE.getStatus(); + Assert.assertTrue(alterTableE.getStatus()); validateAlterTable(tbl, renamedTable, alterTableE.getOldTable(), alterTableE.getNewTable()); validateAlterTable(tbl, renamedTable, preAlterTableE.getOldTable(), preAlterTableE.getNewTable()); //change the table name back - driver.run(String.format("alter table %s rename to %s", renamed, tblName)); + table = new Table(renamedTable); + table.setTableName(tblName); + msc.alter_table(dbName, renamed, table); listSize++; assertEquals(notifyList.size(), listSize); - driver.run(String.format("alter table %s ADD COLUMNS (c int)", tblName)); + table = msc.getTable(dbName, tblName); + table.getSd().addToCols(new FieldSchema("c", "int", "")); + msc.alter_table(dbName, tblName, table); listSize++; assertEquals(notifyList.size(), listSize); preAlterTableE = 
(PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1); @@ -400,19 +427,19 @@ public void testListener() throws Exception { Table altTable = msc.getTable(dbName, tblName); alterTableE = (AlterTableEvent) notifyList.get(listSize-1); - assert alterTableE.getStatus(); + Assert.assertTrue(alterTableE.getStatus()); validateAlterTableColumns(tbl, altTable, alterTableE.getOldTable(), alterTableE.getNewTable()); validateAlterTableColumns(tbl, altTable, preAlterTableE.getOldTable(), preAlterTableE.getNewTable()); - Map kvs = new HashMap(1); + Map kvs = new HashMap<>(1); kvs.put("b", "2011"); msc.markPartitionForEvent("hive2038", "tmptbl", kvs, PartitionEventType.LOAD_DONE); listSize++; assertEquals(notifyList.size(), listSize); LoadPartitionDoneEvent partMarkEvent = (LoadPartitionDoneEvent)notifyList.get(listSize - 1); - assert partMarkEvent.getStatus(); + Assert.assertTrue(partMarkEvent.getStatus()); validateLoadPartitionDone("tmptbl", kvs, partMarkEvent.getTable().getTableName(), partMarkEvent.getPartitionName()); @@ -421,64 +448,66 @@ public void testListener() throws Exception { validateLoadPartitionDone("tmptbl", kvs, prePartMarkEvent.getTableName(), prePartMarkEvent.getPartitionName()); - driver.run(String.format("alter table %s drop partition (b='2011')", tblName)); + msc.dropPartition(dbName, tblName, Collections.singletonList("2011")); listSize++; assertEquals(notifyList.size(), listSize); PreDropPartitionEvent preDropPart = (PreDropPartitionEvent) preNotifyList.get(preNotifyList .size() - 1); DropPartitionEvent dropPart = (DropPartitionEvent)notifyList.get(listSize - 1); - assert dropPart.getStatus(); + Assert.assertTrue(dropPart.getStatus()); validateDropPartition(Collections.singletonList(part).iterator(), dropPart.getPartitionIterator()); validateTableInDropPartition(tbl, dropPart.getTable()); validateDropPartition(Collections.singletonList(part).iterator(), preDropPart.getPartitionIterator()); validateTableInDropPartition(tbl, preDropPart.getTable()); - driver.run("drop table " + tblName); + msc.dropTable(dbName, tblName); listSize++; assertEquals(notifyList.size(), listSize); PreDropTableEvent preDropTbl = (PreDropTableEvent)preNotifyList.get(preNotifyList.size() - 1); DropTableEvent dropTbl = (DropTableEvent)notifyList.get(listSize-1); - assert dropTbl.getStatus(); + Assert.assertTrue(dropTbl.getStatus()); validateDropTable(tbl, dropTbl.getTable()); validateDropTable(tbl, preDropTbl.getTable()); - driver.run("drop database " + dbName); + msc.dropDatabase(dbName); listSize++; assertEquals(notifyList.size(), listSize); PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent)preNotifyList.get(preNotifyList.size() - 1); DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1); - assert dropDB.getStatus(); + Assert.assertTrue(dropDB.getStatus()); validateDropDb(db, dropDB.getDatabase()); validateDropDb(db, preDropDB.getDatabase()); - SetProcessor.setVariable("metaconf:hive.metastore.try.direct.sql", "false"); + msc.setMetaConf("metastore.try.direct.sql", "false"); ConfigChangeEvent event = (ConfigChangeEvent) notifyList.get(notifyList.size() - 1); - assertEquals("hive.metastore.try.direct.sql", event.getKey()); + assertEquals("metastore.try.direct.sql", event.getKey()); assertEquals("true", event.getOldValue()); assertEquals("false", event.getNewValue()); } + @Test public void testMetaConfNotifyListenersClosingClient() throws Exception { - HiveMetaStoreClient closingClient = new HiveMetaStoreClient(hiveConf, null); + HiveMetaStoreClient closingClient = new 
HiveMetaStoreClient(conf, null); closingClient.setMetaConf(metaConfKey, "[test pattern modified]"); ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent(); assertEquals(event.getOldValue(), metaConfVal); assertEquals(event.getNewValue(), "[test pattern modified]"); closingClient.close(); - Thread.sleep(5 * 1000); + Thread.sleep(2 * 1000); event = (ConfigChangeEvent) DummyListener.getLastEvent(); assertEquals(event.getOldValue(), "[test pattern modified]"); assertEquals(event.getNewValue(), metaConfVal); } + @Test public void testMetaConfNotifyListenersNonClosingClient() throws Exception { - HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(hiveConf, null); + HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(conf, null); nonClosingClient.setMetaConf(metaConfKey, "[test pattern modified]"); ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent(); assertEquals(event.getOldValue(), metaConfVal); @@ -486,35 +515,37 @@ public void testMetaConfNotifyListenersNonClosingClient() throws Exception { // This should also trigger meta listener notification via TServerEventHandler#deleteContext nonClosingClient.getTTransport().close(); - Thread.sleep(5 * 1000); + Thread.sleep(2 * 1000); event = (ConfigChangeEvent) DummyListener.getLastEvent(); assertEquals(event.getOldValue(), "[test pattern modified]"); assertEquals(event.getNewValue(), metaConfVal); } + @Test public void testMetaConfDuplicateNotification() throws Exception { - HiveMetaStoreClient closingClient = new HiveMetaStoreClient(hiveConf, null); + HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null); closingClient.setMetaConf(metaConfKey, metaConfVal); int beforeCloseNotificationEventCounts = DummyListener.notifyList.size(); closingClient.close(); - Thread.sleep(5 * 1000); + Thread.sleep(2 * 1000); int afterCloseNotificationEventCounts = DummyListener.notifyList.size(); // Setting key to same value, should not trigger configChange event during shutdown assertEquals(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts); } + @Test public void testMetaConfSameHandler() throws Exception { - HiveMetaStoreClient closingClient = new HiveMetaStoreClient(hiveConf, null); + HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null); closingClient.setMetaConf(metaConfKey, "[test pattern modified]"); ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent(); int beforeCloseNotificationEventCounts = DummyListener.notifyList.size(); IHMSHandler beforeHandler = event.getIHMSHandler(); closingClient.close(); - Thread.sleep(5 * 1000); + Thread.sleep(2 * 1000); event = (ConfigChangeEvent) DummyListener.getLastEvent(); int afterCloseNotificationEventCounts = DummyListener.notifyList.size(); IHMSHandler afterHandler = event.getIHMSHandler(); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java new file mode 100644 index 0000000000..de729c752d --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
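The four testMetaConf* cases above all pin down the same contract: HiveMetaStoreClient.setMetaConf overrides a whitelisted configuration value only for the lifetime of that client's connection, and the server restores the previous value when the connection goes away (via TServerEventHandler#deleteContext), firing one more ConfigChangeEvent as it does so. A minimal usage sketch of that contract, assuming a metastore is already reachable at the THRIFT_URIS configured in conf:

    Configuration conf = MetastoreConf.newMetastoreConf();
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // Scoped to this connection; the server fires a ConfigChangeEvent.
      client.setMetaConf("metastore.try.direct.sql", "false");
      // ... calls made on this client observe the overridden value ...
    } finally {
      // Closing restores the previous value and fires a second ConfigChangeEvent,
      // which is why the tests sleep briefly and re-read DummyListener's last event.
      client.close();
    }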
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.events.ListenerEvent; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +/** + * Ensure that the status of MetaStore events depends on the RawStore's commit status. + */ +public class TestMetaStoreEventListenerOnlyOnCommit { + + private Configuration conf; + private HiveMetaStoreClient msc; + + @Before + public void setUp() throws Exception { + DummyRawStoreControlledCommit.setCommitSucceed(true); + + System.setProperty(ConfVars.EVENT_LISTENERS.toString(), DummyListener.class.getName()); + System.setProperty(ConfVars.RAW_STORE_IMPL.toString(), + DummyRawStoreControlledCommit.class.getName()); + + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetaStoreTestUtils.setConfForStandloneMode(conf); + int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + msc = new HiveMetaStoreClient(conf); + + DummyListener.notifyList.clear(); + } + + @Test + public void testEventStatus() throws Exception { + int listSize = 0; + List notifyList = DummyListener.notifyList; + assertEquals(notifyList.size(), listSize); + + String dbName = "tmpDb"; + Database db = new DatabaseBuilder() + .setName(dbName) + .build(); + msc.createDatabase(db); + + listSize += 1; + notifyList = DummyListener.notifyList; + assertEquals(notifyList.size(), listSize); + assertTrue(DummyListener.getLastEvent().getStatus()); + + String tableName = "unittest_TestMetaStoreEventListenerOnlyOnCommit"; + Table table = new TableBuilder() + .setDbName(db) + .setTableName(tableName) + .addCol("id", "int") + .addPartCol("ds", "string") + .build(); + msc.createTable(table); + listSize += 1; + notifyList = DummyListener.notifyList; + assertEquals(notifyList.size(), listSize); + assertTrue(DummyListener.getLastEvent().getStatus()); + + Partition part = new PartitionBuilder() + .fromTable(table) +
.addValue("foo1") + .build(); + msc.add_partition(part); + listSize += 1; + notifyList = DummyListener.notifyList; + assertEquals(notifyList.size(), listSize); + assertTrue(DummyListener.getLastEvent().getStatus()); + + DummyRawStoreControlledCommit.setCommitSucceed(false); + + part = new PartitionBuilder() + .fromTable(table) + .addValue("foo2") + .build(); + msc.add_partition(part); + listSize += 1; + notifyList = DummyListener.notifyList; + assertEquals(notifyList.size(), listSize); + assertFalse(DummyListener.getLastEvent().getStatus()); + + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerWithOldConf.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerWithOldConf.java new file mode 100644 index 0000000000..82e39f1cf3 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerWithOldConf.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import com.google.common.collect.Lists; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.IndexBuilder; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.events.AddIndexEvent; +import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; +import org.apache.hadoop.hive.metastore.events.AlterIndexEvent; +import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent; +import org.apache.hadoop.hive.metastore.events.AlterTableEvent; +import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent; +import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.CreateTableEvent; +import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.DropIndexEvent; +import org.apache.hadoop.hive.metastore.events.DropPartitionEvent; +import org.apache.hadoop.hive.metastore.events.DropTableEvent; +import org.apache.hadoop.hive.metastore.events.ListenerEvent; +import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent; +import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent; +import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent; +import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent; +import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent; +import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent; +import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent; +import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent; +import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent; +import org.apache.hadoop.hive.metastore.events.PreDropTableEvent; +import org.apache.hadoop.hive.metastore.events.PreEventContext; +import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +/** + * Mostly same tests as TestMetaStoreEventListener, but using old hive conf values instead of new + * metastore conf values. 
+ */ +public class TestMetaStoreEventListenerWithOldConf { + private Configuration conf; + + private static final String metaConfKey = "hive.metastore.partition.name.whitelist.pattern"; + private static final String metaConfVal = ""; + + @Before + public void setUp() throws Exception { + System.setProperty("hive.metastore.event.listeners", + DummyListener.class.getName()); + System.setProperty("hive.metastore.pre.event.listeners", + DummyPreListener.class.getName()); + + int port = MetaStoreTestUtils.findFreePort(); + conf = MetastoreConf.newMetastoreConf(); + + MetastoreConf.setVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN, metaConfVal); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetaStoreTestUtils.setConfForStandloneMode(conf); + MetaStoreTestUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf); + + DummyListener.notifyList.clear(); + DummyPreListener.notifyList.clear(); + } + + @Test + public void testMetaConfNotifyListenersClosingClient() throws Exception { + HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null); + closingClient.setMetaConf(metaConfKey, "[test pattern modified]"); + ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent(); + assertEquals(event.getOldValue(), metaConfVal); + assertEquals(event.getNewValue(), "[test pattern modified]"); + closingClient.close(); + + Thread.sleep(2 * 1000); + + event = (ConfigChangeEvent) DummyListener.getLastEvent(); + assertEquals(event.getOldValue(), "[test pattern modified]"); + assertEquals(event.getNewValue(), metaConfVal); + } + + @Test + public void testMetaConfNotifyListenersNonClosingClient() throws Exception { + HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(conf, null); + nonClosingClient.setMetaConf(metaConfKey, "[test pattern modified]"); + ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent(); + assertEquals(event.getOldValue(), metaConfVal); + assertEquals(event.getNewValue(), "[test pattern modified]"); + // This should also trigger meta listener notification via TServerEventHandler#deleteContext + nonClosingClient.getTTransport().close(); + + Thread.sleep(2 * 1000); + + event = (ConfigChangeEvent) DummyListener.getLastEvent(); + assertEquals(event.getOldValue(), "[test pattern modified]"); + assertEquals(event.getNewValue(), metaConfVal); + } + + @Test + public void testMetaConfDuplicateNotification() throws Exception { + HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null); + closingClient.setMetaConf(metaConfKey, metaConfVal); + int beforeCloseNotificationEventCounts = DummyListener.notifyList.size(); + closingClient.close(); + + Thread.sleep(2 * 1000); + + int afterCloseNotificationEventCounts = DummyListener.notifyList.size(); + // Setting the key to the same value should not trigger a configChange event during shutdown + assertEquals(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts); + } + + @Test + public void testMetaConfSameHandler() throws Exception { + HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null); + closingClient.setMetaConf(metaConfKey, "[test pattern modified]"); + ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent(); + int beforeCloseNotificationEventCounts = DummyListener.notifyList.size(); + IHMSHandler beforeHandler = event.getHandler(); +
closingClient.close(); + + Thread.sleep(2 * 1000); + event = (ConfigChangeEvent) DummyListener.getLastEvent(); + int afterCloseNotificationEventCounts = DummyListener.notifyList.size(); + IHMSHandler afterHandler = event.getHandler(); + // Meta-conf cleanup should trigger an event to the listener + assertNotSame(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts); + // Both handlers should be the same + assertEquals(beforeHandler, afterHandler); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java new file mode 100644 index 0000000000..f692b0a87d --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * TestMetaStoreInitListener.
Test case for + * {@link org.apache.hadoop.hive.metastore.MetaStoreInitListener} + */ +public class TestMetaStoreInitListener { + private Configuration conf; + + @Before + public void setUp() throws Exception { + System.setProperty("hive.metastore.init.hooks", DummyMetaStoreInitListener.class.getName()); + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + MetaStoreTestUtils.setConfForStandloneMode(conf); + int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + } + + @Test + public void testMetaStoreInitListener() throws Exception { + // DummyMetaStoreInitListener's onInit is called during HMSHandler + // initialization and sets wasCalled to true + Assert.assertTrue(DummyMetaStoreInitListener.wasCalled); + } + +} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java similarity index 86% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java index 99b67bb5f1..3fdce48237 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -20,23 +20,28 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; -import junit.framework.Assert; -import junit.framework.TestCase; +import org.junit.Assert; +import org.junit.Test; /** * Test for unwrapping InvocationTargetException, which is thrown from * constructor of listener class */ -public class TestMetaStoreListenersError extends TestCase { +public class TestMetaStoreListenersError { + @Test public void testInitListenerException() throws Throwable { System.setProperty("hive.metastore.init.hooks", ErrorInitListener.class.getName()); + Configuration conf = MetastoreConf.newMetastoreConf(); + MetaStoreTestUtils.setConfForStandloneMode(conf); int port = MetaStoreTestUtils.findFreePort(); try { - HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); + HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf); + Assert.fail(); } catch (Throwable throwable) { Assert.assertEquals(MetaException.class, throwable.getClass()); Assert.assertEquals( @@ -47,13 +52,17 @@ public void testInitListenerException() throws Throwable { } } + @Test public void testEventListenerException() throws Throwable { System.setProperty("hive.metastore.init.hooks", ""); System.setProperty("hive.metastore.event.listeners", ErrorEventListener.class.getName()); + Configuration conf = MetastoreConf.newMetastoreConf(); + MetaStoreTestUtils.setConfForStandloneMode(conf); int port = MetaStoreTestUtils.findFreePort(); try { - HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge()); + HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf); + Assert.fail(); } catch (Throwable throwable) { Assert.assertEquals(MetaException.class, throwable.getClass()); Assert.assertEquals( diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index 24ea62edda..372dee6369 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -129,8 +129,7 @@ public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) { @Before public void setUp() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); - MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, - MockPartitionExpressionProxy.class.getName()); + MetaStoreTestUtils.setConfForStandloneMode(conf); objectStore = new ObjectStore(); objectStore.setConf(conf); @@ -462,8 +461,7 @@ public void testNonConfDatanucleusValueSet() { String value1 = "another_value"; Assume.assumeTrue(System.getProperty(key) == null); Configuration localConf = MetastoreConf.newMetastoreConf(); - MetastoreConf.setVar(localConf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, - MockPartitionExpressionProxy.class.getName()); + MetaStoreTestUtils.setConfForStandloneMode(localConf); localConf.set(key, value); localConf.set(key1, value1); objectStore = new ObjectStore(); @@ -537,8 +535,7 @@ public void testConcurrentAddNotifications() throws ExecutionException, Interrup .debug(NUM_THREADS + " threads going to add notification")); Configuration conf = MetastoreConf.newMetastoreConf(); - 
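For orientation: the ErrorInitListener and ErrorEventListener classes named in these two tests are defined later in the same file, outside the hunks shown here. Their defining property is a constructor that throws, so the server's reflective instantiation surfaces an InvocationTargetException, and the point of the test is that this gets unwrapped into the MetaException asserted above. A hypothetical sketch of that shape (name and body illustrative, not the real classes):

    // Hypothetical; the real ErrorInitListener lives further down in this file.
    public static class FailingInitListener extends MetaStoreInitListener {
      public FailingInitListener(Configuration conf) throws MetaException {
        super(conf);
        // Thrown from the constructor: reflection wraps this in an
        // InvocationTargetException, which the metastore must unwrap.
        throw new MetaException("induced failure for testing");
      }

      @Override
      public void onInit(MetaStoreInitContext context) throws MetaException {
        // Never reached; the constructor always throws.
      }
    }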
MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, - MockPartitionExpressionProxy.class.getName()); + MetaStoreTestUtils.setConfForStandloneMode(conf); /* Below are the properties that need to be set based on what database this test is going to be run */ diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java similarity index 62% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java index 1695bfd0fa..b4e5a85a9c 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,31 +23,35 @@ import java.sql.DriverManager; import java.sql.SQLException; +import java.util.concurrent.TimeUnit; import javax.jdo.JDOCanRetryException; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestObjectStoreInitRetry { - - private static boolean noisy = true; // switch to true to see line number debug traces for FakeDerby calls + private static final Logger LOG = LoggerFactory.getLogger(TestObjectStoreInitRetry.class); private static int injectConnectFailure = 0; - public static void setInjectConnectFailure(int x){ + private static void setInjectConnectFailure(int x){ injectConnectFailure = x; } - public static int getInjectConnectFailure(){ + private static int getInjectConnectFailure(){ return injectConnectFailure; } - public static void decrementInjectConnectFailure(){ + private static void decrementInjectConnectFailure(){ injectConnectFailure--; } @@ -63,53 +67,54 @@ public static void oneTimeTearDown() throws SQLException { DriverManager.deregisterDriver(new FakeDerby()); } - public static void misbehave() throws RuntimeException{ + static void misbehave() throws RuntimeException{ TestObjectStoreInitRetry.debugTrace(); if (TestObjectStoreInitRetry.getInjectConnectFailure() > 0){ TestObjectStoreInitRetry.decrementInjectConnectFailure(); RuntimeException re = new JDOCanRetryException(); - if (noisy){ - System.err.println("MISBEHAVE:" + TestObjectStoreInitRetry.getInjectConnectFailure()); - re.printStackTrace(System.err); - } + LOG.debug("MISBEHAVE:" + TestObjectStoreInitRetry.getInjectConnectFailure(), re); throw re; } } // debug instrumenter - useful in finding which fns get called, and how often - public static void debugTrace() { - if (noisy){ + static void debugTrace() { + if (LOG.isDebugEnabled()){ Exception e = new Exception(); - System.err.println("." + e.getStackTrace()[1].getLineNumber() + ":" + TestObjectStoreInitRetry.getInjectConnectFailure()); + LOG.debug("." 
+ e.getStackTrace()[1].getLineNumber() + ":" + TestObjectStoreInitRetry.getInjectConnectFailure()); } } - protected static HiveConf hiveConf; + protected static Configuration conf; @Test public void testObjStoreRetry() throws Exception { - hiveConf = new HiveConf(this.getClass()); + conf = MetastoreConf.newMetastoreConf(); - hiveConf.setIntVar(ConfVars.HMSHANDLERATTEMPTS, 4); - hiveConf.setVar(ConfVars.HMSHANDLERINTERVAL, "1s"); - hiveConf.setVar(ConfVars.METASTORE_CONNECTION_DRIVER,FakeDerby.class.getName()); - hiveConf.setBoolVar(ConfVars.METASTORE_TRY_DIRECT_SQL,true); - String jdbcUrl = hiveConf.get(ConfVars.METASTORECONNECTURLKEY.varname); + MetastoreConf.setLongVar(conf, ConfVars.HMSHANDLERATTEMPTS, 4); + MetastoreConf.setTimeVar(conf, ConfVars.HMSHANDLERINTERVAL, 1, TimeUnit.SECONDS); + MetastoreConf.setVar(conf, ConfVars.CONNECTION_DRIVER,FakeDerby.class.getName()); + MetastoreConf.setBoolVar(conf, ConfVars.TRY_DIRECT_SQL,true); + String jdbcUrl = MetastoreConf.getVar(conf, ConfVars.CONNECTURLKEY); jdbcUrl = jdbcUrl.replace("derby","fderby"); - hiveConf.setVar(ConfVars.METASTORECONNECTURLKEY,jdbcUrl); + MetastoreConf.setVar(conf, ConfVars.CONNECTURLKEY,jdbcUrl); + MetaStoreTestUtils.setConfForStandloneMode(conf); + + FakeDerby fd = new FakeDerby(); ObjectStore objStore = new ObjectStore(); Exception savE = null; try { setInjectConnectFailure(5); - objStore.setConf(hiveConf); + objStore.setConf(conf); + Assert.fail(); } catch (Exception e) { - e.printStackTrace(System.err); + LOG.info("Caught exception ", e); savE = e; } - /** + /* * A note on retries. * * We've configured a total of 4 attempts. @@ -120,7 +125,7 @@ public void testObjStoreRetry() throws Exception { assertNotNull(savE); setInjectConnectFailure(0); - objStore.setConf(hiveConf); + objStore.setConf(conf); assertEquals(0, getInjectConnectFailure()); } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java index bf8556d3b0..6a44833a67 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java @@ -89,9 +89,8 @@ public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) { @Before public void setUp() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); - MetastoreConf.setClass(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, - MockPartitionExpressionProxy.class, PartitionExpressionProxy.class); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.STATS_FETCH_BITVECTOR, false); + MetaStoreTestUtils.setConfForStandloneMode(conf); store = new ObjectStore(); store.setConf(conf); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java similarity index 84% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java index e3e175bc65..180a66694f 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java @@ -1,4 +1,4 @@ -/** +/* * 
Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,9 +23,9 @@ import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hive.cli.CliSessionState; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.junit.BeforeClass; import org.junit.Test; @@ -35,16 +35,15 @@ public class TestPartitionNameWhitelistValidation { private static final String partitionValidationPattern = "[\\x20-\\x7E&&[^,]]*"; - private static HiveConf hiveConf; + private static Configuration conf; private static HiveMetaStoreClient msc; @BeforeClass public static void setupBeforeClass() throws Exception { - System.setProperty(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.varname, - partitionValidationPattern); - hiveConf = new HiveConf(); - SessionState.start(new CliSessionState(hiveConf)); - msc = new HiveMetaStoreClient(hiveConf); + System.setProperty(ConfVars.PARTITION_NAME_WHITELIST_PATTERN.toString(), partitionValidationPattern); + conf = MetastoreConf.newMetastoreConf(); + MetaStoreTestUtils.setConfForStandloneMode(conf); + msc = new HiveMetaStoreClient(conf); } // Runs an instance of DisallowUnicodePreEventListener @@ -61,7 +60,7 @@ private boolean runValidation(List partVals) { // Sample data private List getPartValsWithUnicode() { - List partVals = new ArrayList(); + List partVals = new ArrayList<>(); partVals.add("klâwen"); partVals.add("tägelîch"); @@ -69,7 +68,7 @@ private boolean runValidation(List partVals) { } private List getPartValsWithCommas() { - List partVals = new ArrayList(); + List partVals = new ArrayList<>(); partVals.add("a,b"); partVals.add("c,d,e,f"); @@ -77,7 +76,7 @@ private boolean runValidation(List partVals) { } private List getPartValsWithValidCharacters() { - List partVals = new ArrayList(); + List partVals = new ArrayList<>(); partVals.add("part1"); partVals.add("part2"); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java similarity index 70% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java index ec84e66b8e..8976474b01 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
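A note on the pattern this test pins down: "[\\x20-\\x7E&&[^,]]*" uses Java's character-class intersection to accept any run of printable ASCII characters except commas, which is exactly why the unicode and comma sample values above are expected to fail validation while the plain ASCII ones pass. The regex semantics in isolation (a standalone sketch, independent of the metastore):

    import java.util.regex.Pattern;

    public class WhitelistPatternCheck {
      public static void main(String[] args) {
        Pattern p = Pattern.compile("[\\x20-\\x7E&&[^,]]*");
        System.out.println(p.matcher("part1").matches());  // true: printable ASCII only
        System.out.println(p.matcher("a,b").matches());    // false: comma is excluded
        System.out.println(p.matcher("klâwen").matches()); // false: non-ASCII character
      }
    }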
See the NOTICE file * distributed with this work for additional information @@ -18,10 +18,11 @@ package org.apache.hadoop.hive.metastore; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; -import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.junit.Assert; +import org.junit.Before; public class TestRemoteHiveMetaStore extends TestHiveMetaStore { @@ -33,17 +34,18 @@ public TestRemoteHiveMetaStore() { isThriftClient = true; } - @Override - protected void setUp() throws Exception { + @Before + public void setUp() throws Exception { super.setUp(); if (isServerStarted) { - assertNotNull("Unable to connect to the MetaStore server", client); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); + Assert.assertNotNull("Unable to connect to the MetaStore server", client); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); return; } - port = MetaStoreTestUtils.startMetaStoreWithRetry(hiveConf); + port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), + conf); System.out.println("Starting MetaStore Server on port " + port); isServerStarted = true; @@ -53,8 +55,8 @@ protected void setUp() throws Exception { @Override protected HiveMetaStoreClient createClient() throws Exception { - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - hiveConf.setBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI, false); - return new HiveMetaStoreClient(hiveConf); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + MetastoreConf.setBoolVar(conf, ConfVars.EXECUTE_SET_UGI, false); + return new HiveMetaStoreClient(conf); } } \ No newline at end of file diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java new file mode 100644 index 0000000000..370cd28adf --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.junit.Before; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + * TestRemoteHiveMetaStoreIpAddress. + * + * Test which checks that the remote Hive metastore stores the proper IP address using + * IpAddressListener + */ +public class TestRemoteHiveMetaStoreIpAddress { + private static final Logger LOG = LoggerFactory.getLogger(TestRemoteHiveMetaStoreIpAddress.class); + private static Configuration conf; + private static HiveMetaStoreClient msc; + + @Before + public void setUp() throws Exception { + conf = MetastoreConf.newMetastoreConf(); + + + System.setProperty(ConfVars.EVENT_LISTENERS.toString(), IpAddressListener.class.getName()); + MetaStoreTestUtils.setConfForStandloneMode(conf); + int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf); + LOG.debug("Starting MetaStore Server on port " + port); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + + msc = new HiveMetaStoreClient(conf); + } + + @Test + public void testIpAddress() throws Exception { + Database db = new Database(); + db.setName("testIpAddressIp"); + msc.createDatabase(db); + msc.dropDatabase(db.getName()); + } +} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java similarity index 87% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java index 86582620da..92d2d0e24c 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hive.metastore; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; + +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; public class TestRemoteUGIHiveMetaStoreIpAddress extends TestRemoteHiveMetaStoreIpAddress { public TestRemoteUGIHiveMetaStoreIpAddress() { - super(); - System.setProperty(ConfVars.METASTORE_EXECUTE_SET_UGI.varname, "true"); + System.setProperty(MetastoreConf.ConfVars.EXECUTE_SET_UGI.toString(), "true"); } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java new file mode 100644 index 0000000000..badcd60ba1 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * TestRetryingHMSHandler. Test case for + * {@link org.apache.hadoop.hive.metastore.RetryingHMSHandler} + */ +public class TestRetryingHMSHandler { + private Configuration conf; + private HiveMetaStoreClient msc; + + @Before + public void setUp() throws Exception { + System.setProperty("hive.metastore.pre.event.listeners", + AlternateFailurePreListener.class.getName()); + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3); + MetastoreConf.setLongVar(conf, ConfVars.HMSHANDLERATTEMPTS, 2); + MetastoreConf.setTimeVar(conf, ConfVars.HMSHANDLERINTERVAL, 0, TimeUnit.MILLISECONDS); + MetastoreConf.setBoolVar(conf, ConfVars.HMSHANDLERFORCERELOADCONF, false); + MetaStoreTestUtils.setConfForStandloneMode(conf); + int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + msc = new HiveMetaStoreClient(conf); + } + + // Create a database and a table in that database. 
Because the AlternateFailurePreListener is + // being used, each attempt to create something should require two calls by the RetryingHMSHandler + @Test + public void testRetryingHMSHandler() throws Exception { + String dbName = "hive4159"; + String tblName = "tmptbl"; + + Database db = new Database(); + db.setName(dbName); + msc.createDatabase(db); + + Assert.assertEquals(2, AlternateFailurePreListener.getCallCount()); + + Table tbl = new TableBuilder() + .setDbName(dbName) + .setTableName(tblName) + .addCol("c1", ColumnType.STRING_TYPE_NAME) + .build(); + + msc.createTable(tbl); + + Assert.assertEquals(4, AlternateFailurePreListener.getCallCount()); + } + +} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java similarity index 88% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java index 98708a6284..e34d089e10 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.metastore; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; public class TestSetUGIOnBothClientServer extends TestRemoteHiveMetaStore{ @@ -26,6 +26,6 @@ public TestSetUGIOnBothClientServer() { super(); isThriftClient = true; // This will turn on setugi on both client and server processes of the test. - System.setProperty(ConfVars.METASTORE_EXECUTE_SET_UGI.varname, "true"); + System.setProperty(MetastoreConf.ConfVars.EXECUTE_SET_UGI.toString(), "true"); } } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java similarity index 76% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java index 1a9abc9c8c..beff65656a 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
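The call counts asserted in testRetryingHMSHandler above (2 after the createDatabase, 4 after the createTable) follow from AlternateFailurePreListener failing on every other invocation, so each successful metastore call costs exactly two passes through RetryingHMSHandler. A sketch of that alternating behavior, inferred from the assertions rather than copied from the real listener:

    // Inferred shape of an alternating-failure pre-event listener (illustrative only).
    public static class AlternatingFailurePreListener extends MetaStorePreEventListener {
      private static int callCount = 0;

      public AlternatingFailurePreListener(Configuration conf) {
        super(conf);
      }

      @Override
      public void onEvent(PreEventContext context) throws MetaException {
        callCount++;
        if (callCount % 2 == 1) {
          // Odd-numbered calls fail; RetryingHMSHandler retries and the retry succeeds.
          throw new MetaException("injected failure to force a retry");
        }
      }

      public static int getCallCount() {
        return callCount;
      }
    }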
See the NOTICE file * distributed with this work for additional information @@ -18,14 +18,15 @@ package org.apache.hadoop.hive.metastore; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; public class TestSetUGIOnOnlyClient extends TestRemoteHiveMetaStore{ @Override protected HiveMetaStoreClient createClient() throws Exception { - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI, true); - return new HiveMetaStoreClient(hiveConf); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + MetastoreConf.setBoolVar(conf, ConfVars.EXECUTE_SET_UGI, true); + return new HiveMetaStoreClient(conf); } } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java similarity index 77% rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java rename to standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java index b45fd011b9..bec5a5512d 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,14 +18,15 @@ package org.apache.hadoop.hive.metastore; -import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; public class TestSetUGIOnOnlyServer extends TestSetUGIOnBothClientServer { @Override protected HiveMetaStoreClient createClient() throws Exception { - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI, false); - return new HiveMetaStoreClient(hiveConf); + MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + MetastoreConf.setBoolVar(conf, ConfVars.EXECUTE_SET_UGI, false); + return new HiveMetaStoreClient(conf); } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index c0e84fcfbc..b9a8f61c69 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.TestObjectStore.MockPartitionExpressionProxy; @@ -60,8 +61,7 @@ public void setUp() throws Exception { Configuration conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); - MetastoreConf.setVar(conf, 
MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, - MockPartitionExpressionProxy.class.getName()); + MetaStoreTestUtils.setConfForStandloneMode(conf); objectStore = new ObjectStore(); objectStore.setConf(conf); cachedStore = new CachedStore();
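One pattern worth calling out across these hunks: every test that previously wired up EXPRESSION_PROXY_CLASS by hand (MockPartitionExpressionProxy here, DefaultPartitionExpressionProxy elsewhere in the patch) now calls MetaStoreTestUtils.setConfForStandloneMode(conf) instead. Judging only from the call sites in this patch, the helper centralizes whatever configuration a test needs to run the metastore without the Hive ql module on the classpath; roughly along these lines (an inferred sketch, not the actual implementation):

    // Inferred from the call sites in this patch; the real helper may set more.
    public static void setConfForStandloneMode(Configuration conf) {
      // Stand-alone tests cannot load ql's partition-expression evaluator, so
      // point the metastore at the proxy bundled with standalone-metastore.
      MetastoreConf.setClass(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
          DefaultPartitionExpressionProxy.class, PartitionExpressionProxy.class);
    }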