commit c8162fede5ffe872914beb6aff3c37f920554c07 Author: Alan Gates Date: Wed Jan 20 17:08:46 2016 -0800 Added DualStore. diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 5c82c42..18633ae 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -444,6 +444,21 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { " invalided by updates or partition drops before this. Default is one week."), METASTORE_HBASE_FILE_METADATA_THREADS("hive.metastore.hbase.file.metadata.threads", 1, "Number of threads to use to read file metadata in background to cache it."), + METASTORE_DUAL_RAWSTORE_PRIMARY("hive.metastore.dual.rawstore.primary", + "org.apache.hadoop.hive.metastore.ObjectStore", + "When the RawStore implementation is set to DualStore, which RawStore to use as the " + + "primary"), + METASTORE_DUAL_RAWSTORE_SECONDARY("hive.metastore.dual.rawstore.secondary", + "org.apache.hadoop.hive.metastore.hbase.HBaseStore", + "When the RawStore implementation is set to DualStore, which RawStore to use as the " + + "secondary"), + METASTORE_DUAL_RAWSTORE_READER("hive.metastore.dual.rawstore.reader", "primary", + new StringSet(false, "primary", "secondary"), + "Whether the primary or secondary store should be used for reading. Valid values are " + + "'primary' and 'secondary'"), + METASTORE_DUAL_TXNHANDLER("hive.metastore.dual.txnhandler", + "org.apache.hadoop.hive.metastore.txn.TxnHandler", + "When the TxnHandler implementation is set to DualStore, which TxnHandler to use"), METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3, "Number of retries while opening a connection to metastore"), @@ -651,7 +666,7 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore", "Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. \n" + "This class is used to store and retrieval of raw metadata objects such as table, database"), - METASTORE_TXN_HANDLER_IMPL("hive.metastore.txnhandler.impl", + METASTORE_TXNHANDLER_IMPL("hive.metastore.txnhandler.impl", "org.apache.hadoop.hive.metastore.txn.TxnHandlerRdbms", "Name of class that implements org.apache.hadoop.hive.metastore.txn.TxnHandler. This " + "class is used to store and retrieve transactions and locks"), diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestDualStore.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestDualStore.java new file mode 100644 index 0000000..21e0f89 --- /dev/null +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestDualStore.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.ObjectStore; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.txn.TxnHandlerRdbms; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; + +public class TestDualStore extends HBaseIntegrationTests { + private static final Logger LOG = LoggerFactory.getLogger(TestDualStore.class.getName()); + + @Rule public ExpectedException thrown = ExpectedException.none(); + + private RawStore rs; + + @BeforeClass + public static void startup() throws Exception { + HBaseIntegrationTests.startMiniCluster(); + } + + @AfterClass + public static void shutdown() throws Exception { + HBaseIntegrationTests.shutdownMiniCluster(); + } + + @Before + public void setup() throws Exception { + conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, DualStore.class.getName()); + conf.setVar(HiveConf.ConfVars.METASTORE_DUAL_RAWSTORE_PRIMARY, ObjectStore.class.getName()); + conf.setVar(HiveConf.ConfVars.METASTORE_DUAL_RAWSTORE_SECONDARY, HBaseStore.class.getName()); + conf.setVar(HiveConf.ConfVars.METASTORE_DUAL_TXNHANDLER, TxnHandlerRdbms.class.getName()); + // Turn off ZK, else it makes a mess + conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + rs = new DualStore(); + } + + @Test + public void dbPrimary() throws Exception { + conf.setVar(HiveConf.ConfVars.METASTORE_DUAL_RAWSTORE_READER, "primary"); + rs.setConf(conf); + db("primary"); + } + + @Test + public void dbSecondary() throws Exception { + conf.setVar(HiveConf.ConfVars.METASTORE_DUAL_RAWSTORE_READER, "secondary"); + rs.setConf(conf); + db("secondary"); + } + + private void db(String name) throws Exception { + Database db = new Database(name, "description", "hdfs://here", new HashMap()); + rs.createDatabase(db); + db.setOwnerType(PrincipalType.USER); // Have to do this so the equals works + Database fetched = rs.getDatabase(name); + Assert.assertEquals(db, fetched); + + Database alteredDb = new Database(db); + alteredDb.setOwnerName("fred"); + rs.alterDatabase(name, alteredDb); + fetched = rs.getDatabase(name); + Assert.assertEquals(alteredDb, fetched); + + rs.dropDatabase(name); + thrown.expect(NoSuchObjectException.class); + rs.getDatabase(name); + } +} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java index 24bebb8..e308363 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java @@ -298,26 +298,26 @@ public void oneMondoTest() 
throws Exception { out = new PrintStream(outStr); tool.go(false, HBaseReadWrite.DB_TABLE, "db0", null, conf, out, err); Assert.assertEquals("{\"name\":\"db0\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep, outStr.toString()); + "\"locationUri\":\"file:///tmp\",\"parameters\":{},\"ownerType\":1}" + lsep, outStr.toString()); outStr = new ByteArrayOutputStream(); out = new PrintStream(outStr); tool.go(false, HBaseReadWrite.DB_TABLE, null, ".*", conf, out, err); Assert.assertEquals("{\"name\":\"db0\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep + + "\"locationUri\":\"file:///tmp\",\"parameters\":{},\"ownerType\":1}" + lsep + "{\"name\":\"db1\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep + + "\"locationUri\":\"file:///tmp\",\"parameters\":{},\"ownerType\":1}" + lsep + "{\"name\":\"db2\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep, + "\"locationUri\":\"file:///tmp\",\"parameters\":{},\"ownerType\":1}" + lsep, outStr.toString()); outStr = new ByteArrayOutputStream(); out = new PrintStream(outStr); tool.go(false, HBaseReadWrite.DB_TABLE, null, "db[12]", conf, out, err); Assert.assertEquals("{\"name\":\"db1\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep + + "\"locationUri\":\"file:///tmp\",\"parameters\":{},\"ownerType\":1}" + lsep + "{\"name\":\"db2\",\"description\":\"no description\"," + - "\"locationUri\":\"file:///tmp\",\"parameters\":{}}" + lsep, + "\"locationUri\":\"file:///tmp\",\"parameters\":{},\"ownerType\":1}" + lsep, outStr.toString()); String[] roleNames = new String[2]; diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/DualStore.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/DualStore.java new file mode 100644 index 0000000..a407211 --- /dev/null +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/DualStore.java @@ -0,0 +1,1218 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.hbase; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.FileMetadataHandler; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.RawStore; +import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.CheckLockRequest; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; +import org.apache.hadoop.hive.metastore.api.CompactionRequest; +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; +import org.apache.hadoop.hive.metastore.api.HeartbeatRequest; +import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest; +import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.InvalidInputException; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockResponse; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchLockException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; +import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; +import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; +import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; +import 
org.apache.hadoop.hive.metastore.api.TxnAbortedException; +import org.apache.hadoop.hive.metastore.api.TxnOpenException; +import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; +import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.UnlockRequest; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.TxnHandler; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.nio.ByteBuffer; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +/** + * A proxy class that wraps two RawStores, intended for testing new RawStore implementations. + * One store is the primary: all reads are served from it, and a failed write to it is + * returned to the caller as an error. Writes go to both stores; writes to the secondary are + * done asynchronously, and if one fails the error is logged but the operation still succeeds. + */ +public class DualStore implements RawStore, TxnHandler { + private static final Logger LOG = LoggerFactory.getLogger(DualStore.class.getName()); + + private static Map<String, Method> methods; + + private HiveConf ourConf; + private HiveConf primaryConf; + private HiveConf secondaryConf; + private RawStore primaryRawStore; + private RawStore secondaryRawStore; + private RawStore readRawStore; + private TxnHandler txnHandler; + private ExecutorService threadPool; + + public DualStore() { + } + + @Override + public void setConf(Configuration conf) { + if (conf instanceof HiveConf) init((HiveConf)conf); + else init(new HiveConf(conf, DualStore.class)); + } + + @Override + public Configuration getConf() { + return ourConf; + } + + @Override + public void setConf(HiveConf conf) { + init(conf); + } + + private void init(HiveConf conf) { + try { + ourConf = conf; + primaryConf = new HiveConf(ourConf); + String primaryRsName = ourConf.getVar(HiveConf.ConfVars.METASTORE_DUAL_RAWSTORE_PRIMARY); + primaryConf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, primaryRsName); + String txnHandlerName = ourConf.getVar(HiveConf.ConfVars.METASTORE_DUAL_TXNHANDLER); + primaryConf.setVar(HiveConf.ConfVars.METASTORE_TXNHANDLER_IMPL, txnHandlerName); + primaryRawStore = MetaStoreUtils.<RawStore>getClass(primaryRsName).newInstance(); + // Give each wrapped store its own conf so it sees the impls configured for it + primaryRawStore.setConf(primaryConf); + txnHandler = TxnUtils.getTxnHandler(primaryConf); + + secondaryConf = new HiveConf(ourConf); + String secondaryRsName = ourConf.getVar(HiveConf.ConfVars.METASTORE_DUAL_RAWSTORE_SECONDARY); + secondaryConf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, secondaryRsName); + secondaryConf.setVar(HiveConf.ConfVars.METASTORE_TXNHANDLER_IMPL, txnHandlerName); + secondaryRawStore = MetaStoreUtils.<RawStore>getClass(secondaryRsName).newInstance(); + secondaryRawStore.setConf(secondaryConf); + + String readRsName = ourConf.getVar(HiveConf.ConfVars.METASTORE_DUAL_RAWSTORE_READER); + if ("primary".equals(readRsName)) readRawStore = primaryRawStore; + else if
("secondary".equals(readRsName)) readRawStore = secondaryRawStore; + else throw new RuntimeException("Unknown rawStore for reading: " + readRsName); + + threadPool = Executors.newSingleThreadExecutor(); + + buildMethodMap(); + } catch (Exception e) { + LOG.error("Unable to instantiate wrapped stores", e); + throw new RuntimeException(e); + } + } + + private class SecondaryRunner implements Runnable { + private final Method method; + private final Object[] args; + + SecondaryRunner(Method m, Object... args) { + method = m; + this.args = args; + } + + @Override + public void run() { + try { + method.invoke(secondaryRawStore, args); + } catch (IllegalAccessException|InvocationTargetException e) { + LOG.error("Failed to invoke method " + method.getName() + " on secondary store. " + + "Secondary store is now out of sync.", e); + } + } + } + + @Override + public void shutdown() { + threadPool.execute(new SecondaryRunner(methods.get("shutdown"), null)); + primaryRawStore.shutdown(); + + } + + @Override + public boolean openTransaction() { + threadPool.execute(new SecondaryRunner(methods.get("openTransaction"), null)); + return primaryRawStore.openTransaction(); + } + + @Override + public boolean commitTransaction() { + threadPool.execute(new SecondaryRunner(methods.get("commitTransaction"), null)); + return primaryRawStore.commitTransaction(); + } + + @Override + public void rollbackTransaction() { + threadPool.execute(new SecondaryRunner(methods.get("rollbackTransaction"), null)); + primaryRawStore.rollbackTransaction(); + + } + + @Override + public void createDatabase(Database db) throws InvalidObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("createDatabase"), db)); + primaryRawStore.createDatabase(db); + } + + @Override + public Database getDatabase(String name) throws NoSuchObjectException { + return readRawStore.getDatabase(name); + } + + @Override + public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("dropDatabase"), dbname)); + return primaryRawStore.dropDatabase(dbname); + } + + @Override + public boolean alterDatabase(String dbname, Database db) + throws NoSuchObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("alterDatabase"), dbname, db)); + return primaryRawStore.alterDatabase(dbname, db); + } + + @Override + public List getDatabases(String pattern) throws MetaException { + return readRawStore.getDatabases(pattern); + } + + @Override + public List getAllDatabases() throws MetaException { + return readRawStore.getAllDatabases(); + } + + @Override + public boolean createType(Type type) { + threadPool.execute(new SecondaryRunner(methods.get("createType"), type)); + return primaryRawStore.createType(type); + } + + @Override + public Type getType(String typeName) { + return readRawStore.getType(typeName); + } + + @Override + public boolean dropType(String typeName) { + threadPool.execute(new SecondaryRunner(methods.get("dropType"), typeName)); + return primaryRawStore.dropType(typeName); + } + + @Override + public void createTable(Table tbl) throws InvalidObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("createTable"), tbl)); + primaryRawStore.createTable(tbl); + } + + @Override + public boolean dropTable(String dbName, String tableName) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { + threadPool.execute(new 
SecondaryRunner(methods.get("dropTable"), dbName, tableName)); + return primaryRawStore.dropTable(dbName, tableName); + } + + @Override + public Table getTable(String dbName, String tableName) throws MetaException { + return readRawStore.getTable(dbName, tableName); + } + + @Override + public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("addPartition"), part)); + return primaryRawStore.addPartition(part); + } + + @Override + public boolean addPartitions(String dbName, String tblName, List parts) + throws InvalidObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("addPartitions1"), dbName, tblName, parts)); + return primaryRawStore.addPartitions(dbName, tblName, parts); + } + + @Override + public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, + boolean ifNotExists) throws InvalidObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("addPartitions2"), dbName, tblName, + partitionSpec, ifNotExists)); + return primaryRawStore.addPartitions(dbName, tblName, partitionSpec, ifNotExists); + } + + @Override + public Partition getPartition(String dbName, String tableName, List part_vals) + throws MetaException, NoSuchObjectException { + return readRawStore.getPartition(dbName, tableName, part_vals); + } + + @Override + public boolean doesPartitionExist(String dbName, String tableName, List part_vals) + throws MetaException, NoSuchObjectException { + return readRawStore.doesPartitionExist(dbName, tableName, part_vals); + } + + @Override + public boolean dropPartition(String dbName, String tableName, List part_vals) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { + threadPool.execute(new SecondaryRunner(methods.get("dropPartition"), dbName, tableName, part_vals)); + return primaryRawStore.dropPartition(dbName, tableName, part_vals); + } + + @Override + public List getPartitions(String dbName, String tableName, int max) + throws MetaException, NoSuchObjectException { + return readRawStore.getPartitions(dbName, tableName, max); + } + + @Override + public void alterTable(String dbname, String name, Table newTable) + throws InvalidObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("alterTable"), dbname, name, newTable)); + primaryRawStore.alterTable(dbname, name, newTable); + } + + @Override + public List getTables(String dbName, String pattern) throws MetaException { + return readRawStore.getTables(dbName, pattern); + } + + @Override + public List getTableMeta(String dbNames, String tableNames, + List tableTypes) throws MetaException { + return readRawStore.getTableMeta(dbNames, tableNames, tableTypes); + } + + @Override + public List getTableObjectsByName(String dbname, List tableNames) + throws MetaException, UnknownDBException { + return readRawStore.getTableObjectsByName(dbname, tableNames); + } + + @Override + public List getAllTables(String dbName) throws MetaException { + return readRawStore.getAllTables(dbName); + } + + @Override + public List listTableNamesByFilter(String dbName, String filter, short max_tables) + throws MetaException, UnknownDBException { + return readRawStore.listTableNamesByFilter(dbName, filter, max_tables); + } + + @Override + public List listPartitionNames(String db_name, String tbl_name, short max_parts) + throws MetaException { + return readRawStore.listPartitionNames(db_name, tbl_name, 
max_parts); + } + + @Override + public List listPartitionNamesByFilter(String db_name, String tbl_name, String filter, + short max_parts) throws MetaException { + return readRawStore.listPartitionNamesByFilter(db_name, tbl_name, filter, max_parts); + } + + @Override + public void alterPartition(String db_name, String tbl_name, List part_vals, + Partition new_part) throws InvalidObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("alterPartition"), db_name, tbl_name, + part_vals, new_part)); + primaryRawStore.alterPartition(db_name, tbl_name, part_vals, new_part); + + } + + @Override + public void alterPartitions(String db_name, String tbl_name, List> part_vals_list, + List new_parts) + throws InvalidObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("alterPartitions"), db_name, tbl_name, + part_vals_list, new_parts)); + primaryRawStore.alterPartitions(db_name, tbl_name, part_vals_list, new_parts); + + } + + @Override + public boolean addIndex(Index index) throws InvalidObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("addIndex"), index)); + return primaryRawStore.addIndex(index); + } + + @Override + public Index getIndex(String dbName, String origTableName, String indexName) + throws MetaException { + return readRawStore.getIndex(dbName, origTableName, indexName); + } + + @Override + public boolean dropIndex(String dbName, String origTableName, String indexName) + throws MetaException { + threadPool.execute(new SecondaryRunner(methods.get("dropIndex"), dbName, origTableName, indexName)); + return primaryRawStore.dropIndex(dbName, origTableName, indexName); + } + + @Override + public List getIndexes(String dbName, String origTableName, int max) throws MetaException { + return readRawStore.getIndexes(dbName, origTableName, max); + } + + @Override + public List listIndexNames(String dbName, String origTableName, short max) + throws MetaException { + return readRawStore.listIndexNames(dbName, origTableName, max); + } + + @Override + public void alterIndex(String dbname, String baseTblName, String name, Index newIndex) + throws InvalidObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("alterIndex"), dbname, baseTblName, name, newIndex)); + primaryRawStore.alterIndex(dbname, baseTblName, name, newIndex); + } + + @Override + public List getPartitionsByFilter(String dbName, String tblName, String filter, + short maxParts) + throws MetaException, NoSuchObjectException { + return readRawStore.getPartitionsByFilter(dbName, tblName, filter, maxParts); + } + + @Override + public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + String defaultPartitionName, short maxParts, + List result) throws TException { + return readRawStore.getPartitionsByExpr(dbName, tblName, expr, defaultPartitionName, + maxParts, result); + } + + @Override + public int getNumPartitionsByFilter(String dbName, String tblName, String filter) + throws MetaException, NoSuchObjectException { + return readRawStore.getNumPartitionsByFilter(dbName, tblName, filter); + } + + @Override + public List getPartitionsByNames(String dbName, String tblName, + List partNames) + throws MetaException, NoSuchObjectException { + return readRawStore.getPartitionsByNames(dbName, tblName, partNames); + } + + @Override + public Table markPartitionForEvent(String dbName, String tblName, Map partVals, + PartitionEventType evtType) throws MetaException, + UnknownTableException, 
InvalidPartitionException, UnknownPartitionException { + threadPool.execute(new SecondaryRunner(methods.get("markPartitionForEvent"), dbName, tblName, + partVals, evtType)); + return primaryRawStore.markPartitionForEvent(dbName, tblName, partVals, evtType); + } + + @Override + public boolean isPartitionMarkedForEvent(String dbName, String tblName, + Map partName, + PartitionEventType evtType) throws MetaException, + UnknownTableException, InvalidPartitionException, UnknownPartitionException { + return readRawStore.isPartitionMarkedForEvent(dbName, tblName, partName, evtType); + } + + @Override + public boolean addRole(String rowName, String ownerName) throws InvalidObjectException, + MetaException, NoSuchObjectException { + threadPool.execute(new SecondaryRunner(methods.get("addRole"), rowName, ownerName)); + return primaryRawStore.addRole(rowName, ownerName); + } + + @Override + public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException { + threadPool.execute(new SecondaryRunner(methods.get("removeRole"), roleName)); + return primaryRawStore.removeRole(roleName); + } + + @Override + public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor, + PrincipalType grantorType, boolean grantOption) + throws MetaException, NoSuchObjectException, InvalidObjectException { + threadPool.execute(new SecondaryRunner(methods.get("grantRole"), role, userName, principalType, + grantor, grantorType, grantOption)); + return primaryRawStore.grantRole(role, userName, principalType, grantor, grantorType, + grantOption); + } + + @Override + public boolean revokeRole(Role role, String userName, PrincipalType principalType, + boolean grantOption) throws MetaException, NoSuchObjectException { + threadPool.execute(new SecondaryRunner(methods.get("revokeRole"), role, userName, principalType, + grantOption)); + return primaryRawStore.revokeRole(role, userName, principalType, grantOption); + } + + @Override + public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List groupNames) + throws InvalidObjectException, MetaException { + return readRawStore.getUserPrivilegeSet(userName, groupNames); + } + + @Override + public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, + List groupNames) + throws InvalidObjectException, MetaException { + return readRawStore.getDBPrivilegeSet(dbName, userName, groupNames); + } + + @Override + public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName, + String userName, List groupNames) + throws InvalidObjectException, MetaException { + return readRawStore.getTablePrivilegeSet(dbName, tableName, userName, groupNames); + } + + @Override + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName, + String partition, String userName, + List groupNames) + throws InvalidObjectException, MetaException { + return readRawStore.getPartitionPrivilegeSet(dbName, tableName, partition, userName, groupNames); + } + + @Override + public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName, + String partitionName, String columnName, + String userName, + List groupNames) throws + InvalidObjectException, MetaException { + return readRawStore.getColumnPrivilegeSet(dbName, tableName, partitionName, columnName, + userName, groupNames); + } + + @Override + public List listPrincipalGlobalGrants(String principalName, + PrincipalType principalType) { + return readRawStore.listPrincipalGlobalGrants(principalName, principalType); + } + + 
@Override + public List listPrincipalDBGrants(String principalName, + PrincipalType principalType, + String dbName) { + return readRawStore.listPrincipalDBGrants(principalName, principalType, dbName); + } + + @Override + public List listAllTableGrants(String principalName, + PrincipalType principalType, String dbName, + String tableName) { + return readRawStore.listAllTableGrants(principalName, principalType, dbName, tableName); + } + + @Override + public List listPrincipalPartitionGrants(String principalName, + PrincipalType principalType, + String dbName, String tableName, + List partValues, + String partName) { + return readRawStore.listPrincipalPartitionGrants(principalName, principalType, dbName, + tableName, partValues, partName); + } + + @Override + public List listPrincipalTableColumnGrants(String principalName, + PrincipalType principalType, + String dbName, String tableName, + String columnName) { + return readRawStore.listPrincipalTableColumnGrants(principalName, principalType, dbName, + tableName, columnName); + } + + @Override + public List listPrincipalPartitionColumnGrants(String principalName, + PrincipalType principalType, + String dbName, + String tableName, + List partValues, + String partName, + String columnName) { + return readRawStore.listPrincipalPartitionColumnGrants(principalName, principalType, + dbName, tableName, partValues, partName, columnName); + } + + @Override + public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException, + MetaException, NoSuchObjectException { + threadPool.execute(new SecondaryRunner(methods.get("grantPrivileges"), privileges)); + return primaryRawStore.grantPrivileges(privileges); + } + + @Override + public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) throws + InvalidObjectException, MetaException, NoSuchObjectException { + threadPool.execute(new SecondaryRunner(methods.get("revokePrivileges"), privileges, grantOption)); + return primaryRawStore.revokePrivileges(privileges, grantOption); + } + + @Override + public Role getRole(String roleName) throws NoSuchObjectException { + return readRawStore.getRole(roleName); + } + + @Override + public List listRoleNames() { + return readRawStore.listRoleNames(); + } + + @Override + public List listRoles(String principalName, PrincipalType principalType) { + return readRawStore.listRoles(principalName, principalType); + } + + @Override + public List listRolesWithGrants(String principalName, + PrincipalType principalType) { + return readRawStore.listRolesWithGrants(principalName, principalType); + } + + @Override + public List listRoleMembers(String roleName) { + return readRawStore.listRoleMembers(roleName); + } + + @Override + public Partition getPartitionWithAuth(String dbName, String tblName, List partVals, + String user_name, List group_names) + throws MetaException, NoSuchObjectException, InvalidObjectException { + return readRawStore.getPartitionWithAuth(dbName, tblName, partVals, user_name, group_names); + } + + @Override + public List getPartitionsWithAuth(String dbName, String tblName, short maxParts, + String userName, List groupNames) + throws MetaException, NoSuchObjectException, InvalidObjectException { + return readRawStore.getPartitionsWithAuth(dbName, tblName, maxParts, userName, groupNames); + } + + @Override + public List listPartitionNamesPs(String db_name, String tbl_name, List part_vals, + short max_parts) + throws MetaException, NoSuchObjectException { + return readRawStore.listPartitionNamesPs(db_name, tbl_name, part_vals, 
max_parts); + } + + @Override + public List listPartitionsPsWithAuth(String db_name, String tbl_name, + List part_vals, short max_parts, + String userName, List groupNames) + throws MetaException, InvalidObjectException, NoSuchObjectException { + return readRawStore.listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, + userName, groupNames); + } + + @Override + public boolean updateTableColumnStatistics(ColumnStatistics colStats) + throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + threadPool.execute(new SecondaryRunner(methods.get("updateTableColumnStatistics"), colStats)); + return primaryRawStore.updateTableColumnStatistics(colStats); + } + + @Override + public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, + List partVals) + throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + threadPool.execute(new SecondaryRunner(methods.get("updatePartitionColumnStatistics"), statsObj, + partVals)); + return primaryRawStore.updatePartitionColumnStatistics(statsObj, partVals); + } + + @Override + public ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + List colName) + throws MetaException, NoSuchObjectException { + return readRawStore.getTableColumnStatistics(dbName, tableName, colName); + } + + @Override + public List getPartitionColumnStatistics(String dbName, String tblName, + List partNames, + List colNames) + throws MetaException, NoSuchObjectException { + return readRawStore.getPartitionColumnStatistics(dbName, tblName, partNames, colNames); + } + + @Override + public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, + List partVals, String colName) + throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + threadPool.execute(new SecondaryRunner(methods.get("deletePartitionColumnStatistics"), + dbName, tableName, partName, partVals, colName)); + return primaryRawStore.deletePartitionColumnStatistics(dbName, tableName, partName, partVals, + colName); + } + + @Override + public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws + NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + threadPool.execute(new SecondaryRunner(methods.get("deleteTableColumnStatistics"), dbName, + tableName, colName)); + return primaryRawStore.deleteTableColumnStatistics(dbName, tableName, colName); + } + + @Override + public long cleanupEvents() { + threadPool.execute(new SecondaryRunner(methods.get("cleanupEvents"), null)); + return primaryRawStore.cleanupEvents(); + } + + @Override + public boolean addToken(String tokenIdentifier, String delegationToken) { + threadPool.execute(new SecondaryRunner(methods.get("addToken"), tokenIdentifier, delegationToken)); + return primaryRawStore.addToken(tokenIdentifier, delegationToken); + } + + @Override + public boolean removeToken(String tokenIdentifier) { + threadPool.execute(new SecondaryRunner(methods.get("removeToken"), tokenIdentifier)); + return primaryRawStore.removeToken(tokenIdentifier); + } + + @Override + public String getToken(String tokenIdentifier) { + return readRawStore.getToken(tokenIdentifier); + } + + @Override + public List getAllTokenIdentifiers() { + return readRawStore.getAllTokenIdentifiers(); + } + + @Override + public int addMasterKey(String key) throws MetaException { + threadPool.execute(new SecondaryRunner(methods.get("addMasterKey"), key)); + return 
primaryRawStore.addMasterKey(key); + } + + @Override + public void updateMasterKey(Integer seqNo, String key) throws NoSuchObjectException, + MetaException { + threadPool.execute(new SecondaryRunner(methods.get("updateMasterKey"), seqNo, key)); + primaryRawStore.updateMasterKey(seqNo, key); + + } + + @Override + public boolean removeMasterKey(Integer keySeq) { + threadPool.execute(new SecondaryRunner(methods.get("removeMasterKey"), keySeq)); + return primaryRawStore.removeMasterKey(keySeq); + } + + @Override + public String[] getMasterKeys() { + return readRawStore.getMasterKeys(); + } + + @Override + public void verifySchema() throws MetaException { + readRawStore.verifySchema(); + } + + @Override + public String getMetaStoreSchemaVersion() throws MetaException { + return readRawStore.getMetaStoreSchemaVersion(); + } + + @Override + public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException { + threadPool.execute(new SecondaryRunner(methods.get("setMetaStoreSchemaVersion"), version, comment)); + primaryRawStore.setMetaStoreSchemaVersion(version, comment); + + } + + @Override + public void dropPartitions(String dbName, String tblName, List partNames) + throws MetaException, NoSuchObjectException { + threadPool.execute(new SecondaryRunner(methods.get("dropPartitions"), dbName, tblName, partNames)); + primaryRawStore.dropPartitions(dbName, tblName, partNames); + + } + + @Override + public List listPrincipalDBGrantsAll(String principalName, + PrincipalType principalType) { + return readRawStore.listPrincipalDBGrantsAll(principalName, principalType); + } + + @Override + public List listPrincipalTableGrantsAll(String principalName, + PrincipalType principalType) { + return readRawStore.listPrincipalTableGrantsAll(principalName, principalType); + } + + @Override + public List listPrincipalPartitionGrantsAll(String principalName, + PrincipalType principalType) { + return readRawStore.listPrincipalPartitionGrantsAll(principalName, principalType); + } + + @Override + public List listPrincipalTableColumnGrantsAll(String principalName, + PrincipalType principalType) { + return readRawStore.listPrincipalTableColumnGrantsAll(principalName, principalType); + } + + @Override + public List listPrincipalPartitionColumnGrantsAll(String principalName, + PrincipalType principalType) { + return readRawStore.listPrincipalPartitionColumnGrantsAll(principalName, principalType); + } + + @Override + public List listGlobalGrantsAll() { + return readRawStore.listGlobalGrantsAll(); + } + + @Override + public List listDBGrantsAll(String dbName) { + return readRawStore.listDBGrantsAll(dbName); + } + + @Override + public List listPartitionColumnGrantsAll(String dbName, String tableName, + String partitionName, + String columnName) { + return readRawStore.listPartitionColumnGrantsAll(dbName, tableName, partitionName, columnName); + } + + @Override + public List listTableGrantsAll(String dbName, String tableName) { + return readRawStore.listTableGrantsAll(dbName, tableName); + } + + @Override + public List listPartitionGrantsAll(String dbName, String tableName, + String partitionName) { + return readRawStore.listPartitionGrantsAll(dbName, tableName, partitionName); + } + + @Override + public List listTableColumnGrantsAll(String dbName, String tableName, + String columnName) { + return readRawStore.listTableColumnGrantsAll(dbName, tableName, columnName); + } + + @Override + public void createFunction(Function func) throws InvalidObjectException, MetaException { + threadPool.execute(new 
SecondaryRunner(methods.get("createFunction"), func)); + primaryRawStore.createFunction(func); + } + + @Override + public void alterFunction(String dbName, String funcName, Function newFunction) + throws InvalidObjectException, MetaException { + threadPool.execute(new SecondaryRunner(methods.get("alterFunction"), dbName, funcName, newFunction)); + primaryRawStore.alterFunction(dbName, funcName, newFunction); + } + + @Override + public void dropFunction(String dbName, String funcName) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { + threadPool.execute(new SecondaryRunner(methods.get("dropFunction"), dbName, funcName)); + primaryRawStore.dropFunction(dbName, funcName); + } + + @Override + public Function getFunction(String dbName, String funcName) throws MetaException { + return readRawStore.getFunction(dbName, funcName); + } + + @Override + public List<Function> getAllFunctions() throws MetaException { + return readRawStore.getAllFunctions(); + } + + @Override + public List<String> getFunctions(String dbName, String pattern) throws MetaException { + return readRawStore.getFunctions(dbName, pattern); + } + + @Override + public AggrStats get_aggr_stats_for(String dbName, String tblName, List<String> partNames, + List<String> colNames) + throws MetaException, NoSuchObjectException { + return readRawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + } + + // Notifications are done only on the primary to avoid double notifications + @Override + public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) { + return primaryRawStore.getNextNotification(rqst); + } + + @Override + public void addNotificationEvent(NotificationEvent event) { + primaryRawStore.addNotificationEvent(event); + } + + @Override + public void cleanNotificationEvents(int olderThan) { + primaryRawStore.cleanNotificationEvents(olderThan); + } + + @Override + public CurrentNotificationEventId getCurrentNotificationEventId() { + return primaryRawStore.getCurrentNotificationEventId(); + } + + @Override + public void flushCache() { + threadPool.execute(new SecondaryRunner(methods.get("flushCache"))); + primaryRawStore.flushCache(); + } + + // File metadata calls are delegated to the store configured for reads. If that store + // doesn't support them we'll just let the exception flow through.
+ @Override + public ByteBuffer[] getFileMetadata(List fileIds) throws MetaException { + return readRawStore.getFileMetadata(fileIds); + } + + @Override + public void putFileMetadata(List fileIds, List metadata, + FileMetadataExprType type) throws MetaException { + readRawStore.putFileMetadata(fileIds, metadata, type); + } + + @Override + public boolean isFileMetadataSupported() { + return readRawStore.isFileMetadataSupported(); + } + + @Override + public void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] expr, + ByteBuffer[] metadatas, ByteBuffer[] exprResults, + boolean[] eliminated) throws MetaException { + readRawStore.getFileMetadataByExpr(fileIds, type, expr, metadatas, exprResults, eliminated); + + } + + @Override + public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { + return readRawStore.getFileMetadataHandler(type); + } + + @Override + public int getTableCount() throws MetaException { + return readRawStore.getTableCount(); + } + + @Override + public int getPartitionCount() throws MetaException { + return readRawStore.getPartitionCount(); + } + + @Override + public int getDatabaseCount() throws MetaException { + return readRawStore.getDatabaseCount(); + } + + // Transaction operations are only executed on the primary. Else I shudder to think of the + // deadlocks we'd get. + @Override + public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException { + return txnHandler.getOpenTxnsInfo(); + } + + @Override + public GetOpenTxnsResponse getOpenTxns() throws MetaException { + return txnHandler.getOpenTxns(); + } + + @Override + public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException { + return txnHandler.openTxns(rqst); + } + + @Override + public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException { + txnHandler.abortTxn(rqst); + + } + + @Override + public void commitTxn(CommitTxnRequest rqst) + throws NoSuchTxnException, TxnAbortedException, MetaException { + txnHandler.commitTxn(rqst); + + } + + @Override + public LockResponse lock(LockRequest rqst) + throws NoSuchTxnException, TxnAbortedException, MetaException { + return txnHandler.lock(rqst); + } + + @Override + public LockResponse checkLock(CheckLockRequest rqst) + throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException { + return txnHandler.checkLock(rqst); + } + + @Override + public void unlock(UnlockRequest rqst) + throws NoSuchLockException, TxnOpenException, MetaException { + txnHandler.unlock(rqst); + } + + @Override + public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException { + return txnHandler.showLocks(rqst); + } + + @Override + public void heartbeat(HeartbeatRequest ids) + throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException { + txnHandler.heartbeat(ids); + } + + @Override + public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst) + throws MetaException { + return txnHandler.heartbeatTxnRange(rqst); + } + + @Override + public void compact(CompactionRequest rqst) throws MetaException { + txnHandler.compact(rqst); + } + + @Override + public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException { + return txnHandler.showCompact(rqst); + } + + @Override + public void addDynamicPartitions(AddDynamicPartitions rqst) + throws NoSuchTxnException, TxnAbortedException, MetaException { + txnHandler.addDynamicPartitions(rqst); + } + + @Override + public void performTimeOuts() { + 
txnHandler.performTimeOuts(); + } + + @Override + public Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException { + return txnHandler.findPotentialCompactions(maxAborted); + } + + @Override + public void setRunAs(long cq_id, String user) throws MetaException { + txnHandler.setRunAs(cq_id, user); + } + + @Override + public CompactionInfo findNextToCompact(String workerId) throws MetaException { + return txnHandler.findNextToCompact(workerId); + } + + @Override + public void markCompacted(CompactionInfo info) throws MetaException { + txnHandler.markCompacted(info); + } + + @Override + public List<CompactionInfo> findReadyToClean() throws MetaException { + return txnHandler.findReadyToClean(); + } + + @Override + public void markCleaned(CompactionInfo info) throws MetaException { + txnHandler.markCleaned(info); + } + + @Override + public void cleanEmptyAbortedTxns() throws MetaException { + txnHandler.cleanEmptyAbortedTxns(); + } + + @Override + public void revokeFromLocalWorkers(String hostname) throws MetaException { + txnHandler.revokeFromLocalWorkers(hostname); + } + + @Override + public void revokeTimedoutWorkers(long timeout) throws MetaException { + txnHandler.revokeTimedoutWorkers(timeout); + } + + @Override + public List<String> findColumnsWithStats(CompactionInfo ci) throws MetaException { + return txnHandler.findColumnsWithStats(ci); + } + + @Override + public void setCompactionHighestTxnId(CompactionInfo ci, long highestTxnId) throws MetaException { + txnHandler.setCompactionHighestTxnId(ci, highestTxnId); + } + + @Override + public int numLocksInLockTable() throws SQLException, MetaException { + return txnHandler.numLocksInLockTable(); + } + + @Override + public long setTimeout(long milliseconds) { + return txnHandler.setTimeout(milliseconds); + } + + private synchronized void buildMethodMap() throws NoSuchMethodException { + // Yes, looking these up once and storing the results is worth it. Based on some quick + // micro-benchmarking, doing 10,000,000 calls of method.invoke (without lookup) takes + // 0.915 seconds, while doing 10 million with lookup takes 4.885 seconds. Worse yet, lookup is in + // the critical path and the invocation is not (since it's done in the secondary thread).
+ if (methods == null) { + methods = new HashMap<>(200); + + { + Method[] methods = RawStore.class.getMethods(); + for (Method m : methods) { + Class[] args = m.getParameterTypes(); + StringBuilder bldr = new StringBuilder(); + for (Class a : args) bldr.append(a.getName()).append(':'); + LOG.debug("Method name " + m.getName() + " args: " + bldr.toString()); + } + } + + methods.put("addIndex", RawStore.class.getMethod("addIndex", Index.class)); + methods.put("addMasterKey", RawStore.class.getMethod("addMasterKey", String.class)); + methods.put("addPartition", RawStore.class.getMethod("addPartition", Partition.class)); + methods.put("addPartitions1", RawStore.class.getMethod("addPartitions", String.class, + String.class, List.class)); + methods.put("addPartitions2", RawStore.class.getMethod("addPartitions", String.class, + String.class, PartitionSpecProxy.class, boolean.class)); + methods.put("addRole", RawStore.class.getMethod("addRole", String.class, String.class)); + methods.put("addToken", RawStore.class.getMethod("addToken", String.class, String.class)); + methods.put("alterDatabase", RawStore.class.getMethod("alterDatabase", String.class, + Database.class)); + methods.put("alterIndex", RawStore.class.getMethod("alterIndex", String.class, String.class, + String.class, Index.class)); + methods.put("alterTable", RawStore.class.getMethod("alterTable", String.class, String.class, + Table.class)); + methods.put("alterFunction", RawStore.class.getMethod("alterFunction", String.class, + String.class, Function.class)); + methods.put("alterPartition", RawStore.class.getMethod("alterPartition", String.class, + String.class, List.class, Partition.class)); + methods.put("alterPartitions", RawStore.class.getMethod("alterPartitions", String.class, + String.class, List.class, List.class)); + methods.put("cleanupEvents", RawStore.class.getMethod("cleanupEvents", null)); + methods.put("commitTransaction", RawStore.class.getMethod("commitTransaction", null)); + methods.put("createDatabase", RawStore.class.getMethod("createDatabase", Database.class)); + methods.put("createFunction", RawStore.class.getMethod("createFunction", Function.class)); + methods.put("createTable", RawStore.class.getMethod("createTable", Table.class)); + methods.put("createType", RawStore.class.getMethod("createType", Type.class)); + methods.put("deletePartitionColumnStatistics", + RawStore.class.getMethod("deletePartitionColumnStatistics", String.class, String.class, + String.class, List.class, String.class)); + methods.put("deleteTableColumnStatistics", + RawStore.class.getMethod("deleteTableColumnStatistics", String.class, String.class, + String.class)); + methods.put("dropDatabase", RawStore.class.getMethod("dropDatabase", String.class)); + methods.put("dropFunction", RawStore.class.getMethod("dropFunction", String.class, + String.class)); + methods.put("dropIndex", RawStore.class.getMethod("dropIndex", String.class, + String.class, String.class)); + methods.put("dropPartition", RawStore.class.getMethod("dropPartition", String.class, + String.class, List.class)); + methods.put("dropPartitions", RawStore.class.getMethod("dropPartitions", String.class, + String.class, List.class)); + methods.put("dropTable", RawStore.class.getMethod("dropTable", String.class, String.class)); + methods.put("dropType", RawStore.class.getMethod("dropType", String.class)); + methods.put("flushCache", RawStore.class.getMethod("flushCache")); + methods.put("grantPrivileges", RawStore.class.getMethod("grantPrivileges", + PrivilegeBag.class)); + 
methods.put("grantRole", RawStore.class.getMethod("grantRole", Role.class, String.class, + PrincipalType.class, String.class, PrincipalType.class, boolean.class)); + methods.put("markPartitionForEvent", RawStore.class.getMethod("markPartitionForEvent", + String.class, String.class, Map.class, PartitionEventType.class)); + methods.put("openTransaction", RawStore.class.getMethod("openTransaction", null)); + methods.put("removeMasterKey", RawStore.class.getMethod("removeMasterKey", Integer.class)); + methods.put("removeRole", RawStore.class.getMethod("removeRole", String.class)); + methods.put("removeToken", RawStore.class.getMethod("removeToken", String.class)); + methods.put("revokePrivileges", RawStore.class.getMethod("revokePrivileges", + PrivilegeBag.class, boolean.class)); + methods.put("revokeRole", RawStore.class.getMethod("revokeRole", Role.class, String.class, + PrincipalType.class, boolean.class)); + methods.put("rollbackTransaction", RawStore.class.getMethod("rollbackTransaction", null)); + methods.put("setMetaStoreSchemaVersion", RawStore.class.getMethod("setMetaStoreSchemaVersion", + String.class, String.class)); + methods.put("shutdown", RawStore.class.getMethod("shutdown", null)); + methods.put("updateMasterKey", RawStore.class.getMethod("updateMasterKey", Integer.class, + String.class)); + methods.put("updatePartitionColumnStatistics", + RawStore.class.getMethod("updatePartitionColumnStatistics", ColumnStatistics.class, + List.class)); + methods.put("updateTableColumnStatistics", + RawStore.class.getMethod("updateTableColumnStatistics", ColumnStatistics.class)); + } + } +} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java index 0ffdbe0..600c8b8 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -146,6 +146,8 @@ public void createDatabase(Database db) throws InvalidObjectException, MetaExcep openTransaction(); try { Database dbCopy = db.deepCopy(); + // Match ObjectStore semantics for db owner type + if (dbCopy.getOwnerType() == null) dbCopy.setOwnerType(PrincipalType.USER); dbCopy.setName(HiveStringUtils.normalizeIdentifier(dbCopy.getName())); // HiveMetaStore already checks for existence of the database, don't recheck getHBase().putDb(dbCopy); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java index 71ef565..64466aa 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java @@ -86,7 +86,7 @@ public static ValidTxnList createValidCompactTxnList(GetOpenTxnsInfoResponse txn * @return txn handler */ public static TxnHandler getTxnHandler(HiveConf conf) { - String className = conf.getVar(HiveConf.ConfVars.METASTORE_TXN_HANDLER_IMPL); + String className = conf.getVar(HiveConf.ConfVars.METASTORE_TXNHANDLER_IMPL); try { TxnHandler handler = ((Class) MetaStoreUtils.getClass( className)).newInstance();