commit 0c43ec1e3b952b2e34129c9c22965f11c20bb328
Author: Daniel Dai
Date:   Tue Apr 19 03:20:46 2016 -0700

    Omid Integration

diff --git a/data/conf/tez/hive-site.xml b/data/conf/tez/hive-site.xml
index a7129fc..0dcbee6 100644
--- a/data/conf/tez/hive-site.xml
+++ b/data/conf/tez/hive-site.xml
@@ -268,8 +268,8 @@
 <property>
-  <name>hive.orc.splits.ms.footer.cache.enabled</name>
-  <value>true</value>
+  <name>hive.metastore.hbase.connection.class</name>
+  <value>org.apache.hadoop.hive.metastore.hbase.OmidHBaseConnection</value>
 </property>
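For context, a minimal sketch of how the metastore side is expected to pick up the
connection class named above. The reflective instantiation is an assumption about
what HBaseReadWrite does with this property, not code from this patch:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.hbase.HBaseConnection;

    public class ConnectionClassSketch {
      public static HBaseConnection open(HiveConf conf) throws Exception {
        // e.g. org.apache.hadoop.hive.metastore.hbase.OmidHBaseConnection
        String className = conf.getVar(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS);
        HBaseConnection conn = (HBaseConnection) Class.forName(className).newInstance();
        conn.setConf(conf);  // assumes the Configurable contract VanillaHBaseConnection implements
        conn.connect();      // with this patch, OmidHBaseConnection also builds its transaction manager here
        return conn;
      }
    }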
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/HBaseIntegrationTests.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/HBaseIntegrationTests.java
index d4cd818..4aee48f 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/HBaseIntegrationTests.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/HBaseIntegrationTests.java
@@ -27,11 +27,13 @@
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
 import org.apache.hadoop.hive.ql.session.SessionState;

-import java.io.IOException;
+import com.yahoo.omid.tso.TSOServer;
+
 import java.util.HashMap;
 import java.util.Map;

@@ -43,21 +45,16 @@
   private static final Logger LOG =
       LoggerFactory.getLogger(HBaseIntegrationTests.class.getName());

   protected static HBaseTestingUtility utility;
+  protected static TSOServer tso;
   protected static HBaseAdmin admin;
   protected static Map<String, String> emptyParameters = new HashMap<>();
   protected static HiveConf conf;
+  protected static boolean testingOmid;

   protected HBaseStore store;
   protected Driver driver;

   protected static void startMiniCluster() throws Exception {
-    String connectionClassName =
-        System.getProperty(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS.varname);
-    boolean testingTephra =
-        connectionClassName != null && connectionClassName.equals(TephraHBaseConnection.class.getName());
-    if (testingTephra) {
-      LOG.info("Testing with Tephra");
-    }
     Configuration hbaseConf = HBaseConfiguration.create();
     hbaseConf.setInt("hbase.master.info.port", -1);
     utility = new HBaseTestingUtility(hbaseConf);
@@ -65,16 +62,24 @@ protected static void startMiniCluster() throws Exception {
     conf = new HiveConf(utility.getConfiguration(), HBaseIntegrationTests.class);
     admin = utility.getHBaseAdmin();
     HBaseStoreTestUtil.initHBaseMetastore(admin, null);
+
+    String connectionClassName =
+        System.getProperty(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS.varname);
+
+    testingOmid =
+        connectionClassName != null && connectionClassName.equals(OmidHBaseConnection.class.getName());
+    if (testingOmid) {
+      tso = QTestUtil.setupTSO(utility, utility.getConfiguration());
+    }
   }

   protected static void shutdownMiniCluster() throws Exception {
+    if (testingOmid) {
+      tso.stopAndWait();
+    }
     utility.shutdownMiniCluster();
   }

-  protected void setupConnection() throws IOException {
-
-  }
-
   protected void setupDriver() {
     // This chicanery is necessary to make the driver work. Hive tests need the pfile file
     // system, while the hbase one uses something else. So first make sure we've configured our
@@ -95,7 +100,6 @@ protected void setupDriver() {
     conf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
     conf.setVar(HiveConf.ConfVars.USERS_IN_ADMIN_ROLE, System.getProperty("user.name"));
     conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
-    //HBaseReadWrite.setTestConnection(hconn);

     SessionState.start(new CliSessionState(conf));
     driver = new Driver(conf);

diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java
index 51d96dd..23abaab 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java
@@ -68,7 +68,6 @@ public static void shutdown() throws Exception {

   @Before
   public void setup() throws IOException {
-    setupConnection();
     setupHBaseStore();
     store.backdoor().getStatsCache().resetCounters();
   }

diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java
index af60660..d14010d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java
@@ -91,7 +91,6 @@ public static void shutdown() throws Exception {

   @Before
   public void setup() throws IOException {
-    setupConnection();
     setupHBaseStore();
   }

diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java
index d4966b9..b6af762 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreSql.java
@@ -49,7 +49,6 @@ public static void shutdown() throws Exception {

   @Before
   public void before() throws IOException {
-    setupConnection();
     setupDriver();
   }

diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java
index 2cc1373..882bc9c 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java
@@ -90,7 +90,6 @@ public static void shutdown() throws Exception {

   @Before
   public void setup() throws IOException {
-    setupConnection();
     setupHBaseStore();
   }
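With setupConnection() gone, a concrete test needs nothing beyond the shared
lifecycle hooks. A sketch of that lifecycle, assuming the JUnit wiring these test
classes already use (the class name here is illustrative):

    import java.io.IOException;
    import org.junit.AfterClass;
    import org.junit.Before;
    import org.junit.BeforeClass;

    public class TestOmidBackedStore extends HBaseIntegrationTests {
      @BeforeClass
      public static void startup() throws Exception {
        // Starts the HBase mini cluster; when hive.metastore.hbase.connection.class
        // names OmidHBaseConnection, this also brings up the mock TSO.
        startMiniCluster();
      }

      @AfterClass
      public static void tearDown() throws Exception {
        shutdownMiniCluster();  // stops the TSO first when testingOmid is set
      }

      @Before
      public void setup() throws IOException {
        setupHBaseStore();      // no setupConnection() call any more
      }
    }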
try { md = MessageDigest.getInstance("MD5"); diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml index b042774..a51fee4 100644 --- a/itests/qtest/pom.xml +++ b/itests/qtest/pom.xml @@ -348,7 +348,14 @@ commons-logging - + + + com.yahoo.omid + commit-table + ${omid.version} + tests + test + diff --git a/itests/util/pom.xml b/itests/util/pom.xml index 4789586..54dd98a 100644 --- a/itests/util/pom.xml +++ b/itests/util/pom.xml @@ -166,5 +166,39 @@ ${hbase.version} tests + + com.yahoo.omid + tso-server + ${omid.version} + + + com.yahoo.omid + hbase0-shims + + + + + com.yahoo.omid + tso-server + ${omid.version} + tests + + + com.yahoo.omid + hbase0-shims + + + + + com.yahoo.omid + commit-table + ${omid.version} + + + com.yahoo.omid + hbase0-shims + + + diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index 8473436..bb4978f 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -73,7 +73,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hive.cli.CliDriver; import org.apache.hadoop.hive.cli.CliSessionState; @@ -90,6 +92,8 @@ import org.apache.hadoop.hive.llap.io.api.LlapProxy; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.hbase.OmidHBaseConnection; +import org.apache.hadoop.hive.metastore.hbase.VanillaHBaseConnection; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -125,6 +129,13 @@ import org.slf4j.LoggerFactory; import com.google.common.collect.ImmutableList; +import com.google.inject.Guice; +import com.google.inject.Injector; +import com.yahoo.omid.TestUtils; +import com.yahoo.omid.tools.hbase.OmidTableManager; +import com.yahoo.omid.tso.TSOMockModule; +import com.yahoo.omid.tso.TSOServer; +import com.yahoo.omid.tso.TSOServerCommandLineConfig; import junit.framework.TestSuite; @@ -375,6 +386,44 @@ private void startMiniHBaseCluster() throws Exception { "org.apache.hadoop.hive.metastore.hbase.HBaseStoreTestUtil") .getMethod("initHBaseMetastore", HBaseAdmin.class, HiveConf.class); initHBaseMetastoreMethod.invoke(null, admin, conf); + String connectionClassName = + conf.get(HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS.varname); + if (connectionClassName.equals(VanillaHBaseConnection.class.getName())) { + LOG.info("Testing with connection class " + connectionClassName); + } + boolean testingOmid = + connectionClassName != null && connectionClassName.equals(OmidHBaseConnection.class.getName()); + if (testingOmid) { + setupTSO(utility, conf); + } + } + + // setup tso and return the instance of tso, includes: + // 1. start Omid tso process + // 2. create required tables in hbase + // 3. 
+
+  // Set up the TSO and return its instance. This includes:
+  //   1. starting the Omid TSO process
+  //   2. creating the required tables in HBase
+  //   3. putting the TSO config into the HiveConf
+  public static TSOServer setupTSO(HBaseTestingUtility hBaseUtils, Configuration conf) throws Exception {
+    Injector injector = Guice.createInjector(new TSOMockModule(
+        TSOServerCommandLineConfig.parseConfig(new String[]{"-port", "1234", "-maxItems", "1000"})));
+    LOG.info("Starting TSO");
+    TSOServer tso = injector.getInstance(TSOServer.class);
+    tso.startAndWait();
+    TestUtils.waitForSocketListening("localhost", 1234, 100);
+    LOG.info("Finished loading TSO");
+    conf.set("tso.host", "localhost");
+    conf.set("tso.port", "1234");
+
+    // Create the timestamp table
+    hBaseUtils.createTable(
+        Bytes.toBytes(com.yahoo.omid.timestamp.storage.HBaseTimestampStorage.TIMESTAMP_TABLE_DEFAULT_NAME),
+        new byte[][]{com.yahoo.omid.timestamp.storage.HBaseTimestampStorage.TSO_FAMILY},
+        Integer.MAX_VALUE);
+
+    // Create the commit table
+    String[] args = new String[]{com.yahoo.omid.tools.hbase.OmidTableManager.COMMIT_TABLE_COMMAND_NAME,
+        "-numRegions", "1"};
+    OmidTableManager omidTableManager = new OmidTableManager(args);
+    omidTableManager.executeActionsOnHBase(hBaseUtils.getConfiguration());
+
+    return tso;
   }

   public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
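A usage sketch for setupTSO(), mirroring what HBaseIntegrationTests.startMiniCluster()
does with it; the transaction-manager calls are the same ones OmidHBaseConnection.connect()
issues against the same Configuration (imports as added to QTestUtil above):

    HBaseTestingUtility utility = new HBaseTestingUtility();
    utility.startMiniCluster();
    TSOServer tso = QTestUtil.setupTSO(utility, utility.getConfiguration());
    try {
      // setupTSO() has already placed tso.host/tso.port into the Configuration
      TransactionManager tm = HBaseTransactionManager.newBuilder()
          .withConfiguration(utility.getConfiguration()).build();
      Transaction tx = tm.begin();
      tm.commit(tx);  // empty transaction, just proves the TSO round trip
    } finally {
      tso.stopAndWait();
      utility.shutdownMiniCluster();
    }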
diff --git a/metastore/pom.xml b/metastore/pom.xml
index 18c1f9c..9e33dc1 100644
--- a/metastore/pom.xml
+++ b/metastore/pom.xml
@@ -207,6 +207,35 @@
       <artifactId>tephra-hbase-compat-1.0</artifactId>
       <version>${tephra.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.yahoo.omid</groupId>
+      <artifactId>hbase-client</artifactId>
+      <version>${omid.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>com.yahoo.omid</groupId>
+          <artifactId>hbase0-shims</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-hadoop1-compat</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-core</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>com.yahoo.omid</groupId>
+      <artifactId>hbase1-shims</artifactId>
+      <version>${omid.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.yahoo.omid</groupId>
+      <artifactId>transaction-client</artifactId>
+      <version>${omid.version}</version>
+    </dependency>
     <dependency>
       <groupId>junit</groupId>

diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index c9fadad..85a1b85 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1675,11 +1675,8 @@ private void deletePartitionData(List<Path> partPaths, boolean ifPurge) {
       // call dropPartition on each of the table's partitions to follow the
       // procedure for cleanly dropping partitions.
-      while (true) {
-        List<Partition> partsToDelete = ms.getPartitions(dbName, tableName, partitionBatchSize);
-        if (partsToDelete == null || partsToDelete.isEmpty()) {
-          break;
-        }
+      List<Partition> partsToDelete = ms.getPartitions(dbName, tableName, partitionBatchSize);
+      if (partsToDelete != null && !partsToDelete.isEmpty()) {
         List<String> partNames = new ArrayList<String>();
         for (Partition part : partsToDelete) {
           if (checkLocation && part.getSd() != null &&

diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index a73dbeb..82ecf88 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -446,7 +446,7 @@ public boolean dropPartition(String dbName, String tableName, List<String> part_vals)
           buildExternalPartName(dbName, tableName, part_vals));
       commit = true;
       return true;
-    } catch (IOException e) {
+    } catch (Exception e) {
       LOG.error("Unable to delete db" + e);
       throw new MetaException("Unable to drop partition " +
           partNameForErrorMsg(dbName, tableName, part_vals));
@@ -748,6 +748,7 @@ public void alterIndex(String dbname, String baseTblName, String name, Index newIndex)
     try {
       getPartitionsByExprInternal(HiveStringUtils.normalizeIdentifier(dbName),
           HiveStringUtils.normalizeIdentifier(tblName), exprTree, maxParts, result);
+      commit = true;
       return result;
     } finally {
       commitOrRoleBack(commit);
@@ -765,15 +766,17 @@ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
     boolean commit = false;
     openTransaction();
     try {
+      boolean hasUnknownPartitions;
       if (exprTree == null) {
         List<String> partNames = new LinkedList<String>();
-        boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(
+        hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(
             table, expr, defaultPartitionName, maxParts, partNames);
         result.addAll(getPartitionsByNames(dbName, tblName, partNames));
-        return hasUnknownPartitions;
       } else {
-        return getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, result);
+        hasUnknownPartitions = getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, result);
       }
+      commit = true;
+      return hasUnknownPartitions;
     } finally {
       commitOrRoleBack(commit);
     }
@@ -2555,8 +2558,6 @@ public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type,
   @Override
   public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata,
       FileMetadataExprType type) throws MetaException {
-    openTransaction();
-    boolean commit = false;
     try {
       ByteBuffer[][] addedVals = null;
       ByteBuffer[] addedCols = null;
@@ -2568,12 +2569,9 @@ public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata,
         }
       }
      getHBase().storeFileMetadata(fileIds, metadata, addedCols, addedVals);
-      commit = true;
     } catch (IOException | InterruptedException e) {
       LOG.error("Unable to store file metadata", e);
       throw new MetaException("Error storing file metadata " + e.getMessage());
-    } finally {
-      commitOrRoleBack(commit);
     }
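The HBaseStore changes above all restore the same invariant: every exit path of a
transactional method must set commit = true before returning, or the finally block
rolls the surrounding transaction back. Distilled into one hedged example (the
fetch helper is illustrative; only the bracketing pattern is from this patch):

    private List<Partition> listPartitions(String dbName, String tblName, int maxParts)
        throws MetaException {
      boolean commit = false;
      openTransaction();
      try {
        List<Partition> parts = fetchPartitions(dbName, tblName, maxParts);  // illustrative helper
        commit = true;  // reached only on success; any exception leaves it false
        return parts;
      } finally {
        commitOrRoleBack(commit);  // (sic) the existing helper name in HBaseStore
      }
    }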
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/OmidHBaseConnection.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/OmidHBaseConnection.java
new file mode 100644
index 0000000..dbf0dee
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/OmidHBaseConnection.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hive.metastore.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.client.HTableInterface;
+
+import com.yahoo.omid.transaction.HBaseTransactionManager;
+import com.yahoo.omid.transaction.OmidInstantiationException;
+import com.yahoo.omid.transaction.TTable;
+import com.yahoo.omid.transaction.Transaction;
+import com.yahoo.omid.transaction.TransactionException;
+import com.yahoo.omid.transaction.TransactionManager;
+
+public class OmidHBaseConnection extends VanillaHBaseConnection {
+  TransactionManager tm = null;
+  ThreadLocal<Transaction> transaction = new ThreadLocal<Transaction>();
+
+  @Override
+  public void connect() throws IOException {
+    try {
+      super.connect();
+      if (tm == null) {
+        tm = HBaseTransactionManager.newBuilder().withConfiguration(conf).build();
+      }
+    } catch (OmidInstantiationException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public void beginTransaction() throws IOException {
+    try {
+      transaction.set(tm.begin());
+    } catch (TransactionException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public void commitTransaction() throws IOException {
+    try {
+      if (transaction.get() != null) {
+        tm.commit(transaction.get());
+        transaction.set(null);
+      }
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public void rollbackTransaction() throws IOException {
+    try {
+      if (transaction.get() != null) {
+        tm.rollback(transaction.get());
+        transaction.set(null);
+      }
+    } catch (TransactionException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public void flush(HTableInterface htab) throws IOException {
+    if (transaction.get() == null) {
+      super.flush(htab);
+    }
+    // otherwise NO-OP, as we want to flush at commit time
+  }
+
+  @Override
+  public HTableInterface getHBaseTable(String tableName, boolean force) throws IOException {
+    if (transaction.get() != null) {
+      return new OmidTable(new TTable(conf, tableName), transaction.get());
+    } else {
+      // If there is no transaction context, use a regular table
+      return super.getHBaseTable(tableName, force);
+    }
+  }
+}
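A sketch of how a caller such as HBaseReadWrite would be expected to drive this
connection; the table and column names are illustrative, and setConf() assumes the
Configurable contract that VanillaHBaseConnection already fulfills:

    HBaseConnection conn = new OmidHBaseConnection();
    conn.setConf(conf);              // conf must carry tso.host/tso.port
    conn.connect();                  // builds the HBaseTransactionManager
    conn.beginTransaction();         // opens a thread-local Omid transaction
    try {
      HTableInterface tbl = conn.getHBaseTable("HBMS_TBLS", false);  // returns an OmidTable
      Put put = new Put(Bytes.toBytes("db.tbl"));
      put.addColumn(Bytes.toBytes("catalog"), Bytes.toBytes("c"), Bytes.toBytes("serialized"));
      tbl.put(put);                  // staged under the transaction, not yet visible
      conn.commitTransaction();      // all writes become visible atomically
    } catch (IOException e) {
      conn.rollbackTransaction();
      throw e;
    }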
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/OmidTable.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/OmidTable.java
new file mode 100644
index 0000000..f71db40
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/OmidTable.java
@@ -0,0 +1,372 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.hbase;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
+import com.yahoo.omid.transaction.TTable;
+import com.yahoo.omid.transaction.Transaction;
+
+/**
+ * HTableInterface wrapper around the Omid TTable.
+ */
+public class OmidTable implements HTableInterface {
+  private TTable ttable;
+  Transaction transaction;
+
+  public OmidTable(TTable ttable, Transaction transaction) {
+    this.ttable = ttable;
+    this.transaction = transaction;
+  }
+
+  @Override
+  public TableName getName() {
+    return ttable.getHTable().getName();
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+    return ttable.getConfiguration();
+  }
+
+  @Override
+  public HTableDescriptor getTableDescriptor() throws IOException {
+    return ttable.getTableDescriptor();
+  }
+
+  @Override
+  public boolean exists(Get get) throws IOException {
+    if (transaction == null) {
+      return ttable.getHTable().exists(get);
+    }
+    return ttable.exists(transaction, get);
+  }
+
+  @Override
+  public boolean[] existsAll(List<Get> gets) throws IOException {
+    if (transaction == null) {
+      return ttable.getHTable().existsAll(gets);
+    }
+    boolean[] exists = new boolean[gets.size()];
+    for (int i = 0; i < gets.size(); i++) {
+      exists[i] = ttable.exists(transaction, gets.get(i));
+    }
+    return exists;
+  }
+
+  @Override
+  public void batch(List<? extends Row> actions, Object[] results)
+      throws IOException, InterruptedException {
+    if (actions.size() != results.length) {
+      throw new IOException("size of actions and results should be equal");
+    }
+    // Dispatch each action through the transactional single-op methods
+    for (int i = 0; i < actions.size(); i++) {
+      Row action = actions.get(i);
+      if (action instanceof Get) {
+        results[i] = get((Get) action);
+      } else if (action instanceof Put) {
+        put((Put) action);
+        results[i] = new Result();
+      } else if (action instanceof Delete) {
+        delete((Delete) action);
+        results[i] = new Result();
+      } else {
+        throw new IOException("Unsupported action in batch: " + action.getClass().getName());
+      }
+    }
+  }
+
+  @Override
+  public Object[] batch(List<? extends Row> actions) throws IOException,
+      InterruptedException {
+    Object[] results = new Object[actions.size()];
+    batch(actions, results);
+    return results;
+  }
+
+  @Override
+  public <R> void batchCallback(List<? extends Row> actions, Object[] results,
+      Callback<R> callback) throws IOException, InterruptedException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public <R> Object[] batchCallback(List<? extends Row> actions,
+      Callback<R> callback) throws IOException, InterruptedException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public Result get(Get get) throws IOException {
+    if (transaction == null) {
+      return ttable.getHTable().get(get);
+    }
+    return ttable.get(transaction, get);
+  }
+
+  @Override
+  public Result[] get(List<Get> gets) throws IOException {
+    if (transaction == null) {
+      return ttable.getHTable().get(gets);
+    }
+    return ttable.get(transaction, gets);
+  }
+
+  @Override
+  public ResultScanner getScanner(Scan scan) throws IOException {
+    if (transaction == null) {
+      return ttable.getHTable().getScanner(scan);
+    }
+    return ttable.getScanner(transaction, scan);
+  }
+
+  @Override
+  public ResultScanner getScanner(byte[] family) throws IOException {
+    if (transaction == null) {
+      return ttable.getHTable().getScanner(family);
+    }
+    return ttable.getScanner(transaction, family);
+  }
+
+  @Override
+  public ResultScanner getScanner(byte[] family, byte[] qualifier)
+      throws IOException {
+    if (transaction == null) {
+      return ttable.getHTable().getScanner(family, qualifier);
+    }
+    return ttable.getScanner(transaction, family, qualifier);
+  }
+
+  @Override
+  public void put(Put put) throws IOException {
+    if (transaction == null) {
+      ttable.getHTable().put(put);
+      return;
+    }
+    ttable.put(transaction, put);
+  }
+
+  @Override
+  public void put(List<Put> puts) throws IOException {
+    if (transaction == null) {
+      ttable.getHTable().put(puts);
+      return;
+    }
+    ttable.put(transaction, puts);
+  }
+
+  @Override
+  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+      byte[] value, Put put) throws IOException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
+      CompareOp compareOp, byte[] value, Put put) throws IOException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public void delete(Delete delete) throws IOException {
+    if (transaction == null) {
+      ttable.getHTable().delete(delete);
+      return;
+    }
+    ttable.delete(transaction, delete);
+  }
+
+  @Override
+  public void delete(List<Delete> deletes) throws IOException {
+    if (transaction == null) {
+      ttable.getHTable().delete(deletes);
+      return;
+    }
+    ttable.delete(transaction, deletes);
+  }
+
+  @Override
+  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+      byte[] value, Delete delete) throws IOException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+      CompareOp compareOp, byte[] value, Delete delete) throws IOException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public void mutateRow(RowMutations rm) throws IOException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public Result append(Append append) throws IOException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public Result increment(Increment increment) throws IOException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
+      long amount) throws IOException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
+      long amount, Durability durability) throws IOException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public void close() throws IOException {
+    ttable.close();
+  }
+
+  @Override
+  public CoprocessorRpcChannel coprocessorService(byte[] row) {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public <T extends Service, R> Map<byte[], R> coprocessorService(
+      Class<T> service, byte[] startKey, byte[] endKey, Call<T, R> callable)
+      throws ServiceException, Throwable {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public <T extends Service, R> void coprocessorService(Class<T> service,
+      byte[] startKey, byte[] endKey, Call<T, R> callable, Callback<R> callback)
+      throws ServiceException, Throwable {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public <R extends Message> Map<byte[], R> batchCoprocessorService(
+      MethodDescriptor methodDescriptor, Message request, byte[] startKey,
+      byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public <R extends Message> void batchCoprocessorService(
+      MethodDescriptor methodDescriptor, Message request, byte[] startKey,
+      byte[] endKey, R responsePrototype, Callback<R> callback)
+      throws ServiceException, Throwable {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
+      CompareOp compareOp, byte[] value, RowMutations mutation)
+      throws IOException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public byte[] getTableName() {
+    return ttable.getTableName();
+  }
+
+  @Override
+  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
+      long amount, boolean writeToWAL) throws IOException {
+    throw new RuntimeException("Not implemented");
+  }
+
+  @Override
+  public Boolean[] exists(List<Get> gets) throws IOException {
+    Boolean[] exists = new Boolean[gets.size()];
+    for (int i = 0; i < gets.size(); i++) {
+      exists[i] = exists(gets.get(i));
+    }
+    return exists;
+  }
+}

diff --git a/pom.xml b/pom.xml
--- a/pom.xml
+++ b/pom.xml
@@ ... @@
     <tephra.version>0.6.0</tephra.version>
+    <omid.version>0.8.1.21</omid.version>
@@ -234,6 +235,16 @@
         <enabled>false</enabled>
       </snapshots>
     </repository>
+    <repository>
+      <id>bintray-yahoo-maven</id>
+      <url>http://yahoo.bintray.com/maven</url>
+      <releases>
+        <enabled>true</enabled>
+      </releases>
+      <snapshots>
+        <enabled>false</enabled>
+      </snapshots>
+    </repository>
@@ -696,6 +707,11 @@
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+      <version>${hbase.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
       <version>${hbase.version}</version>
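For reference, the raw Omid client pattern that OmidTable wraps: a minimal sketch
against the transaction-client artifact added above, with illustrative table and
column names:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;
    import com.yahoo.omid.transaction.HBaseTransactionManager;
    import com.yahoo.omid.transaction.TTable;
    import com.yahoo.omid.transaction.Transaction;
    import com.yahoo.omid.transaction.TransactionManager;

    public class OmidClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("tso.host", "localhost");
        conf.set("tso.port", "1234");
        TransactionManager tm = HBaseTransactionManager.newBuilder()
            .withConfiguration(conf).build();
        TTable tt = new TTable(conf, "MY_TABLE");
        Transaction tx = tm.begin();
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        tt.put(tx, put);       // write staged under tx's start timestamp
        tm.commit(tx);         // snapshot-isolated commit via the TSO
        tt.close();
      }
    }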