diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index c3a94e3..f118bd5 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -112,13 +112,13 @@ public class HTable implements HTableInterface, RegionLocator {
   private volatile Configuration configuration;
   protected List<Row> writeAsyncBuffer = new LinkedList<Row>();
   private long writeBufferSize;
-  private boolean clearBufferOnFail;
-  private boolean autoFlush;
-  protected long currentWriteBufferSize;
+  private boolean clearBufferOnFail = true;
+  private boolean autoFlush = true;
+  protected long currentWriteBufferSize = 0;
+  private boolean closed = false;
   protected int scannerCaching;
   private int maxKeyValueSize;
   private ExecutorService pool;  // For Multi & Scan
-  private boolean closed;
   private int operationTimeout;
   private int retries;
   private final boolean cleanupPoolOnClose; // shutdown the pool in close()
@@ -127,7 +127,6 @@ public class HTable implements HTableInterface, RegionLocator {
   private int primaryCallTimeoutMicroSecond;
   private int replicaCallTimeoutMicroSecondScan;
 
-
   /** The Async process for puts with autoflush set to false or multiputs */
   protected AsyncProcess ap;
   /** The Async process for batch */
@@ -319,9 +318,10 @@ public class HTable implements HTableInterface, RegionLocator {
 
   /**
    * For internal testing.
+   * @throws IOException
    */
   @VisibleForTesting
-  protected HTable() {
+  protected HTable() throws IOException {
     tableName = null;
     cleanupPoolOnClose = false;
     cleanupConnectionOnClose = false;
@@ -345,9 +345,6 @@ public class HTable implements HTableInterface, RegionLocator {
         HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
     this.writeBufferSize = this.configuration.getLong(
         "hbase.client.write.buffer", 2097152);
-    this.clearBufferOnFail = true;
-    this.autoFlush = true;
-    this.currentWriteBufferSize = 0;
     this.scannerCaching = this.configuration.getInt(
         HConstants.HBASE_CLIENT_SCANNER_CACHING,
         HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
@@ -365,7 +362,6 @@ public class HTable implements HTableInterface, RegionLocator {
     multiAp = this.connection.getAsyncProcess();
 
     this.maxKeyValueSize = getMaxKeyValueSize(this.configuration);
-    this.closed = false;
   }
 
   /**
diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 8d77d7a..8a3aafc 100644
--- hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -663,7 +663,7 @@ public class TestAsyncProcess {
     HTable ht = new HTable();
     MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, true);
     ht.ap = ap;
-    ht.setAutoFlush(true, true);
+    ht.setAutoFlushTo(true);
     if (bufferOn) {
       ht.setWriteBufferSize(1024L * 1024L);
     } else {
@@ -711,7 +711,7 @@ public class TestAsyncProcess {
     HTable ht = new HTable();
     MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, true);
     ht.ap = ap;
-    ht.setAutoFlush(false, true);
+    ht.setAutoFlushTo(false);
     ht.setWriteBufferSize(0);
 
     Put p = createPut(1, false);
@@ -739,7 +739,7 @@ public class TestAsyncProcess {
   public void testWithNoClearOnFail() throws IOException {
     HTable ht = new HTable();
     ht.ap = new MyAsyncProcess(createHConnection(), conf, true);
-    ht.setAutoFlush(false, false);
+    ht.setAutoFlush(false);
 
     Put p = createPut(1, false);
     ht.put(p);
@@ -806,7 +806,7 @@ public class TestAsyncProcess {
     ht.ap.serverTrackerTimeout = 1;
 
     Put p = createPut(1, false);
-    ht.setAutoFlush(false, false);
+    ht.setAutoFlush(false);
     ht.put(p);
 
     try {
@@ -828,7 +828,7 @@ public class TestAsyncProcess {
     Assert.assertNotNull(ht.ap.createServerErrorTracker());
 
     Put p = createPut(1, true);
-    ht.setAutoFlush(false, false);
+    ht.setAutoFlush(false);
     ht.put(p);
 
     try {
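The client-visible rule in the two files above: every deprecated two-argument call whose second argument (clearBufferOnFail) was true becomes the one-argument setAutoFlushTo(autoFlush), while testWithNoClearOnFail, which passed (false, false), moves to the single-argument setAutoFlush(false), which per the historical HTable implementation also leaves clear-on-fail disabled. A minimal migration sketch, not part of the patch, assuming an HBase 0.98-era client API; the class, table, family and qualifier names are placeholders:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AutoFlushMigrationSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "demo_table"); // placeholder table name
        try {
          // Before this patch: table.setAutoFlush(false, true);
          // After it, the equivalent one-argument form:
          table.setAutoFlushTo(false);

          Put put = new Put(Bytes.toBytes("row1"));
          put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          table.put(put);       // buffered client-side; no RPC yet
          table.flushCommits(); // edits reach the region server here
        } finally {
          table.close();        // close() flushes any puts still in the buffer
        }
      }
    }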
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 177341f..37e4f8b 100644
--- hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -363,7 +363,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
 
     protected void instantiateHTable(Configuration conf) throws IOException {
       table = new HTable(conf, getTableName(conf));
-      table.setAutoFlush(false, true);
+      table.setAutoFlushTo(false);
       table.setWriteBufferSize(4 * 1024 * 1024);
     }
 
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
index 30ca60d..106be61 100644
--- hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
@@ -185,7 +185,7 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB
     protected void instantiateHTable(Configuration conf) throws IOException {
       for (int i = 0; i < DEFAULT_TABLES_COUNT; i++) {
         HTable table = new HTable(conf, getTableName(i));
-        table.setAutoFlush(true, true);
+        table.setAutoFlushTo(true);
         //table.setWriteBufferSize(4 * 1024 * 1024);
         this.tables[i] = table;
       }
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
index 5c9a9ad..60f20a5 100644
--- hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java
@@ -181,7 +181,7 @@ public void cleanUpCluster() throws Exception {
       numBackReferencesPerRow = conf.getInt(NUM_BACKREFS_KEY, NUM_BACKREFS_DEFAULT);
       table = new HTable(conf, TableName.valueOf(tableName));
       table.setWriteBufferSize(4*1024*1024);
-      table.setAutoFlush(false, true);
+      table.setAutoFlushTo(false);
 
       String taskId = conf.get("mapreduce.task.attempt.id");
       Matcher matcher = Pattern.compile(".+_m_(\\d+_\\d+)").matcher(taskId);
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
index c96a6ac..b1cf57e 100644
--- hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
@@ -239,7 +239,7 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
       for (int x = 0; x < 5000; x++) {
         TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
         try {
-          ht.setAutoFlush(false, true);
+          ht.setAutoFlushTo(false);
           for (int i = 0; i < 5; i++) {
            long rk = random.nextLong();
            rowKeys.add(rk);
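All four hbase-it changes above keep the same buffered-load shape: autoflush off plus an explicit write buffer (IntegrationTestBigLinkedList uses 4 MB), so puts accumulate in the client until the buffer threshold triggers a batched send. A hedged sketch of that idiom, with placeholder table and family names and an in-scope Configuration conf assumed:

    // Sketch only: the bulk-load idiom the integration tests rely on.
    HTable table = new HTable(conf, "load_table");
    table.setAutoFlushTo(false);               // buffer puts client-side
    table.setWriteBufferSize(4 * 1024 * 1024); // flush in roughly 4 MB batches
    for (int i = 0; i < 100000; i++) {
      Put p = new Put(Bytes.toBytes(i));
      p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(i));
      table.put(p); // sent automatically once the buffer fills
    }
    table.flushCommits(); // drain whatever is left below the threshold
    table.close();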
diff --git hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
index 7e17c01..b02f069 100644
--- hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
+++ hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
@@ -909,7 +909,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
     void testSetup() throws IOException {
       this.table = connection.getTable(tableName);
-      this.table.setAutoFlush(false, true);
+      this.table.setAutoFlushTo(false);
     }
 
     void testTakedown() throws IOException {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
index dab39a8..b38f353 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
@@ -67,7 +67,7 @@ public class TableOutputFormat extends FileOutputFormat<ImmutableBytesWritable, Put>
     ArrayList<Put> rowsUpdate = new ArrayList<Put>();
     for (int i = 0; i < NB_BATCH_ROWS * 10; i++) {
       byte[] row = Bytes.toBytes("row" + i);
@@ -3946,7 +3946,7 @@ public class TestFromClientSide {
     final int NB_BATCH_ROWS = 10;
     HTable table = TEST_UTIL.createTable(Bytes.toBytes("testRowsPutBufferedManyManyFlushes"),
       new byte[][] {CONTENTS_FAMILY, SMALL_FAMILY });
-    table.setAutoFlush(false, true);
+    table.setAutoFlushTo(false);
     table.setWriteBufferSize(10);
     ArrayList<Put> rowsUpdate = new ArrayList<Put>();
     for (int i = 0; i < NB_BATCH_ROWS * 10; i++) {
@@ -4275,7 +4275,7 @@ public class TestFromClientSide {
       new byte[][] { HConstants.CATALOG_FAMILY, Bytes.toBytes("info2") }, 1, 1024);
     // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow
     // in Store.rowAtOrBeforeFromStoreFile
-    table.setAutoFlush(true);
+    table.setAutoFlushTo(true);
     String regionName = table.getRegionLocations().firstKey().getEncodedName();
     HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableAname).getFromOnlineRegions(regionName);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
index 20828ec..087b7a6 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
@@ -262,7 +262,7 @@ public class TestMultiParallel {
     // Load the data
     LOG.info("get new table");
     HTable table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
-    table.setAutoFlush(false, true);
+    table.setAutoFlushTo(false);
     table.setWriteBufferSize(10 * 1024 * 1024);
 
     LOG.info("constructPutRequests");
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java
index de0057c..4649961 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java
@@ -178,7 +178,7 @@ public class TestHTableWrapper {
     boolean initialAutoFlush = hTableInterface.isAutoFlush();
     hTableInterface.setAutoFlushTo(false);
     assertFalse(hTableInterface.isAutoFlush());
-    hTableInterface.setAutoFlush(true, true);
+    hTableInterface.setAutoFlushTo(true);
     assertTrue(hTableInterface.isAutoFlush());
     hTableInterface.setAutoFlushTo(initialAutoFlush);
   }
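TestHTableWrapper above also shows the save-and-restore discipline worth copying wherever a shared table's flush mode is toggled: capture isAutoFlush(), change it, and put it back when done. A hedged sketch (table is any HTableInterface; the body comment marks where the buffered work would go):

    boolean initialAutoFlush = table.isAutoFlush();
    table.setAutoFlushTo(false);
    try {
      // ... buffered puts exercised here ...
    } finally {
      table.setAutoFlushTo(initialAutoFlush); // leave the shared fixture as found
    }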
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 5321ae8..064b11b 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -917,7 +917,7 @@ public class TestDistributedLogSplitting {
     if (key == null || key.length == 0) {
       key = new byte[] { 0, 0, 0, 0, 1 };
     }
-    ht.setAutoFlush(true, true);
+    ht.setAutoFlushTo(true);
     Put put = new Put(key);
     put.add(Bytes.toBytes("family"), Bytes.toBytes("c1"), new byte[]{'b'});
     ht.put(put);
@@ -1586,7 +1586,7 @@ public class TestDistributedLogSplitting {
    * Load table with puts and deletes with expected values so that we can verify later
    */
   private void prepareData(final HTable t, final byte[] f, final byte[] column) throws IOException {
-    t.setAutoFlush(false, true);
+    t.setAutoFlushTo(false);
     byte[] k = new byte[3];
 
     // add puts
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index 3ae82ee..d3285a3 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -348,7 +348,7 @@ public class TestRegionServerMetrics {
 
     TEST_UTIL.createTable(tableName, cf);
     HTable t = new HTable(conf, tableName);
-    t.setAutoFlush(false, true);
+    t.setAutoFlushTo(false);
     for (int insertCount =0; insertCount < 100; insertCount++) {
       Put p = new Put(Bytes.toBytes("" + insertCount + "row"));
       p.add(cf, qualifier, val);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index a3349a1..1ce3441 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -483,7 +483,7 @@ public class TestLogRolling {
 
     writeData(table, 1002);
 
-    table.setAutoFlush(true, true);
+    table.setAutoFlushTo(true);
 
     long curTime = System.currentTimeMillis();
     long oldFilenum = log.getFilenum();
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
index fcb3cda..4eeefb8 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
@@ -54,7 +54,7 @@ public class TestReplicationChangingPeerRegionservers extends TestReplicationBas
    */
   @Before
   public void setUp() throws Exception {
-    htable1.setAutoFlush(false, true);
+    htable1.setAutoFlushTo(false);
     // Starting and stopping replication can make us miss new logs,
     // rolling like this makes sure the most recent one gets added to the queue
     for (JVMClusterUtil.RegionServerThread r :
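The tests above that flip autoflush on (TestDistributedLogSplitting before writing a marker row, TestLogRolling before timing WAL rolls) want each edit on the server as soon as put() returns, with nothing held back in the client buffer. A hedged sketch of that mode; the row and value literals are placeholders:

    table.setAutoFlushTo(true); // every put() becomes its own RPC
    Put put = new Put(Bytes.toBytes("marker-row"));
    put.add(Bytes.toBytes("family"), Bytes.toBytes("c1"), Bytes.toBytes("b"));
    table.put(put); // on the region server when this call returns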
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
index b61b19a..df2798f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
@@ -69,7 +69,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
    */
   @Before
   public void setUp() throws Exception {
-    htable1.setAutoFlush(true, true);
+    htable1.setAutoFlushTo(true);
     // Starting and stopping replication can make us miss new logs,
     // rolling like this makes sure the most recent one gets added to the queue
     for ( JVMClusterUtil.RegionServerThread r :
@@ -247,7 +247,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
     LOG.info("testSmallBatch");
     Put put;
     // normal Batch tests
-    htable1.setAutoFlush(false, true);
+    htable1.setAutoFlushTo(false);
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
       put = new Put(Bytes.toBytes(i));
       put.add(famName, row, row);
@@ -387,7 +387,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
   public void testLoading() throws Exception {
     LOG.info("Writing out rows to table1 in testLoading");
     htable1.setWriteBufferSize(1024);
-    htable1.setAutoFlush(false, true);
+    htable1.setAutoFlushTo(false);
     for (int i = 0; i < NB_ROWS_IN_BIG_BATCH; i++) {
       Put put = new Put(Bytes.toBytes(i));
       put.add(famName, row, row);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index a3d1aac..cebb3c4 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -678,7 +678,7 @@ public class SnapshotTestingUtils {
   public static void loadData(final HBaseTestingUtility util, final HTable table, int rows,
       byte[]... families) throws IOException, InterruptedException {
-    table.setAutoFlush(false, true);
+    table.setAutoFlushTo(false);
 
     // Ensure one row per region
     assertTrue(rows >= KEYS.length);
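Finally, the write buffer that all of these setAutoFlushTo(false) callers depend on defaults to the 2097152 bytes read from "hbase.client.write.buffer" in the first HTable.java hunk; it can also be raised once in the Configuration rather than per table. A hedged sketch (the 8 MB figure is an arbitrary example, not from the patch):

    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.client.write.buffer", 8 * 1024 * 1024);
    HTable table = new HTable(conf, "demo_table"); // constructor reads the new default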