diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
index 597573a..895172d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
@@ -19,35 +19,29 @@
package org.apache.hadoop.hbase;
import java.io.IOException;
-import java.util.Iterator;
-import java.util.List;
import java.util.NavigableMap;
-import junit.framework.AssertionFailedError;
-import junit.framework.TestCase;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionTable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import junit.framework.AssertionFailedError;
+import junit.framework.TestCase;
+
/**
* Abstract HBase test class. Initializes a few things that can come in handly
* like an HBaseConfiguration and filesystem.
@@ -239,7 +233,7 @@ public abstract class HBaseTestCase extends TestCase {
if (startKeyBytes == null || startKeyBytes.length == 0) {
startKeyBytes = START_KEY_BYTES;
}
- return addContent(new HRegionIncommon(r), Bytes.toString(columnFamily), Bytes.toString(column),
+ return addContent(new RegionTable(r), Bytes.toString(columnFamily), Bytes.toString(column),
startKeyBytes, endKey, -1);
}
@@ -251,18 +245,15 @@ public abstract class HBaseTestCase extends TestCase {
* Add content to region r on the passed column
* column.
* Adds data of the from 'aaa', 'aab', etc where key and value are the same.
- * @param updater An instance of {@link Incommon}.
- * @param columnFamily
- * @param writeToWAL
* @throws IOException
* @return count of what we added.
*/
- public static long addContent(final Incommon updater,
+ public static long addContent(final Table updater,
final String columnFamily) throws IOException {
return addContent(updater, columnFamily, START_KEY_BYTES, null);
}
- public static long addContent(final Incommon updater, final String family,
+ public static long addContent(final Table updater, final String family,
final String column) throws IOException {
return addContent(updater, family, column, START_KEY_BYTES, null);
}
@@ -271,21 +262,16 @@ public abstract class HBaseTestCase extends TestCase {
* Add content to region r on the passed column
* column.
* Adds data of the from 'aaa', 'aab', etc where key and value are the same.
- * @param updater An instance of {@link Incommon}.
- * @param columnFamily
- * @param startKeyBytes Where to start the rows inserted
- * @param endKey Where to stop inserting rows.
- * @param writeToWAL
* @return count of what we added.
* @throws IOException
*/
- public static long addContent(final Incommon updater, final String columnFamily,
+ public static long addContent(final Table updater, final String columnFamily,
final byte [] startKeyBytes, final byte [] endKey)
throws IOException {
return addContent(updater, columnFamily, null, startKeyBytes, endKey, -1);
}
- public static long addContent(final Incommon updater, final String family, String column,
+ public static long addContent(final Table updater, final String family, String column,
final byte [] startKeyBytes, final byte [] endKey) throws IOException {
return addContent(updater, family, column, startKeyBytes, endKey, -1);
}
@@ -294,16 +280,10 @@ public abstract class HBaseTestCase extends TestCase {
* Add content to region r on the passed column
* column.
* Adds data of the from 'aaa', 'aab', etc where key and value are the same.
- * @param updater An instance of {@link Incommon}.
- * @param column
- * @param startKeyBytes Where to start the rows inserted
- * @param endKey Where to stop inserting rows.
- * @param ts Timestamp to write the content with.
- * @param writeToWAL
* @return count of what we added.
* @throws IOException
*/
- public static long addContent(final Incommon updater,
+ public static long addContent(final Table updater,
final String columnFamily,
final String column,
final byte [] startKeyBytes, final byte [] endKey, final long ts)
@@ -378,209 +358,6 @@ public abstract class HBaseTestCase extends TestCase {
return count;
}
- /**
- * Implementors can flushcache.
- */
- public interface FlushCache {
- /**
- * @throws IOException
- */
- void flushcache() throws IOException;
- }
-
- /**
- * Interface used by tests so can do common operations against an HTable
- * or an HRegion.
- *
- * TOOD: Come up w/ a better name for this interface.
- */
- public interface Incommon {
- /**
- *
- * @param delete
- * @param writeToWAL
- * @throws IOException
- */
- void delete(Delete delete, boolean writeToWAL)
- throws IOException;
-
- /**
- * @param put
- * @throws IOException
- */
- void put(Put put) throws IOException;
-
- Result get(Get get) throws IOException;
-
- /**
- * @param family
- * @param qualifiers
- * @param firstRow
- * @param ts
- * @return scanner for specified columns, first row and timestamp
- * @throws IOException
- */
- ScannerIncommon getScanner(
- byte[] family, byte[][] qualifiers, byte[] firstRow, long ts
- )
- throws IOException;
- }
-
- /**
- * A class that makes a {@link Incommon} out of a {@link HRegion}
- */
- public static class HRegionIncommon implements Incommon, FlushCache {
- final HRegion region;
-
- /**
- * @param HRegion
- */
- public HRegionIncommon(final HRegion HRegion) {
- this.region = HRegion;
- }
-
- public HRegionIncommon(final Region region) {
- this.region = (HRegion)region;
- }
-
- public void put(Put put) throws IOException {
- region.put(put);
- }
-
- public void delete(Delete delete, boolean writeToWAL)
- throws IOException {
- this.region.delete(delete);
- }
-
- public Result get(Get get) throws IOException {
- return region.get(get);
- }
-
- public ScannerIncommon getScanner(byte [] family, byte [][] qualifiers,
- byte [] firstRow, long ts)
- throws IOException {
- Scan scan = new Scan(firstRow);
- if(qualifiers == null || qualifiers.length == 0) {
- scan.addFamily(family);
- } else {
-        for(int i=0; i<qualifiers.length; i++) {
-          scan.addColumn(family, qualifiers[i]);
-        }
-      }
-      scan.setTimeRange(0, ts);
-      return new InternalScannerIncommon(region.getScanner(scan));
-    }
-
-    public void flushcache() throws IOException {
-      this.region.flush(true);
-    }
-  }
-
-  /**
-   * A class that makes a {@link Incommon} out of a {@link Table}
-   */
-  public static class HTableIncommon implements Incommon {
-    final Table table;
-
-    public HTableIncommon(final Table table) {
-      super();
-      this.table = table;
-    }
-
-    public void put(Put put) throws IOException {
-      table.put(put);
-    }
-
-    public void delete(Delete delete, boolean writeToWAL)
-    throws IOException {
-      this.table.delete(delete);
-    }
-
-    public Result get(Get get) throws IOException {
-      return table.get(get);
-    }
-
-    public ScannerIncommon getScanner(byte [] family, byte [][] qualifiers,
-        byte [] firstRow, long ts)
-    throws IOException {
-      Scan scan = new Scan(firstRow);
-      if(qualifiers == null || qualifiers.length == 0) {
-        scan.addFamily(family);
-      } else {
-        for(int i=0; i<qualifiers.length; i++) {
-          scan.addColumn(family, qualifiers[i]);
-        }
-      }
-      scan.setTimeRange(0, ts);
-      return new ClientScannerIncommon(table.getScanner(scan));
-    }
-  }
-
-  public interface ScannerIncommon extends Iterable<Result> {
-    boolean next(List<Cell> values)
-    throws IOException;
-
-    void close() throws IOException;
-  }
-
- public static class ClientScannerIncommon implements ScannerIncommon {
- ResultScanner scanner;
- public ClientScannerIncommon(ResultScanner scanner) {
- this.scanner = scanner;
- }
-
- @Override
- public boolean next(List<Cell> values)
- throws IOException {
- Result results = scanner.next();
- if (results == null) {
- return false;
- }
- values.clear();
- values.addAll(results.listCells());
- return true;
- }
-
- public void close() throws IOException {
- scanner.close();
- }
-
- public Iterator<Result> iterator() {
- return scanner.iterator();
- }
- }
-
- public static class InternalScannerIncommon implements ScannerIncommon {
- InternalScanner scanner;
-
- public InternalScannerIncommon(InternalScanner scanner) {
- this.scanner = scanner;
- }
-
- @Override
- public boolean next(List<Cell> results)
- throws IOException {
- return scanner.next(results);
- }
-
- @Override
- public void close() throws IOException {
- scanner.close();
- }
-
- @Override
- public Iterator<Result> iterator() {
- throw new UnsupportedOperationException();
- }
- }
-
protected void assertResultEquals(final HRegion region, final byte [] row,
final byte [] family, final byte [] qualifier, final long timestamp,
final byte [] value)
@@ -669,5 +446,4 @@ public abstract class HBaseTestCase extends TestCase {
Bytes.toStringBinary(actual) + ">");
}
}
-
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
index 278973e..439e722 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java
@@ -30,9 +30,7 @@ import java.util.NavigableMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseTestCase.FlushCache;
-import org.apache.hadoop.hbase.HBaseTestCase.HTableIncommon;
-import org.apache.hadoop.hbase.HBaseTestCase.Incommon;
+import org.apache.hadoop.hbase.TimestampTestBase.FlushCache;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
@@ -97,8 +95,7 @@ public class TestMultiVersions {
Table table = UTIL.getConnection().getTable(desc.getTableName());
// TODO: Remove these deprecated classes or pull them in here if this is
// only test using them.
- Incommon incommon = new HTableIncommon(table);
- TimestampTestBase.doTestDelete(incommon, new FlushCache() {
+ TimestampTestBase.doTestDelete(table, new FlushCache() {
public void flushcache() throws IOException {
UTIL.getHBaseCluster().flushcache();
}
@@ -106,7 +103,7 @@ public class TestMultiVersions {
// Perhaps drop and readd the table between tests so the former does
// not pollute this latter? Or put into separate tests.
- TimestampTestBase.doTestTimestampScanning(incommon, new FlushCache() {
+ TimestampTestBase.doTestTimestampScanning(table, new FlushCache() {
public void flushcache() throws IOException {
UTIL.getMiniHBaseCluster().flushcache();
}
@@ -136,7 +133,7 @@ public class TestMultiVersions {
desc.addFamily(hcd);
this.admin.createTable(desc);
Put put = new Put(row, timestamp1);
- put.add(contents, contents, value1);
+ put.addColumn(contents, contents, value1);
Table table = UTIL.getConnection().getTable(desc.getTableName());
table.put(put);
// Shut down and restart the HBase cluster
@@ -148,7 +145,7 @@ public class TestMultiVersions {
table = UTIL.getConnection().getTable(desc.getTableName());
// Overwrite previous value
put = new Put(row, timestamp2);
- put.add(contents, contents, value2);
+ put.addColumn(contents, contents, value2);
table.put(put);
// Now verify that getRow(row, column, latest) works
Get get = new Get(row);
@@ -221,7 +218,7 @@ public class TestMultiVersions {
for (int i = 0; i < startKeys.length; i++) {
for (int j = 0; j < timestamp.length; j++) {
Put put = new Put(rows[i], timestamp[j]);
- put.add(HConstants.CATALOG_FAMILY, null, timestamp[j],
+ put.addColumn(HConstants.CATALOG_FAMILY, null, timestamp[j],
Bytes.toBytes(timestamp[j]));
puts.add(put);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java
index 40baf41..879d065 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java
@@ -24,15 +24,19 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
/**
* Tests user specifiable time stamps putting, getting and scanning. Also
* tests same in presence of deletes. Test cores are written so can be
* run against an HRegion and against an HTable: i.e. both local and remote.
*/
-public class TimestampTestBase extends HBaseTestCase {
+public class TimestampTestBase {
private static final long T0 = 10L;
private static final long T1 = 100L;
private static final long T2 = 200L;
@@ -42,73 +46,73 @@ public class TimestampTestBase extends HBaseTestCase {
private static final byte [] ROW = Bytes.toBytes("row");
- /*
+ interface FlushCache {
+ void flushcache() throws IOException;
+ }
+
+ /*
* Run test that delete works according to description in hadoop-1784.
* @param incommon
* @param flusher
* @throws IOException
*/
- public static void doTestDelete(final Incommon incommon, FlushCache flusher)
+ public static void doTestDelete(final Table table, FlushCache flusher)
throws IOException {
// Add values at various timestamps (Values are timestampes as bytes).
- put(incommon, T0);
- put(incommon, T1);
- put(incommon, T2);
- put(incommon);
+ put(table, T0);
+ put(table, T1);
+ put(table, T2);
+ put(table);
// Verify that returned versions match passed timestamps.
- assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
+ assertVersions(table, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
- // If I delete w/o specifying a timestamp, this means I'm deleting the
- // latest.
- delete(incommon);
- // Verify that I get back T2 through T1 -- that the latest version has
- // been deleted.
- assertVersions(incommon, new long [] {T2, T1, T0});
+ // If I delete w/o specifying a timestamp, this means I'm deleting the latest.
+ delete(table);
+ // Verify that I get back T2 through T1 -- that the latest version has been deleted.
+ assertVersions(table, new long [] {T2, T1, T0});
// Flush everything out to disk and then retry
flusher.flushcache();
- assertVersions(incommon, new long [] {T2, T1, T0});
+ assertVersions(table, new long [] {T2, T1, T0});
// Now add, back a latest so I can test remove other than the latest.
- put(incommon);
- assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
- delete(incommon, T2);
- assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});
+ put(table);
+ assertVersions(table, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
+ delete(table, T2);
+ assertVersions(table, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});
// Flush everything out to disk and then retry
flusher.flushcache();
- assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});
+ assertVersions(table, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});
// Now try deleting all from T2 back inclusive (We first need to add T2
- // back into the mix and to make things a little interesting, delete and
- // then readd T1.
- put(incommon, T2);
- delete(incommon, T1);
- put(incommon, T1);
+ // back into the mix and to make things a little interesting, delete and then readd T1.
+ put(table, T2);
+ delete(table, T1);
+ put(table, T1);
Delete delete = new Delete(ROW);
- delete.deleteColumns(FAMILY_NAME, QUALIFIER_NAME, T2);
- incommon.delete(delete, true);
+ delete.addColumns(FAMILY_NAME, QUALIFIER_NAME, T2);
+ table.delete(delete);
// Should only be current value in set. Assert this is so
- assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP);
+ assertOnlyLatest(table, HConstants.LATEST_TIMESTAMP);
// Flush everything out to disk and then redo above tests
flusher.flushcache();
- assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP);
+ assertOnlyLatest(table, HConstants.LATEST_TIMESTAMP);
}
- private static void assertOnlyLatest(final Incommon incommon,
- final long currentTime)
+ private static void assertOnlyLatest(final Table incommon, final long currentTime)
throws IOException {
Get get = null;
get = new Get(ROW);
get.addColumn(FAMILY_NAME, QUALIFIER_NAME);
get.setMaxVersions(3);
Result result = incommon.get(get);
- assertEquals(1, result.size());
+ Assert.assertEquals(1, result.size());
long time = Bytes.toLong(CellUtil.cloneValue(result.rawCells()[0]));
- assertEquals(time, currentTime);
+ Assert.assertEquals(time, currentTime);
}
/*
@@ -119,7 +123,7 @@ public class TimestampTestBase extends HBaseTestCase {
* @param tss
* @throws IOException
*/
- public static void assertVersions(final Incommon incommon, final long [] tss)
+ public static void assertVersions(final Table incommon, final long [] tss)
throws IOException {
// Assert that 'latest' is what we expect.
Get get = null;
@@ -128,7 +132,7 @@ public class TimestampTestBase extends HBaseTestCase {
Result r = incommon.get(get);
byte [] bytes = r.getValue(FAMILY_NAME, QUALIFIER_NAME);
long t = Bytes.toLong(bytes);
- assertEquals(tss[0], t);
+ Assert.assertEquals(tss[0], t);
// Now assert that if we ask for multiple versions, that they come out in
// order.
@@ -137,10 +141,10 @@ public class TimestampTestBase extends HBaseTestCase {
get.setMaxVersions(tss.length);
Result result = incommon.get(get);
Cell [] kvs = result.rawCells();
- assertEquals(kvs.length, tss.length);
+ Assert.assertEquals(kvs.length, tss.length);
    for(int i=0;i<kvs.length;i++) {
+ * Use as an instance of a {@link Table} in-the-small -- no networking or servers
+ * necessary -- or to write a test that can run directly against the datastore and then
+ * over the network.
+ */
+public class RegionTable implements Table {
+ private final Region region;
+
+ /**
+ * @param region Region to decorate with Table API.
+ */
+ public RegionTable(final Region region) {
+ this.region = region;
+ }
+
+ @Override
+ public TableName getName() {
+ return this.region.getTableDesc().getTableName();
+ }
+
+ @Override
+ public Configuration getConfiguration() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public HTableDescriptor getTableDescriptor() throws IOException {
+ return this.region.getTableDesc();
+ }
+
+ @Override
+ public boolean exists(Get get) throws IOException {
+ if (!get.isCheckExistenceOnly()) throw new IllegalArgumentException();
+ return get(get) != null;
+ }
+
+ @Override
+ public boolean[] existsAll(List<Get> gets) throws IOException {
+ boolean [] results = new boolean[gets.size()];
+ int index = 0;
+ for (Get get: gets) {
+ results[index++] = exists(get);
+ }
+ return results;
+ }
+
+ @Override
+ public void batch(List<? extends Row> actions, Object[] results)
+ throws IOException, InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public <R> void batchCallback(List<? extends Row> actions, Object[] results,
+ Callback<R> callback)
+ throws IOException, InterruptedException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Result get(Get get) throws IOException {
+ return this.region.get(get);
+ }
+
+ @Override
+ public Result[] get(List<Get> gets) throws IOException {
+ Result [] results = new Result[gets.size()];
+ int index = 0;
+ for (Get get: gets) {
+ results[index++] = get(get);
+ }
+ return results;
+ }
+
+ static class RegionScannerToResultScannerAdaptor implements ResultScanner {
+ private static final Result [] EMPTY_RESULT_ARRAY = new Result[0];
+ private final RegionScanner regionScanner;
+
+ RegionScannerToResultScannerAdaptor(final RegionScanner regionScanner) {
+ this.regionScanner = regionScanner;
+ }
+
+ @Override
+ public Iterator<Result> iterator() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Result next() throws IOException {
+ List<Cell> cells = new ArrayList<Cell>();
+ return regionScanner.next(cells)? Result.create(cells): null;
+ }
+
+ @Override
+ public Result[] next(int nbRows) throws IOException {
+ List<Result> results = new ArrayList<Result>(nbRows);
+ for (int i = 0; i < nbRows; i++) {
+ Result result = next();
+ if (result == null) break;
+ results.add(result);
+ }
+ return results.toArray(EMPTY_RESULT_ARRAY);
+ }
+
+ @Override
+ public void close() {
+ try {
+ regionScanner.close();
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public boolean renewLease() {
+ throw new UnsupportedOperationException();
+ }
+ };
+
+ @Override
+ public ResultScanner getScanner(Scan scan) throws IOException {
+ return new RegionScannerToResultScannerAdaptor(this.region.getScanner(scan));
+ }
+
+ @Override
+ public ResultScanner getScanner(byte[] family) throws IOException {
+ return getScanner(new Scan().addFamily(family));
+ }
+
+ @Override
+ public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
+ return getScanner(new Scan().addColumn(family, qualifier));
+ }
+
+ @Override
+ public void put(Put put) throws IOException {
+ this.region.put(put);
+ }
+
+ @Override
+ public void put(List<Put> puts) throws IOException {
+ for (Put put: puts) put(put);
+ }
+
+ @Override
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, Put put)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void delete(Delete delete) throws IOException {
+ this.region.delete(delete);
+ }
+
+ @Override
+ public void delete(List<Delete> deletes) throws IOException {
+ for(Delete delete: deletes) delete(delete);
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value,
+ Delete delete)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
+ CompareOp compareOp, byte[] value, Delete delete)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void mutateRow(RowMutations rm) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Result append(Append append) throws IOException {
+ return this.region.append(append, HConstants.NO_NONCE, HConstants.NO_NONCE);
+ }
+
+ @Override
+ public Result increment(Increment increment) throws IOException {
+ return this.region.increment(increment, HConstants.NO_NONCE, HConstants.NO_NONCE);
+ }
+
+ @Override
+ public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount,
+ Durability durability)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * This call will NOT close the underlying region.
+ */
+ @Override
+ public void close() throws IOException {
+ }
+
+ @Override
+ public CoprocessorRpcChannel coprocessorService(byte[] row) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service, byte[] startKey,
+ byte[] endKey, Call<T, R> callable)
+ throws ServiceException, Throwable {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public <T extends Service, R> void coprocessorService(Class<T> service, byte[] startKey,
+ byte[] endKey, Call<T, R> callable, Callback<R> callback)
+ throws ServiceException, Throwable {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long getWriteBufferSize() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void setWriteBufferSize(long writeBufferSize) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public <R extends Message> Map<byte[], R> batchCoprocessorService(MethodDescriptor
+ methodDescriptor, Message request,
+ byte[] startKey, byte[] endKey, R responsePrototype)
+ throws ServiceException, Throwable {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public <R extends Message> void batchCoprocessorService(MethodDescriptor methodDescriptor,
+ Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback)
+ throws ServiceException, Throwable {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, RowMutations mutation)
+ throws IOException {
+ throw new UnsupportedOperationException();
+ }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index a377325..cad77e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -37,8 +37,6 @@ import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -47,7 +45,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HBaseTestCase.HRegionIncommon;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -56,6 +53,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
@@ -85,7 +83,6 @@ import org.mockito.stubbing.Answer;
@Category({RegionServerTests.class, MediumTests.class})
public class TestCompaction {
@Rule public TestName name = new TestName();
- private static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
private static final HBaseTestingUtility UTIL = HBaseTestingUtility.createLocalHTU();
protected Configuration conf = UTIL.getConfiguration();
@@ -147,15 +144,15 @@ public class TestCompaction {
int jmax = (int) Math.ceil(15.0/compactionThreshold);
byte [] pad = new byte[1000]; // 1 KB chunk
for (int i = 0; i < compactionThreshold; i++) {
- HRegionIncommon loader = new HRegionIncommon(r);
+ Table loader = new RegionTable(r);
Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
p.setDurability(Durability.SKIP_WAL);
for (int j = 0; j < jmax; j++) {
- p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
+ p.addColumn(COLUMN_FAMILY, Bytes.toBytes(j), pad);
}
HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
loader.put(p);
- loader.flushcache();
+ r.flush(true);
}
HRegion spyR = spy(r);
@@ -229,9 +226,9 @@ public class TestCompaction {
}
private void createStoreFile(final HRegion region, String family) throws IOException {
- HRegionIncommon loader = new HRegionIncommon(region);
+ Table loader = new RegionTable(region);
HBaseTestCase.addContent(loader, family);
- loader.flushcache();
+ region.flush(true);
}
@Test
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index 0409df3..8402fb2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HBaseTestCase.HRegionIncommon;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -48,6 +47,7 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
@@ -198,7 +198,7 @@ public class TestMajorCompaction {
createStoreFile(r);
}
// Add more content.
- HBaseTestCase.addContent(new HRegionIncommon(r), Bytes.toString(COLUMN_FAMILY));
+ HBaseTestCase.addContent(new RegionTable(r), Bytes.toString(COLUMN_FAMILY));
// Now there are about 5 versions of each column.
// Default is that there only 3 (MAXVERSIONS) versions allowed per column.
@@ -387,16 +387,16 @@ public class TestMajorCompaction {
}
private void createStoreFile(final Region region, String family) throws IOException {
- HRegionIncommon loader = new HRegionIncommon(region);
+ Table loader = new RegionTable(region);
HBaseTestCase.addContent(loader, family);
- loader.flushcache();
+ region.flush(true);
}
private void createSmallerStoreFile(final Region region) throws IOException {
- HRegionIncommon loader = new HRegionIncommon(region);
+ Table loader = new RegionTable(region);
HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), ("" +
"bbb").getBytes(), null);
- loader.flushcache();
+ region.flush(true);
}
/**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
index 47f3a8f..b49842b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
@@ -28,7 +28,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HBaseTestCase.HRegionIncommon;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
@@ -37,6 +36,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
@@ -162,7 +162,7 @@ public class TestMinorCompaction {
}
private void testMinorCompactionWithDelete(Delete delete, int expectedResultsAfterDelete) throws Exception {
- HRegionIncommon loader = new HRegionIncommon(r);
+ Table loader = new RegionTable(r);
for (int i = 0; i < compactionThreshold + 1; i++) {
HBaseTestCase.addContent(loader, Bytes.toString(fam1), Bytes.toString(col1), firstRowBytes,
thirdRowBytes, i);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
index 8d2d857..5cd8cc7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
@@ -38,11 +38,11 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.HBaseTestCase.HRegionIncommon;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -125,12 +125,12 @@ public class TestMobStoreCompaction {
public void testSmallerValue() throws Exception {
init(UTIL.getConfiguration(), 500);
byte[] dummyData = makeDummyData(300); // smaller than mob threshold
- HRegionIncommon loader = new HRegionIncommon(region);
+ Table loader = new RegionTable(region);
// one hfile per row
for (int i = 0; i < compactionThreshold; i++) {
Put p = createPut(i, dummyData);
loader.put(p);
- loader.flushcache();
+ region.flush(true);
}
assertEquals("Before compaction: store files", compactionThreshold, countStoreFiles());
assertEquals("Before compaction: mob file count", 0, countMobFiles());
@@ -153,11 +153,11 @@ public class TestMobStoreCompaction {
public void testLargerValue() throws Exception {
init(UTIL.getConfiguration(), 200);
byte[] dummyData = makeDummyData(300); // larger than mob threshold
- HRegionIncommon loader = new HRegionIncommon(region);
+ Table loader = new RegionTable(region);
for (int i = 0; i < compactionThreshold; i++) {
Put p = createPut(i, dummyData);
loader.put(p);
- loader.flushcache();
+ region.flush(true);
}
assertEquals("Before compaction: store files", compactionThreshold, countStoreFiles());
assertEquals("Before compaction: mob file count", compactionThreshold, countMobFiles());
@@ -221,14 +221,14 @@ public class TestMobStoreCompaction {
public void testMajorCompactionAfterDelete() throws Exception {
init(UTIL.getConfiguration(), 100);
byte[] dummyData = makeDummyData(200); // larger than mob threshold
- HRegionIncommon loader = new HRegionIncommon(region);
+ Table loader = new RegionTable(region);
// create hfiles and mob hfiles but don't trigger compaction
int numHfiles = compactionThreshold - 1;
byte[] deleteRow = Bytes.add(STARTROW, Bytes.toBytes(0));
for (int i = 0; i < numHfiles; i++) {
Put p = createPut(i, dummyData);
loader.put(p);
- loader.flushcache();
+ region.flush(true);
}
assertEquals("Before compaction: store files", numHfiles, countStoreFiles());
assertEquals("Before compaction: mob file count", numHfiles, countMobFiles());
@@ -239,7 +239,7 @@ public class TestMobStoreCompaction {
Delete delete = new Delete(deleteRow);
delete.addFamily(COLUMN_FAMILY);
region.delete(delete);
- loader.flushcache();
+ region.flush(true);
assertEquals("Before compaction: store files", numHfiles + 1, countStoreFiles());
assertEquals("Before compaction: mob files", numHfiles, countMobFiles());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
index cdef351..73dce1c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
@@ -36,8 +36,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HBaseTestCase.HRegionIncommon;
-import org.apache.hadoop.hbase.HBaseTestCase.ScannerIncommon;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -49,7 +47,9 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
@@ -99,11 +99,10 @@ public class TestScanner {
private static final long START_CODE = Long.MAX_VALUE;
- private Region r;
- private HRegionIncommon region;
+ private HRegion region;
private byte[] firstRowBytes, secondRowBytes, thirdRowBytes;
- final private byte[] col1, col2;
+ final private byte[] col1;
public TestScanner() {
super();
@@ -115,7 +114,6 @@ public class TestScanner {
thirdRowBytes = START_KEY_BYTES.clone();
thirdRowBytes[START_KEY_BYTES.length - 1] += 2;
col1 = Bytes.toBytes("column1");
- col2 = Bytes.toBytes("column2");
}
/**
@@ -127,14 +125,14 @@ public class TestScanner {
byte [] startrow = Bytes.toBytes("bbb");
byte [] stoprow = Bytes.toBytes("ccc");
try {
- this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
- HBaseTestCase.addContent(this.r, HConstants.CATALOG_FAMILY);
+ this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
+ HBaseTestCase.addContent(this.region, HConstants.CATALOG_FAMILY);
List results = new ArrayList();
// Do simple test of getting one row only first.
Scan scan = new Scan(Bytes.toBytes("abc"), Bytes.toBytes("abd"));
scan.addFamily(HConstants.CATALOG_FAMILY);
- InternalScanner s = r.getScanner(scan);
+ InternalScanner s = region.getScanner(scan);
int count = 0;
while (s.next(results)) {
count++;
@@ -145,7 +143,7 @@ public class TestScanner {
scan = new Scan(startrow, stoprow);
scan.addFamily(HConstants.CATALOG_FAMILY);
- s = r.getScanner(scan);
+ s = region.getScanner(scan);
count = 0;
Cell kv = null;
results = new ArrayList();
@@ -162,14 +160,14 @@ public class TestScanner {
assertTrue(count > 10);
s.close();
} finally {
- HBaseTestingUtility.closeRegionAndWAL(this.r);
+ HBaseTestingUtility.closeRegionAndWAL(this.region);
}
}
void rowPrefixFilter(Scan scan) throws IOException {
List results = new ArrayList();
scan.addFamily(HConstants.CATALOG_FAMILY);
- InternalScanner s = r.getScanner(scan);
+ InternalScanner s = region.getScanner(scan);
boolean hasMore = true;
while (hasMore) {
hasMore = s.next(results);
@@ -185,7 +183,7 @@ public class TestScanner {
void rowInclusiveStopFilter(Scan scan, byte[] stopRow) throws IOException {
List results = new ArrayList();
scan.addFamily(HConstants.CATALOG_FAMILY);
- InternalScanner s = r.getScanner(scan);
+ InternalScanner s = region.getScanner(scan);
boolean hasMore = true;
while (hasMore) {
hasMore = s.next(results);
@@ -200,8 +198,8 @@ public class TestScanner {
@Test
public void testFilters() throws IOException {
try {
- this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
- HBaseTestCase.addContent(this.r, HConstants.CATALOG_FAMILY);
+ this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
+ HBaseTestCase.addContent(this.region, HConstants.CATALOG_FAMILY);
byte [] prefix = Bytes.toBytes("ab");
Filter newFilter = new PrefixFilter(prefix);
Scan scan = new Scan();
@@ -215,7 +213,7 @@ public class TestScanner {
rowInclusiveStopFilter(scan, stopRow);
} finally {
- HBaseTestingUtility.closeRegionAndWAL(this.r);
+ HBaseTestingUtility.closeRegionAndWAL(this.region);
}
}
@@ -227,10 +225,10 @@ public class TestScanner {
@Test
public void testRaceBetweenClientAndTimeout() throws Exception {
try {
- this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
- HBaseTestCase.addContent(this.r, HConstants.CATALOG_FAMILY);
+ this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
+ HBaseTestCase.addContent(this.region, HConstants.CATALOG_FAMILY);
Scan scan = new Scan();
- InternalScanner s = r.getScanner(scan);
+ InternalScanner s = region.getScanner(scan);
List results = new ArrayList();
try {
s.next(results);
@@ -242,7 +240,7 @@ public class TestScanner {
return;
}
} finally {
- HBaseTestingUtility.closeRegionAndWAL(this.r);
+ HBaseTestingUtility.closeRegionAndWAL(this.region);
}
}
@@ -252,71 +250,70 @@ public class TestScanner {
@Test
public void testScanner() throws IOException {
try {
- r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
- region = new HRegionIncommon(r);
+ region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
+ Table table = new RegionTable(region);
// Write information to the meta table
Put put = new Put(ROW_KEY, System.currentTimeMillis());
- put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+ put.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
REGION_INFO.toByteArray());
- region.put(put);
+ table.put(put);
// What we just committed is in the memstore. Verify that we can get
// it back both with scanning and get
scan(false, null);
- getRegionInfo();
+ getRegionInfo(table);
// Close and re-open
- ((HRegion)r).close();
- r = HRegion.openHRegion(r, null);
- region = new HRegionIncommon(r);
+ ((HRegion)region).close();
+ region = HRegion.openHRegion(region, null);
+ table = new RegionTable(region);
// Verify we can get the data back now that it is on disk.
scan(false, null);
- getRegionInfo();
+ getRegionInfo(table);
// Store some new information
String address = HConstants.LOCALHOST_IP + ":" + HBaseTestingUtility.randomFreePort();
put = new Put(ROW_KEY, System.currentTimeMillis());
- put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
+ put.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
Bytes.toBytes(address));
// put.add(HConstants.COL_STARTCODE, Bytes.toBytes(START_CODE));
- region.put(put);
+ table.put(put);
// Validate that we can still get the HRegionInfo, even though it is in
// an older row on disk and there is a newer row in the memstore
scan(true, address.toString());
- getRegionInfo();
+ getRegionInfo(table);
// flush cache
-
- region.flushcache();
+ this.region.flush(true);
// Validate again
scan(true, address.toString());
- getRegionInfo();
+ getRegionInfo(table);
// Close and reopen
- ((HRegion)r).close();
- r = HRegion.openHRegion(r,null);
- region = new HRegionIncommon(r);
+ ((HRegion)region).close();
+ region = HRegion.openHRegion(region,null);
+ table = new RegionTable(region);
// Validate again
scan(true, address.toString());
- getRegionInfo();
+ getRegionInfo(table);
// Now update the information again
@@ -324,38 +321,37 @@ public class TestScanner {
put = new Put(ROW_KEY, System.currentTimeMillis());
- put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
- Bytes.toBytes(address));
- region.put(put);
+ put.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes(address));
+ table.put(put);
// Validate again
scan(true, address.toString());
- getRegionInfo();
+ getRegionInfo(table);
// flush cache
- region.flushcache();
+ region.flush(true);
// Validate again
scan(true, address.toString());
- getRegionInfo();
+ getRegionInfo(table);
// Close and reopen
- ((HRegion)r).close();
- r = HRegion.openHRegion(r,null);
- region = new HRegionIncommon(r);
+ ((HRegion)this.region).close();
+ this.region = HRegion.openHRegion(region, null);
+ table = new RegionTable(this.region);
// Validate again
scan(true, address.toString());
- getRegionInfo();
+ getRegionInfo(table);
} finally {
// clean up
- HBaseTestingUtility.closeRegionAndWAL(r);
+ HBaseTestingUtility.closeRegionAndWAL(this.region);
}
}
@@ -387,7 +383,7 @@ public class TestScanner {
for (int ii = 0; ii < EXPLICIT_COLS.length; ii++) {
scan.addColumn(COLS[0], EXPLICIT_COLS[ii]);
}
- scanner = r.getScanner(scan);
+ scanner = region.getScanner(scan);
while (scanner.next(results)) {
assertTrue(hasColumn(results, HConstants.CATALOG_FAMILY,
HConstants.REGIONINFO_QUALIFIER));
@@ -448,10 +444,10 @@ public class TestScanner {
/** Use get to retrieve the HRegionInfo and validate it */
- private void getRegionInfo() throws IOException {
+ private void getRegionInfo(Table table) throws IOException {
Get get = new Get(ROW_KEY);
get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
- Result result = region.get(get);
+ Result result = table.get(get);
byte [] bytes = result.value();
validateRegionInfo(bytes);
}
@@ -464,10 +460,11 @@ public class TestScanner {
*/
@Test
public void testScanAndSyncFlush() throws Exception {
- this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
- HRegionIncommon hri = new HRegionIncommon(r);
+ this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
+ Table hri = new RegionTable(region);
try {
- LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
+ LOG.info("Added: " +
+ HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
int count = count(hri, -1, false);
assertEquals(count, count(hri, 100, false)); // do a sync flush.
@@ -475,7 +472,7 @@ public class TestScanner {
LOG.error("Failed", e);
throw e;
} finally {
- HBaseTestingUtility.closeRegionAndWAL(this.r);
+ HBaseTestingUtility.closeRegionAndWAL(this.region);
}
}
@@ -487,10 +484,11 @@ public class TestScanner {
*/
@Test
public void testScanAndRealConcurrentFlush() throws Exception {
- this.r = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
- HRegionIncommon hri = new HRegionIncommon(r);
+ this.region = TEST_UTIL.createLocalHRegion(TESTTABLEDESC, null, null);
+ Table hri = new RegionTable(region);
try {
- LOG.info("Added: " + HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
+ LOG.info("Added: " +
+ HBaseTestCase.addContent(hri, Bytes.toString(HConstants.CATALOG_FAMILY),
Bytes.toString(HConstants.REGIONINFO_QUALIFIER)));
int count = count(hri, -1, false);
assertEquals(count, count(hri, 100, true)); // do a true concurrent background thread flush
@@ -498,7 +496,7 @@ public class TestScanner {
LOG.error("Failed", e);
throw e;
} finally {
- HBaseTestingUtility.closeRegionAndWAL(this.r);
+ HBaseTestingUtility.closeRegionAndWAL(this.region);
}
}
@@ -512,8 +510,8 @@ public class TestScanner {
@SuppressWarnings("deprecation")
public void testScanAndConcurrentMajorCompact() throws Exception {
HTableDescriptor htd = TEST_UTIL.createTableDescriptor(name.getMethodName());
- this.r = TEST_UTIL.createLocalHRegion(htd, null, null);
- HRegionIncommon hri = new HRegionIncommon(r);
+ this.region = TEST_UTIL.createLocalHRegion(htd, null, null);
+ Table hri = new RegionTable(region);
try {
HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
@@ -524,18 +522,18 @@ public class TestScanner {
Delete dc = new Delete(firstRowBytes);
/* delete column1 of firstRow */
dc.deleteColumns(fam1, col1);
- r.delete(dc);
- r.flush(true);
+ region.delete(dc);
+ region.flush(true);
HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1),
secondRowBytes, thirdRowBytes);
HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1),
secondRowBytes, thirdRowBytes);
- r.flush(true);
+ region.flush(true);
- InternalScanner s = r.getScanner(new Scan());
+ InternalScanner s = region.getScanner(new Scan());
// run a major compact, column1 of firstRow will be cleaned.
- r.compact(true);
+ region.compact(true);
List results = new ArrayList();
s.next(results);
@@ -555,7 +553,7 @@ public class TestScanner {
assertTrue(CellUtil.matchingFamily(results.get(0), fam1));
assertTrue(CellUtil.matchingFamily(results.get(1), fam2));
} finally {
- HBaseTestingUtility.closeRegionAndWAL(this.r);
+ HBaseTestingUtility.closeRegionAndWAL(this.region);
}
}
@@ -567,19 +565,20 @@ public class TestScanner {
* @return Count of rows found.
* @throws IOException
*/
- private int count(final HRegionIncommon hri, final int flushIndex,
- boolean concurrent)
+ private int count(final Table countTable, final int flushIndex, boolean concurrent)
throws IOException {
LOG.info("Taking out counting scan");
- ScannerIncommon s = hri.getScanner(HConstants.CATALOG_FAMILY, EXPLICIT_COLS,
- HConstants.EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP);
- List values = new ArrayList();
+ Scan scan = new Scan();
+ for (byte [] qualifier: EXPLICIT_COLS) {
+ scan.addColumn(HConstants.CATALOG_FAMILY, qualifier);
+ }
+ ResultScanner s = countTable.getScanner(scan);
int count = 0;
boolean justFlushed = false;
- while (s.next(values)) {
+ while (s.next() != null) {
if (justFlushed) {
LOG.info("after next() just after next flush");
- justFlushed=false;
+ justFlushed = false;
}
count++;
if (flushIndex == count) {
@@ -587,7 +586,7 @@ public class TestScanner {
Thread t = new Thread() {
public void run() {
try {
- hri.flushcache();
+ region.flush(true);
LOG.info("Finishing flush");
} catch (IOException e) {
LOG.info("Failed flush cache");
@@ -607,5 +606,4 @@ public class TestScanner {
LOG.info("Found " + count + " items");
return count;
}
-
}