From 31c02ca58139909c51d361c47734bf2c587d0c5e Mon Sep 17 00:00:00 2001 From: mbautin Date: Tue, 21 Feb 2012 19:04:53 -0800 Subject: [PATCH] Use builder pattern for HColumnDescriptor --- .../org/apache/hadoop/hbase/HColumnDescriptor.java | 63 +++++++++++-------- .../org/apache/hadoop/hbase/HTableDescriptor.java | 27 +++++--- .../client/UnmodifyableHColumnDescriptor.java | 14 ++-- .../hadoop/hbase/thrift/ThriftUtilities.java | 10 ++- .../org/apache/hadoop/hbase/HBaseTestCase.java | 36 +++-------- .../apache/hadoop/hbase/HBaseTestingUtility.java | 66 +++---------------- .../org/apache/hadoop/hbase/TestSerialization.java | 27 ++++----- .../hadoop/hbase/client/TestFromClientSide.java | 15 +---- .../hbase/io/encoding/TestEncodedSeekers.java | 10 ++- .../hadoop/hbase/io/hfile/TestCacheOnWrite.java | 14 +++- .../io/hfile/TestForceCacheImportantBlocks.java | 10 +-- .../io/hfile/TestScannerSelectionUsingTTL.java | 9 +-- .../hbase/mapreduce/TestHFileOutputFormat.java | 7 ++- .../hadoop/hbase/mapreduce/TestImportExport.java | 34 +++-------- .../hadoop/hbase/regionserver/TestBlocksRead.java | 30 +++++---- .../hbase/regionserver/TestColumnSeeking.java | 7 +-- .../hbase/regionserver/TestFSErrorsExposed.java | 7 +- .../hadoop/hbase/regionserver/TestHRegion.java | 22 +++---- .../hbase/regionserver/TestMultiColumnScanner.java | 11 +++- .../hbase/regionserver/TestScanWithBloomError.java | 10 ++- .../hadoop/hbase/regionserver/TestScanner.java | 12 ++-- .../hbase/regionserver/TestSeekOptimizations.java | 7 ++- .../hadoop/hbase/regionserver/TestWideScanner.java | 22 ++----- .../thrift2/TestThriftHBaseServiceHandler.java | 5 +- 24 files changed, 204 insertions(+), 271 deletions(-) diff --git src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 685a1a6..bb895a6 100644 --- src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -244,7 +244,9 @@ public class HColumnDescriptor implements WritableComparable * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains * a : * @throws IllegalArgumentException if the number of versions is <= 0 + * @deprecated use {@link #HColumnDescriptor(String)} and setters */ + @Deprecated public HColumnDescriptor(final byte [] familyName, final int maxVersions, final String compression, final boolean inMemory, final boolean blockCacheEnabled, @@ -274,7 +276,9 @@ public class HColumnDescriptor implements WritableComparable * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains * a : * @throws IllegalArgumentException if the number of versions is <= 0 + * @deprecated use {@link #HColumnDescriptor(String)} and setters */ + @Deprecated public HColumnDescriptor(final byte [] familyName, final int maxVersions, final String compression, final boolean inMemory, final boolean blockCacheEnabled, final int blocksize, @@ -312,7 +316,9 @@ public class HColumnDescriptor implements WritableComparable * other than 'word' characters: i.e. [a-zA-Z_0-9] or contains * a : * @throws IllegalArgumentException if the number of versions is <= 0 + * @deprecated use {@link #HColumnDescriptor(String)} and setters */ + @Deprecated public HColumnDescriptor(final byte[] familyName, final int minVersions, final int maxVersions, final boolean keepDeletedCells, final String compression, final boolean encodeOnDisk, @@ -428,9 +434,10 @@ public class HColumnDescriptor implements WritableComparable * @param key The key. * @param value The value. 
*/ - public void setValue(byte[] key, byte[] value) { + public HColumnDescriptor setValue(byte[] key, byte[] value) { values.put(new ImmutableBytesWritable(key), new ImmutableBytesWritable(value)); + return this; } /** @@ -444,8 +451,9 @@ public class HColumnDescriptor implements WritableComparable * @param key The key. * @param value The value. */ - public void setValue(String key, String value) { + public HColumnDescriptor setValue(String key, String value) { setValue(Bytes.toBytes(key), Bytes.toBytes(value)); + return this; } /** @return compression type being used for the column family */ @@ -475,9 +483,10 @@ public class HColumnDescriptor implements WritableComparable /** * @param maxVersions maximum number of versions */ - public void setMaxVersions(int maxVersions) { + public HColumnDescriptor setMaxVersions(int maxVersions) { setValue(HConstants.VERSIONS, Integer.toString(maxVersions)); cachedMaxVersions = maxVersions; + return this; } /** @@ -496,9 +505,10 @@ public class HColumnDescriptor implements WritableComparable * @param s Blocksize to use when writing out storefiles/hfiles on this * column family. */ - public void setBlocksize(int s) { + public HColumnDescriptor setBlocksize(int s) { setValue(BLOCKSIZE, Integer.toString(s)); this.blocksize = null; + return this; } /** @@ -515,7 +525,7 @@ public class HColumnDescriptor implements WritableComparable * for how to enable it. * @param type Compression type setting. */ - public void setCompressionType(Compression.Algorithm type) { + public HColumnDescriptor setCompressionType(Compression.Algorithm type) { String compressionType; switch (type) { case LZO: compressionType = "LZO"; break; @@ -523,7 +533,7 @@ public class HColumnDescriptor implements WritableComparable case SNAPPY: compressionType = "SNAPPY"; break; default: compressionType = "NONE"; break; } - setValue(COMPRESSION, compressionType); + return setValue(COMPRESSION, compressionType); } /** @return data block encoding algorithm used on disk */ @@ -547,8 +557,8 @@ public class HColumnDescriptor implements WritableComparable * Set the flag indicating that we only want to encode data block in cache * but not on disk. */ - public void setEncodeOnDisk(boolean encodeOnDisk) { - setValue(ENCODE_ON_DISK, String.valueOf(encodeOnDisk)); + public HColumnDescriptor setEncodeOnDisk(boolean encodeOnDisk) { + return setValue(ENCODE_ON_DISK, String.valueOf(encodeOnDisk)); } /** @@ -567,14 +577,14 @@ public class HColumnDescriptor implements WritableComparable * Set data block encoding algorithm used in block cache. * @param type What kind of data block encoding will be used. */ - public void setDataBlockEncoding(DataBlockEncoding type) { + public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding type) { String name; if (type != null) { name = type.toString(); } else { name = DataBlockEncoding.NONE.toString(); } - setValue(DATA_BLOCK_ENCODING, name); + return setValue(DATA_BLOCK_ENCODING, name); } /** @@ -591,7 +601,8 @@ public class HColumnDescriptor implements WritableComparable * for how to enable it. * @param type Compression type setting. 
*/ - public void setCompactionCompressionType(Compression.Algorithm type) { + public HColumnDescriptor setCompactionCompressionType( + Compression.Algorithm type) { String compressionType; switch (type) { case LZO: compressionType = "LZO"; break; @@ -599,7 +610,7 @@ public class HColumnDescriptor implements WritableComparable case SNAPPY: compressionType = "SNAPPY"; break; default: compressionType = "NONE"; break; } - setValue(COMPRESSION_COMPACT, compressionType); + return setValue(COMPRESSION_COMPACT, compressionType); } /** @@ -616,8 +627,8 @@ public class HColumnDescriptor implements WritableComparable * @param inMemory True if we are to keep all values in the HRegionServer * cache */ - public void setInMemory(boolean inMemory) { - setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory)); + public HColumnDescriptor setInMemory(boolean inMemory) { + return setValue(HConstants.IN_MEMORY, Boolean.toString(inMemory)); } public boolean getKeepDeletedCells() { @@ -632,8 +643,8 @@ public class HColumnDescriptor implements WritableComparable * @param keepDeletedCells True if deleted rows should not be collected * immediately. */ - public void setKeepDeletedCells(boolean keepDeletedCells) { - setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells)); + public HColumnDescriptor setKeepDeletedCells(boolean keepDeletedCells) { + return setValue(KEEP_DELETED_CELLS, Boolean.toString(keepDeletedCells)); } /** @@ -647,8 +658,8 @@ public class HColumnDescriptor implements WritableComparable /** * @param timeToLive Time-to-live of cell contents, in seconds. */ - public void setTimeToLive(int timeToLive) { - setValue(TTL, Integer.toString(timeToLive)); + public HColumnDescriptor setTimeToLive(int timeToLive) { + return setValue(TTL, Integer.toString(timeToLive)); } /** @@ -663,8 +674,8 @@ public class HColumnDescriptor implements WritableComparable * @param minVersions The minimum number of versions to keep. * (used when timeToLive is set) */ - public void setMinVersions(int minVersions) { - setValue(MIN_VERSIONS, Integer.toString(minVersions)); + public HColumnDescriptor setMinVersions(int minVersions) { + return setValue(MIN_VERSIONS, Integer.toString(minVersions)); } /** @@ -680,8 +691,8 @@ public class HColumnDescriptor implements WritableComparable /** * @param blockCacheEnabled True if MapFile blocks should be cached. 
*/ - public void setBlockCacheEnabled(boolean blockCacheEnabled) { - setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled)); + public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { + return setValue(BLOCKCACHE, Boolean.toString(blockCacheEnabled)); } /** @@ -698,8 +709,8 @@ public class HColumnDescriptor implements WritableComparable /** * @param bt bloom filter type */ - public void setBloomFilterType(final StoreFile.BloomType bt) { - setValue(BLOOMFILTER, bt.toString()); + public HColumnDescriptor setBloomFilterType(final StoreFile.BloomType bt) { + return setValue(BLOOMFILTER, bt.toString()); } /** @@ -716,8 +727,8 @@ public class HColumnDescriptor implements WritableComparable /** * @param scope the scope tag */ - public void setScope(int scope) { - setValue(REPLICATION_SCOPE, Integer.toString(scope)); + public HColumnDescriptor setScope(int scope) { + return setValue(REPLICATION_SCOPE, Integer.toString(scope)); } /** diff --git src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 472a22e..503b9cb 100644 --- src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -1113,21 +1113,26 @@ public class HTableDescriptor implements WritableComparable { /** Table descriptor for -ROOT- catalog table */ public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor( HConstants.ROOT_TABLE_NAME, - new HColumnDescriptor[] { new HColumnDescriptor(HConstants.CATALOG_FAMILY, - 10, // Ten is arbitrary number. Keep versions to help debugging. - Compression.Algorithm.NONE.getName(), true, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HConstants.REPLICATION_SCOPE_LOCAL) }); + new HColumnDescriptor[] { + new HColumnDescriptor(HConstants.CATALOG_FAMILY) + // Ten is an arbitrary number. Keep versions to help debugging. + .setMaxVersions(10) + .setInMemory(true) + .setBlocksize(8 * 1024) + .setTimeToLive(HConstants.FOREVER) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + }); /** Table descriptor for .META. catalog table */ public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor( HConstants.META_TABLE_NAME, new HColumnDescriptor[] { - new HColumnDescriptor(HConstants.CATALOG_FAMILY, - 10, // Ten is arbitrary number. Keep versions to help debugging. - Compression.Algorithm.NONE.getName(), true, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HConstants.REPLICATION_SCOPE_LOCAL)}); - + new HColumnDescriptor(HConstants.CATALOG_FAMILY) + // Ten is an arbitrary number. Keep versions to help debugging. + .setMaxVersions(10) + .setInMemory(true) + .setBlocksize(8 * 1024) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + }); public void setOwner(User owner) { setOwnerString(owner != null ?
owner.getShortName() : null); diff --git src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java index d99f02a..301ea12 100644 --- src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java +++ src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHColumnDescriptor.java @@ -39,7 +39,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(byte[], byte[]) */ @Override - public void setValue(byte[] key, byte[] value) { + public HColumnDescriptor setValue(byte[] key, byte[] value) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -47,7 +47,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setValue(java.lang.String, java.lang.String) */ @Override - public void setValue(String key, String value) { + public HColumnDescriptor setValue(String key, String value) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -55,7 +55,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setMaxVersions(int) */ @Override - public void setMaxVersions(int maxVersions) { + public HColumnDescriptor setMaxVersions(int maxVersions) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -63,7 +63,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean) */ @Override - public void setInMemory(boolean inMemory) { + public HColumnDescriptor setInMemory(boolean inMemory) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -71,7 +71,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setBlockCacheEnabled(boolean) */ @Override - public void setBlockCacheEnabled(boolean blockCacheEnabled) { + public HColumnDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -79,7 +79,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setTimeToLive(int) */ @Override - public void setTimeToLive(int timeToLive) { + public HColumnDescriptor setTimeToLive(int timeToLive) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } @@ -87,7 +87,7 @@ public class UnmodifyableHColumnDescriptor extends HColumnDescriptor { * @see org.apache.hadoop.hbase.HColumnDescriptor#setCompressionType(org.apache.hadoop.hbase.io.hfile.Compression.Algorithm) */ @Override - public void setCompressionType(Compression.Algorithm type) { + public HColumnDescriptor setCompressionType(Compression.Algorithm type) { throw new UnsupportedOperationException("HColumnDescriptor is read-only"); } } \ No newline at end of file diff --git src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java index 790f034..d7fa95e 100644 --- src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java +++ src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java @@ -57,9 +57,13 @@ public class ThriftUtilities { throw new IllegalArgument("column name is empty"); } byte [] parsedName = 
KeyValue.parseColumn(Bytes.getBytes(in.name))[0]; - HColumnDescriptor col = new HColumnDescriptor(parsedName, - in.maxVersions, comp.getName(), in.inMemory, in.blockCacheEnabled, - in.timeToLive, bt.toString()); + HColumnDescriptor col = new HColumnDescriptor(parsedName) + .setMaxVersions(in.maxVersions) + .setCompressionType(comp) + .setInMemory(in.inMemory) + .setBlockCacheEnabled(in.blockCacheEnabled) + .setTimeToLive(in.timeToLive) + .setBloomFilterType(bt); return col; } diff --git src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index e3ddc29..5fb9e2c 100644 --- src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -220,33 +220,15 @@ public abstract class HBaseTestCase extends TestCase { protected HTableDescriptor createTableDescriptor(final String name, final int minVersions, final int versions, final int ttl, boolean keepDeleted) { HTableDescriptor htd = new HTableDescriptor(name); - htd.addFamily(new HColumnDescriptor(fam1, minVersions, versions, - keepDeleted, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); - htd.addFamily(new HColumnDescriptor(fam2, minVersions, versions, - keepDeleted, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); - htd.addFamily(new HColumnDescriptor(fam3, minVersions, versions, - keepDeleted, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, ttl, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); + for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) { + htd.addFamily(new HColumnDescriptor(cfName) + .setMinVersions(minVersions) + .setMaxVersions(versions) + .setKeepDeletedCells(keepDeleted) + .setBlockCacheEnabled(false) + .setTimeToLive(ttl) + ); + } return htd; } diff --git src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index ec9d581..849e956 100644 --- src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.Compression; import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm; -import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -67,11 +66,9 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import 
org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.Keying; import org.apache.hadoop.hbase.util.RegionSplitter; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Writables; @@ -775,13 +772,8 @@ public class HBaseTestingUtility { throws IOException{ HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions); desc.addFamily(hcd); } getHBaseAdmin().createTable(desc, startKey, endKey, numRegions); @@ -821,13 +813,8 @@ public class HBaseTestingUtility { throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for(byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions); desc.addFamily(hcd); } getHBaseAdmin().createTable(desc); @@ -860,13 +847,8 @@ public class HBaseTestingUtility { throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions); desc.addFamily(hcd); } getHBaseAdmin().createTable(desc); @@ -885,13 +867,9 @@ public class HBaseTestingUtility { int numVersions, int blockSize) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - blockSize, HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions) + .setBlocksize(blockSize); desc.addFamily(hcd); } getHBaseAdmin().createTable(desc); @@ -912,13 +890,8 @@ public class HBaseTestingUtility { HTableDescriptor desc = new HTableDescriptor(tableName); int i = 0; for (byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family, numVersions[i], - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions[i]); desc.addFamily(hcd); i++; } @@ -1930,23 
+1903,6 @@ public class HBaseTestingUtility { return hloc.getPort(); } - public HRegion createTestRegion(String tableName, String cfName, - Compression.Algorithm comprAlgo, BloomType bloomType, int maxVersions, - int blockSize, DataBlockEncoding encoding, boolean encodeOnDisk) - throws IOException { - HColumnDescriptor hcd = - new HColumnDescriptor(Bytes.toBytes(cfName), maxVersions, - comprAlgo.getName(), - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_TTL, - bloomType.toString()); - hcd.setBlocksize(blockSize); - hcd.setDataBlockEncoding(encoding); - hcd.setEncodeOnDisk(encodeOnDisk); - return createTestRegion(tableName, hcd); - } - public HRegion createTestRegion(String tableName, HColumnDescriptor hcd) throws IOException { HTableDescriptor htd = new HTableDescriptor(tableName); diff --git src/test/java/org/apache/hadoop/hbase/TestSerialization.java src/test/java/org/apache/hadoop/hbase/TestSerialization.java index 0d7d0d2..21c92ff 100644 --- src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -558,21 +558,18 @@ public class TestSerialization { protected HTableDescriptor createTableDescriptor(final String name, final int versions) { HTableDescriptor htd = new HTableDescriptor(name); - htd.addFamily(new HColumnDescriptor(fam1, versions, - HColumnDescriptor.DEFAULT_COMPRESSION, false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HConstants.FOREVER, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); - htd.addFamily(new HColumnDescriptor(fam2, versions, - HColumnDescriptor.DEFAULT_COMPRESSION, false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HConstants.FOREVER, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); - htd.addFamily(new HColumnDescriptor(fam3, versions, - HColumnDescriptor.DEFAULT_COMPRESSION, false, false, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HConstants.FOREVER, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); + htd.addFamily(new HColumnDescriptor(fam1) + .setMaxVersions(versions) + .setBlockCacheEnabled(false) + ); + htd.addFamily(new HColumnDescriptor(fam2) + .setMaxVersions(versions) + .setBlockCacheEnabled(false) + ); + htd.addFamily(new HColumnDescriptor(fam3) + .setMaxVersions(versions) + .setBlockCacheEnabled(false) + ); return htd; } diff --git src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index a10f31c..bdeaefe 100644 --- src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -157,19 +157,8 @@ public class TestFromClientSide { final byte[] T1 = Bytes.toBytes("T1"); final byte[] T2 = Bytes.toBytes("T2"); final byte[] T3 = Bytes.toBytes("T3"); - HColumnDescriptor hcd = new HColumnDescriptor(FAMILY, - HColumnDescriptor.DEFAULT_MIN_VERSIONS, - HColumnDescriptor.DEFAULT_VERSIONS, - true, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, - HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(FAMILY) + .setKeepDeletedCells(true); HTableDescriptor desc = new 
HTableDescriptor(TABLENAME); desc.addFamily(hcd); diff --git src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java index 26c76f9..f40afe4 100644 --- src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java +++ src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -94,9 +95,12 @@ public class TestEncodedSeekers { new CacheConfig(testUtil.getConfiguration()).getBlockCache(); cache.clearCache(); - HRegion region = testUtil.createTestRegion(TABLE_NAME, CF_NAME, - Algorithm.NONE, BloomType.NONE, MAX_VERSIONS, HFile.DEFAULT_BLOCKSIZE, - encoding, encodeOnDisk); + HRegion region = testUtil.createTestRegion( + TABLE_NAME, new HColumnDescriptor(CF_NAME) + .setMaxVersions(MAX_VERSIONS) + .setDataBlockEncoding(encoding) + .setEncodeOnDisk(encodeOnDisk) + ); LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator( MIN_VALUE_SIZE, MAX_VALUE_SIZE); diff --git src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 995a26e..687695a 100644 --- src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.Put; @@ -315,10 +316,15 @@ public class TestCacheOnWrite { final String cf = "myCF"; final byte[] cfBytes = Bytes.toBytes(cf); final int maxVersions = 3; - HRegion region = TEST_UTIL.createTestRegion(table, cf, compress, - BLOOM_TYPE, maxVersions, HFile.DEFAULT_BLOCKSIZE, - encoder.getEncodingInCache(), - encoder.getEncodingOnDisk() != DataBlockEncoding.NONE); + HRegion region = TEST_UTIL.createTestRegion(table, + new HColumnDescriptor(cf) + .setCompressionType(compress) + .setBloomFilterType(BLOOM_TYPE) + .setMaxVersions(maxVersions) + .setDataBlockEncoding(encoder.getEncodingInCache()) + .setEncodeOnDisk(encoder.getEncodingOnDisk() != + DataBlockEncoding.NONE) + ); int rowIdx = 0; long ts = EnvironmentEdgeManager.currentTimeMillis(); for (int iFile = 0; iFile < 5; ++iFile) { diff --git src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java index 9e38332..5f8214e 100644 --- src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java +++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java @@ -104,12 +104,10 @@ public class TestForceCacheImportantBlocks { SchemaMetrics.setUseTableNameInTest(false); HColumnDescriptor hcd = - new HColumnDescriptor(Bytes.toBytes(CF), MAX_VERSIONS, - COMPRESSION_ALGORITHM.getName(), - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_TTL, - BLOOM_TYPE.toString()); + new HColumnDescriptor(Bytes.toBytes(CF)) + 
.setMaxVersions(MAX_VERSIONS) + .setCompressionType(COMPRESSION_ALGORITHM) + .setBloomFilterType(BLOOM_TYPE); hcd.setBlocksize(BLOCK_SIZE); hcd.setBlockCacheEnabled(cfCacheEnabled); HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd); diff --git src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index 52a1daa..17284a3 100644 --- src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -99,12 +99,9 @@ public class TestScannerSelectionUsingTTL { @Test public void testScannerSelection() throws IOException { HColumnDescriptor hcd = - new HColumnDescriptor(FAMILY_BYTES, Integer.MAX_VALUE, - Compression.Algorithm.NONE.getName(), - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - TTL_SECONDS, - BloomType.NONE.toString()); + new HColumnDescriptor(FAMILY_BYTES) + .setMaxVersions(Integer.MAX_VALUE) + .setTimeToLive(TTL_SECONDS); HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE)); diff --git src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java index b606fad..a6bcbac 100644 --- src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java +++ src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java @@ -511,8 +511,11 @@ public class TestHFileOutputFormat { { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME); for (Entry entry : familyToCompression.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey().getBytes(), 1, entry.getValue().getName(), - false, false, 0, "none")); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) + .setMaxVersions(1) + .setCompressionType(entry.getValue()) + .setBlockCacheEnabled(false) + .setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } diff --git src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 0029d94..a3faeb3 100644 --- src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -145,19 +145,10 @@ public class TestImportExport { public void testWithDeletes() throws Exception { String EXPORT_TABLE = "exportWithDeletes"; HTableDescriptor desc = new HTableDescriptor(EXPORT_TABLE); - desc.addFamily(new HColumnDescriptor(FAMILYA, - HColumnDescriptor.DEFAULT_MIN_VERSIONS, - 5, /* versions */ - true /* keep deleted cells */, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, - HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); + desc.addFamily(new HColumnDescriptor(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(true) + ); UTIL.getHBaseAdmin().createTable(desc); HTable t = new HTable(UTIL.getConfiguration(), EXPORT_TABLE); @@ -193,19 +184,10 @@ public class TestImportExport { String IMPORT_TABLE = "importWithDeletes"; desc = new HTableDescriptor(IMPORT_TABLE); - 
desc.addFamily(new HColumnDescriptor(FAMILYA, - HColumnDescriptor.DEFAULT_MIN_VERSIONS, - 5, /* versions */ - true /* keep deleted cells */, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_ENCODE_ON_DISK, - HColumnDescriptor.DEFAULT_DATA_BLOCK_ENCODING, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, - HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER, - HConstants.REPLICATION_SCOPE_LOCAL)); + desc.addFamily(new HColumnDescriptor(FAMILYA) + .setMaxVersions(5) + .setKeepDeletedCells(true) + ); UTIL.getHBaseAdmin().createTable(desc); t.close(); t = new HTable(UTIL.getConfiguration(), IMPORT_TABLE); diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java index 6ccb109..bd83a71 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java @@ -28,7 +28,14 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestCase; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -36,7 +43,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.io.hfile.LruBlockCache; +import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; import org.junit.Test; @@ -45,7 +52,8 @@ import org.junit.experimental.categories.Category; @Category(MediumTests.class) public class TestBlocksRead extends HBaseTestCase { static final Log LOG = LogFactory.getLog(TestBlocksRead.class); - static final String[] BLOOM_TYPE = new String[] { "ROWCOL", "ROW", "NONE" }; + static final BloomType[] BLOOM_TYPE = new BloomType[] { BloomType.ROWCOL, + BloomType.ROW, BloomType.NONE }; private static BlockCache blockCache; @@ -82,16 +90,10 @@ public class TestBlocksRead extends HBaseTestCase { HTableDescriptor htd = new HTableDescriptor(tableName); HColumnDescriptor familyDesc; for (int i = 0; i < BLOOM_TYPE.length; i++) { - String bloomType = BLOOM_TYPE[i]; - familyDesc = new HColumnDescriptor( - Bytes.toBytes(family + "_" + bloomType), - HColumnDescriptor.DEFAULT_VERSIONS, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - 1, // small block size deliberate; each kv on its own block - HColumnDescriptor.DEFAULT_TTL, BLOOM_TYPE[i], - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + BloomType bloomType = BLOOM_TYPE[i]; + familyDesc = new HColumnDescriptor(family + "_" + bloomType) + .setBlocksize(1) // small block size deliberate; each kv on its own block + .setBloomFilterType(bloomType); htd.addFamily(familyDesc); } @@ -138,7 +140,7 @@ public class TestBlocksRead 
extends HBaseTestCase { KeyValue[] kvs = null; for (int i = 0; i < BLOOM_TYPE.length; i++) { - String bloomType = BLOOM_TYPE[i]; + BloomType bloomType = BLOOM_TYPE[i]; byte[] cf = Bytes.toBytes(family + "_" + bloomType); long blocksStart = getBlkAccessCount(cf); Get get = new Get(Bytes.toBytes(row)); diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java index 3a9c9fb..84296ab 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java @@ -55,12 +55,7 @@ public class TestColumnSeeking { String table = "TestDuplicateVersions"; HColumnDescriptor hcd = - new HColumnDescriptor(familyBytes, 1000, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_TTL, - HColumnDescriptor.DEFAULT_BLOOMFILTER); + new HColumnDescriptor(familyBytes).setMaxVersions(1000); HTableDescriptor htd = new HTableDescriptor(table); htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false); diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java index 91b869e..eebef46 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java @@ -168,9 +168,10 @@ public class TestFSErrorsExposed { HBaseAdmin admin = new HBaseAdmin(util.getConfiguration()); HTableDescriptor desc = new HTableDescriptor(tableName); - desc.addFamily(new HColumnDescriptor( - fam, 1, HColumnDescriptor.DEFAULT_COMPRESSION, - false, false, HConstants.FOREVER, "NONE")); + desc.addFamily(new HColumnDescriptor(fam) + .setMaxVersions(1) + .setBlockCacheEnabled(false) + ); admin.createTable(desc); // Make it fail faster. 
util.getConfiguration().setInt("hbase.client.retries.number", 1); diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 97c624d..8db43a4 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -3033,9 +3033,9 @@ public class TestHRegion extends HBaseTestCase { byte [] qf1 = Bytes.toBytes("col"); byte [] val1 = Bytes.toBytes("value1"); // Create Table - HColumnDescriptor hcd = new HColumnDescriptor(fam1, Integer.MAX_VALUE, - HColumnDescriptor.DEFAULT_COMPRESSION, false, true, - HColumnDescriptor.DEFAULT_TTL, "rowcol"); + HColumnDescriptor hcd = new HColumnDescriptor(fam1) + .setMaxVersions(Integer.MAX_VALUE) + .setBloomFilterType(BloomType.ROWCOL); HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(hcd); @@ -3089,13 +3089,9 @@ public class TestHRegion extends HBaseTestCase { byte [] FAMILY = Bytes.toBytes("family"); //Create table - HColumnDescriptor hcd = new HColumnDescriptor(FAMILY, Integer.MAX_VALUE, - HColumnDescriptor.DEFAULT_COMPRESSION, - HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, - HColumnDescriptor.DEFAULT_BLOCKSIZE, HColumnDescriptor.DEFAULT_TTL, - "rowcol", - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE); + HColumnDescriptor hcd = new HColumnDescriptor(FAMILY) + .setMaxVersions(Integer.MAX_VALUE) + .setBloomFilterType(BloomType.ROWCOL); HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); @@ -3138,9 +3134,9 @@ public class TestHRegion extends HBaseTestCase { byte [] familyName = Bytes.toBytes("familyName"); // Create Table - HColumnDescriptor hcd = new HColumnDescriptor(familyName, Integer.MAX_VALUE, - HColumnDescriptor.DEFAULT_COMPRESSION, false, true, - HColumnDescriptor.DEFAULT_TTL, "rowcol"); + HColumnDescriptor hcd = new HColumnDescriptor(familyName) + .setMaxVersions(Integer.MAX_VALUE) + .setBloomFilterType(BloomType.ROWCOL); HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(hcd); diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java index 3de3ec5..b716c53 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java @@ -39,6 +39,7 @@ import org.apache.commons.lang.ArrayUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueTestUtil; import org.apache.hadoop.hbase.MediumTests; @@ -150,9 +151,13 @@ public class TestMultiColumnScanner { @Test public void testMultiColumnScanner() throws IOException { - HRegion region = TEST_UTIL.createTestRegion(TABLE_NAME, FAMILY, comprAlgo, - bloomType, MAX_VERSIONS, HFile.DEFAULT_BLOCKSIZE, - dataBlockEncoding, true); + HRegion region = TEST_UTIL.createTestRegion(TABLE_NAME, + new HColumnDescriptor(FAMILY) + .setCompressionType(comprAlgo) + .setBloomFilterType(bloomType) + .setMaxVersions(MAX_VERSIONS) + .setDataBlockEncoding(dataBlockEncoding) + ); List rows = sequentialStrings("row", NUM_ROWS); List qualifiers = sequentialStrings("qual", 
NUM_COLUMNS); List kvs = new ArrayList(); diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java index 786bf82..bdb1231 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java @@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueTestUtil; import org.apache.hadoop.hbase.SmallTests; @@ -102,10 +103,11 @@ public class TestScanWithBloomError { @Test public void testThreeStoreFiles() throws IOException { - region = TEST_UTIL.createTestRegion(TABLE_NAME, FAMILY, - Compression.Algorithm.GZ, bloomType, - TestMultiColumnScanner.MAX_VERSIONS, HFile.DEFAULT_BLOCKSIZE, - DataBlockEncoding.NONE, true); + region = TEST_UTIL.createTestRegion(TABLE_NAME, + new HColumnDescriptor(FAMILY) + .setCompressionType(Compression.Algorithm.GZ) + .setBloomFilterType(bloomType) + .setMaxVersions(TestMultiColumnScanner.MAX_VERSIONS)); createStoreFile(new int[] {1, 2, 6}); createStoreFile(new int[] {1, 2, 3, 7}); createStoreFile(new int[] {1, 9}); diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java index 6e95bb2..32e8d18 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -67,11 +67,13 @@ public class TestScanner extends HBaseTestCase { static final HTableDescriptor TESTTABLEDESC = new HTableDescriptor("testscanner"); static { - TESTTABLEDESC.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY, - 10, // Ten is arbitrary number. Keep versions to help debuggging. - Compression.Algorithm.NONE.getName(), false, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HConstants.REPLICATION_SCOPE_LOCAL)); + TESTTABLEDESC.addFamily( + new HColumnDescriptor(HConstants.CATALOG_FAMILY) + // Ten is an arbitrary number. Keep versions to help debugging. 
+ .setMaxVersions(10) + .setBlockCacheEnabled(false) + .setBlocksize(8 * 1024) + ); } /** HRegionInfo for root region */ public static final HRegionInfo REGION_INFO = diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java index 9a77ebf..2a092e7 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java @@ -37,6 +37,7 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; @@ -142,8 +143,10 @@ public class TestSeekOptimizations { @Test public void testMultipleTimestampRanges() throws IOException { region = TEST_UTIL.createTestRegion(TestSeekOptimizations.class.getName(), - FAMILY, comprAlgo, bloomType, Integer.MAX_VALUE, - HFile.DEFAULT_BLOCKSIZE, DataBlockEncoding.NONE, true); + new HColumnDescriptor(FAMILY) + .setCompressionType(comprAlgo) + .setBloomFilterType(bloomType) + ); // Delete the given timestamp and everything before. final long latestDelTS = USE_MANY_STORE_FILES ? 1397 : -1; diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index 5ce6653..d212c4e 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -48,21 +48,13 @@ public class TestWideScanner extends HBaseTestCase { static final HTableDescriptor TESTTABLEDESC = new HTableDescriptor("testwidescan"); static { - TESTTABLEDESC.addFamily(new HColumnDescriptor(A, - 100, // Keep versions to help debuggging. - Compression.Algorithm.NONE.getName(), false, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE)); - TESTTABLEDESC.addFamily(new HColumnDescriptor(B, - 100, // Keep versions to help debuggging. - Compression.Algorithm.NONE.getName(), false, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE)); - TESTTABLEDESC.addFamily(new HColumnDescriptor(C, - 100, // Keep versions to help debuggging. - Compression.Algorithm.NONE.getName(), false, true, 8 * 1024, - HConstants.FOREVER, StoreFile.BloomType.NONE.toString(), - HColumnDescriptor.DEFAULT_REPLICATION_SCOPE)); + for (byte[] cfName : new byte[][] { A, B, C }) { + TESTTABLEDESC.addFamily(new HColumnDescriptor(cfName) + // Keep versions to help debugging. 
+ .setMaxVersions(100) + .setBlocksize(8 * 1024) + ); + } } /** HRegionInfo for root region */ diff --git src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java index 3aa4950..b848ac7 100644 --- src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java +++ src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java @@ -87,8 +87,9 @@ public class TestThriftHBaseServiceHandler { private static byte[] valueBname = Bytes.toBytes("valueB"); private static HColumnDescriptor[] families = new HColumnDescriptor[] { new HColumnDescriptor(familyAname), - new HColumnDescriptor(familyBname, 2, HColumnDescriptor.DEFAULT_COMPRESSION, HColumnDescriptor.DEFAULT_IN_MEMORY, - HColumnDescriptor.DEFAULT_BLOCKCACHE, HColumnDescriptor.DEFAULT_TTL, HColumnDescriptor.DEFAULT_BLOOMFILTER) }; + new HColumnDescriptor(familyBname) + .setMaxVersions(2) + }; public void assertTColumnValuesEqual(List columnValuesA, List columnValuesB) { assertEquals(columnValuesA.size(), columnValuesB.size()); -- 1.7.4.4
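
Usage sketch (illustrative, not part of the patch): with this change every HColumnDescriptor setter returns this, so a column family can be configured in one chained expression instead of through the now-deprecated wide constructors. The snippet below assumes only what the diff shows; the class name, table name, family name, and chosen settings are hypothetical, while the constructors, setters, and constants are the ones touched above (package layout as used throughout this diff).

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.regionserver.StoreFile;

public class ColumnFamilyBuilderSketch {
  public static void main(String[] args) {
    // Each setter mutates the descriptor and returns `this`, so the chain
    // below is equivalent to calling the setters one statement at a time.
    HColumnDescriptor hcd = new HColumnDescriptor("cf")
        .setMaxVersions(3)
        .setCompressionType(Compression.Algorithm.GZ)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setTimeToLive(HConstants.FOREVER)
        .setBloomFilterType(StoreFile.BloomType.ROW)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL);

    HTableDescriptor htd = new HTableDescriptor("example_table");
    htd.addFamily(hcd);
  }
}

Note that UnmodifyableHColumnDescriptor overrides these setters with the same HColumnDescriptor return type but still throws UnsupportedOperationException, so a chain started from a read-only descriptor fails on the first setter, exactly as the old void overrides did.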