diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
index 2c76073..8688883 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
@@ -18,6 +18,7 @@
*/
package org.apache.hadoop.hbase.mapreduce;
+import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
@@ -29,7 +30,6 @@ import java.util.Map;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.UUID;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -67,7 +68,7 @@ import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
/**
* Writes HFiles. Passed KeyValues must arrive in order.
* Writes current time as the sequence id for the file. Sets the major compacted
- * attribute on created hfiles. Calling write(null,null) will forceably roll
+ * attribute on created hfiles. Calling write(null,null) will forcibly roll
* all HFiles being written.
*
* Using this class as part of a MapReduce job is best done
@@ -78,11 +79,26 @@ import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
@InterfaceStability.Stable
public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue> {
static Log LOG = LogFactory.getLog(HFileOutputFormat.class);
- static final String COMPRESSION_CONF_KEY = "hbase.hfileoutputformat.families.compression";
- private static final String BLOOM_TYPE_CONF_KEY = "hbase.hfileoutputformat.families.bloomtype";
- private static final String DATABLOCK_ENCODING_CONF_KEY =
- "hbase.mapreduce.hfileoutputformat.datablock.encoding";
- private static final String BLOCK_SIZE_CONF_KEY = "hbase.mapreduce.hfileoutputformat.blocksize";
+
+ // The following constants are private since these are used by
+ // HFileOutputFormat to internally transfer data between job setup and
+ // reducer run using conf.
+ // These should not be changed by the client.
+ private static final String COMPRESSION_FAMILIES_CONF_KEY =
+ "hbase.hfileoutputformat.families.compression";
+ private static final String BLOOM_TYPE_FAMILIES_CONF_KEY =
+ "hbase.hfileoutputformat.families.bloomtype";
+ private static final String BLOCK_SIZE_FAMILIES_CONF_KEY =
+ "hbase.mapreduce.hfileoutputformat.blocksize";
+ private static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY =
+ "hbase.mapreduce.hfileoutputformat.families.datablock.encoding";
+
+ // This constant is public since the client can modify this when setting
+ // up their conf object and thus refer to this symbol.
+ // It is present for backwards compatibility reasons. Use it only to
+ // override the auto-detection of datablock encoding.
+ public static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY =
+ "hbase.mapreduce.hfileoutputformat.datablock.encoding";
public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(final TaskAttemptContext context)
throws IOException, InterruptedException {
@@ -95,17 +111,27 @@ public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue>
- final Map<byte[], String> compressionMap = createFamilyCompressionMap(conf);
- final Map<byte[], String> bloomTypeMap = createFamilyBloomMap(conf);
- final Map<byte[], String> blockSizeMap = createFamilyBlockSizeMap(conf);
-
- final String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_CONF_KEY);
+ final Map<byte[], Algorithm> compressionMap = createFamilyCompressionMap(conf);
+ final Map<byte[], BloomType> bloomTypeMap = createFamilyBloomTypeMap(conf);
+ final Map<byte[], Integer> blockSizeMap = createFamilyBlockSizeMap(conf);
+
+ String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);
+ final Map<byte[], DataBlockEncoding> datablockEncodingMap =
+     createFamilyDataBlockEncodingMap(conf);
+ final DataBlockEncoding overriddenEncoding;
+ if (dataBlockEncodingStr != null) {
+ overriddenEncoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);
+ } else {
+ overriddenEncoding = null;
+ }
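+ // If set, the override is expected to win over the per-family values in
+ // datablockEncodingMap when the writers are created (see the javadoc on
+ // DATABLOCK_ENCODING_OVERRIDE_CONF_KEY above).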
return new RecordWriter<ImmutableBytesWritable, KeyValue>() {
// Map of families to writers and how much has been output on the writer.
@@ -180,26 +206,23 @@ public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue>
- Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
- int i = 0;
- for (HColumnDescriptor familyDescriptor : families) {
- if (i++ > 0) {
- blockSizeConfigValue.append('&');
- }
- blockSizeConfigValue.append(URLEncoder.encode(
- familyDescriptor.getNameAsString(), "UTF-8"));
- blockSizeConfigValue.append('=');
- blockSizeConfigValue.append(URLEncoder.encode(
- String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
+ /**
+ * Runs inside the task to deserialize column family to compression algorithm
+ * map from the configuration.
+ *
+ * @param conf to read the serialized values from
+ * @return a map from column family to the configured compression algorithm
+ */
+ @VisibleForTesting
+ static Map<byte[], Algorithm> createFamilyCompressionMap(
+     Configuration conf) {
+ Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
+     COMPRESSION_FAMILIES_CONF_KEY);
+ Map<byte[], Algorithm> compressionMap =
+     new TreeMap<byte[], Algorithm>(Bytes.BYTES_COMPARATOR);
+ for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
+ Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue());
+ compressionMap.put(e.getKey(), algorithm);
}
- // Get rid of the last ampersand
- conf.set(BLOCK_SIZE_CONF_KEY, blockSizeConfigValue.toString());
+ return compressionMap;
}
/**
- * Run inside the task to deserialize column family to compression algorithm
- * map from the
- * configuration.
+ * Runs inside the task to deserialize column family to bloom filter type
+ * map from the configuration.
*
- * Package-private for unit tests only.
- *
- * @return a map from column family to the name of the configured compression
- * algorithm
+ * @param conf to read the serialized values from
+ * @return a map from column family to the configured bloom filter type
*/
- static Map<byte[], String> createFamilyCompressionMap(Configuration conf) {
- return createFamilyConfValueMap(conf, COMPRESSION_CONF_KEY);
+ @VisibleForTesting
+ static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf) {
+ Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
+     BLOOM_TYPE_FAMILIES_CONF_KEY);
+ Map<byte[], BloomType> bloomTypeMap =
+     new TreeMap<byte[], BloomType>(Bytes.BYTES_COMPARATOR);
+ for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
+ BloomType bloomType = BloomType.valueOf(e.getValue());
+ bloomTypeMap.put(e.getKey(), bloomType);
+ }
+ return bloomTypeMap;
}
- private static Map<byte[], String> createFamilyBloomMap(Configuration conf) {
- return createFamilyConfValueMap(conf, BLOOM_TYPE_CONF_KEY);
+ /**
+ * Runs inside the task to deserialize column family to block size
+ * map from the configuration.
+ *
+ * @param conf to read the serialized values from
+ * @return a map from column family to the configured block size
+ */
+ @VisibleForTesting
+ static Map<byte[], Integer> createFamilyBlockSizeMap(Configuration conf) {
+ Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
+     BLOCK_SIZE_FAMILIES_CONF_KEY);
+ Map<byte[], Integer> blockSizeMap =
+     new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
+ for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
+ Integer blockSize = Integer.parseInt(e.getValue());
+ blockSizeMap.put(e.getKey(), blockSize);
+ }
+ return blockSizeMap;
}
- private static Map<byte[], String> createFamilyBlockSizeMap(Configuration conf) {
- return createFamilyConfValueMap(conf, BLOCK_SIZE_CONF_KEY);
+ /**
+ * Runs inside the task to deserialize column family to data block encoding
+ * type map from the configuration.
+ *
+ * @param conf to read the serialized values from
+ * @return a map from column family to the configured data block encoding
+ * for the family
+ */
+ @VisibleForTesting
+ static Map<byte[], DataBlockEncoding> createFamilyDataBlockEncodingMap(
+     Configuration conf) {
+ Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
+     DATABLOCK_ENCODING_FAMILIES_CONF_KEY);
+ Map<byte[], DataBlockEncoding> encoderMap =
+     new TreeMap<byte[], DataBlockEncoding>(Bytes.BYTES_COMPARATOR);
+ for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
+ encoderMap.put(e.getKey(), DataBlockEncoding.valueOf(e.getValue()));
+ }
+ return encoderMap;
}
+
/**
* Run inside the task to deserialize column family to given conf value map.
*
- * @param conf
- * @param confName
+ * @param conf to read the serialized values from
+ * @param confName conf key to read from the configuration
* @return a map of column family to the given configuration value
*/
private static Map<byte[], String> createFamilyConfValueMap(Configuration conf, String confName) {
@@ -449,13 +512,14 @@ public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue>
+ /**
+ * Serialize column family to block size map to configuration.
+ * Invoked while configuring the MR job for incremental load.
+ *
+ * @param table to read the properties from
+ * @param conf to persist serialized values into
+ * @throws IOException
+ * on failure to read column family descriptors
+ */
+ @VisibleForTesting
+ static void configureBlockSize(HTable table, Configuration conf) throws IOException {
+ StringBuilder blockSizeConfigValue = new StringBuilder();
+ HTableDescriptor tableDescriptor = table.getTableDescriptor();
+ if (tableDescriptor == null) {
+ // could happen with mock table instance
+ return;
+ }
+ Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
+ int i = 0;
+ for (HColumnDescriptor familyDescriptor : families) {
+ if (i++ > 0) {
+ blockSizeConfigValue.append('&');
+ }
+ blockSizeConfigValue.append(URLEncoder.encode(
+ familyDescriptor.getNameAsString(), "UTF-8"));
+ blockSizeConfigValue.append('=');
+ blockSizeConfigValue.append(URLEncoder.encode(
+ String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
+ }
+ conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
}
/**
* Serialize column family to bloom type map to configuration.
* Invoked while configuring the MR job for incremental load.
*
+ * @param table to read the properties from
+ * @param conf to persist serialized values into
* @throws IOException
* on failure to read column family descriptors
*/
+ @VisibleForTesting
static void configureBloomType(HTable table, Configuration conf) throws IOException {
HTableDescriptor tableDescriptor = table.getTableDescriptor();
if (tableDescriptor == null) {
@@ -505,6 +605,44 @@ public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue>
+
+ /**
+ * Serialize column family to data block encoding map to configuration.
+ * Invoked while configuring the MR job for incremental load.
+ *
+ * @param table to read the properties from
+ * @param conf to persist serialized values into
+ * @throws IOException
+ * on failure to read column family descriptors
+ */
+ @VisibleForTesting
+ static void configureDataBlockEncoding(HTable table,
+     Configuration conf) throws IOException {
+ HTableDescriptor tableDescriptor = table.getTableDescriptor();
+ if (tableDescriptor == null) {
+ // could happen with mock table instance
+ return;
+ }
+ StringBuilder dataBlockEncodingConfigValue = new StringBuilder();
+ Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
+ int i = 0;
+ for (HColumnDescriptor familyDescriptor : families) {
+ if (i++ > 0) {
+ dataBlockEncodingConfigValue.append('&');
+ }
+ dataBlockEncodingConfigValue.append(
+ URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
+ dataBlockEncodingConfigValue.append('=');
+ DataBlockEncoding encoding = familyDescriptor.getDataBlockEncoding();
+ if (encoding == null) {
+ encoding = DataBlockEncoding.NONE;
+ }
+ dataBlockEncodingConfigValue.append(URLEncoder.encode(encoding.toString(),
+ "UTF-8"));
+ }
+ conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
+ dataBlockEncodingConfigValue.toString());
}
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
index ffe2081..6e48e91 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
@@ -33,9 +33,7 @@ import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Callable;
-
import junit.framework.Assert;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -64,6 +62,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
@@ -474,36 +473,40 @@ public class TestHFileOutputFormat {
}
/**
- * Test for
- * {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}. Tests
- * that the compression map is correctly deserialized from configuration
+ * Test for {@link HFileOutputFormat#configureCompression(HTable, Configuration)}
+ * and {@link HFileOutputFormat#createFamilyCompressionMap(Configuration)}.
+ * Tests that the compression map is correctly serialized into
+ * and deserialized from configuration.
*
* @throws IOException
*/
@Test
- public void testCreateFamilyCompressionMap() throws IOException {
+ public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
for (int numCfs = 0; numCfs <= 3; numCfs++) {
Configuration conf = new Configuration(this.util.getConfiguration());
- Map<String, Compression.Algorithm> familyToCompression = getMockColumnFamilies(numCfs);
+ Map<String, Compression.Algorithm> familyToCompression =
+     getMockColumnFamiliesForCompression(numCfs);
HTable table = Mockito.mock(HTable.class);
- setupMockColumnFamilies(table, familyToCompression);
+ setupMockColumnFamiliesForCompression(table, familyToCompression);
HFileOutputFormat.configureCompression(table, conf);
// read back family specific compression setting from the configuration
- Map<byte[], String> retrievedFamilyToCompressionMap = HFileOutputFormat.createFamilyCompressionMap(conf);
+ Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat
+     .createFamilyCompressionMap(conf);
// test that we have a value for all column families that matches with the
// used mock values
for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
- assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), entry.getValue()
- .getName(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
+ assertEquals("Compression configuration incorrect for column family:"
+ + entry.getKey(), entry.getValue(),
+ retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
}
}
}
- private void setupMockColumnFamilies(HTable table,
-     Map<String, Compression.Algorithm> familyToCompression) throws IOException
- {
+ private void setupMockColumnFamiliesForCompression(HTable table,
+     Map<String, Compression.Algorithm> familyToCompression) throws IOException {
HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
@@ -515,21 +518,12 @@ public class TestHFileOutputFormat {
Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
}
- private void setupMockStartKeys(HTable table) throws IOException {
- byte[][] mockKeys = new byte[][] {
- HConstants.EMPTY_BYTE_ARRAY,
- Bytes.toBytes("aaa"),
- Bytes.toBytes("ggg"),
- Bytes.toBytes("zzz")
- };
- Mockito.doReturn(mockKeys).when(table).getStartKeys();
- }
-
/**
* @return a map from column family names to compression algorithms for
* testing column family compression. Column family names have special characters
*/
- private Map<String, Compression.Algorithm> getMockColumnFamilies(int numCfs) {
+ private Map<String, Compression.Algorithm>
+     getMockColumnFamiliesForCompression(int numCfs) {
Map<String, Compression.Algorithm> familyToCompression = new HashMap<String, Compression.Algorithm>();
// use column family names having special characters
if (numCfs-- > 0) {
@@ -549,6 +543,238 @@ public class TestHFileOutputFormat {
/**
+ * Test for {@link HFileOutputFormat#configureBloomType(HTable, Configuration)}
+ * and {@link HFileOutputFormat#createFamilyBloomTypeMap(Configuration)}.
+ * Tests that the bloom filter type map is correctly serialized into
+ * and deserialized from configuration.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException {
+ for (int numCfs = 0; numCfs <= 2; numCfs++) {
+ Configuration conf = new Configuration(this.util.getConfiguration());
+ Map<String, BloomType> familyToBloomType =
+     getMockColumnFamiliesForBloomType(numCfs);
+ HTable table = Mockito.mock(HTable.class);
+ setupMockColumnFamiliesForBloomType(table,
+ familyToBloomType);
+ HFileOutputFormat.configureBloomType(table, conf);
+
+ // read back family specific bloom filter type settings from the
+ // configuration
+ Map<byte[], BloomType> retrievedFamilyToBloomTypeMap =
+     HFileOutputFormat.createFamilyBloomTypeMap(conf);
+
+ // test that we have a value for all column families that matches with the
+ // used mock values
+ for (Entry<String, BloomType> entry : familyToBloomType.entrySet()) {
+ assertEquals("BloomType configuration incorrect for column family:"
+ + entry.getKey(), entry.getValue(),
+ retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes()));
+ }
+ }
+ }
+
+ private void setupMockColumnFamiliesForBloomType(HTable table,
+     Map<String, BloomType> familyToBloomType) throws IOException {
+ HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
+ for (Entry<String, BloomType> entry : familyToBloomType.entrySet()) {
+ mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
+ .setMaxVersions(1)
+ .setBloomFilterType(entry.getValue())
+ .setBlockCacheEnabled(false)
+ .setTimeToLive(0));
+ }
+ Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+ }
+
+ /**
+ * @return a map from column family names to bloom filter types for
+ * testing column family bloom filter settings. Column family names have special characters
+ */
+ private Map<String, BloomType>
+     getMockColumnFamiliesForBloomType(int numCfs) {
+ Map<String, BloomType> familyToBloomType =
+     new HashMap<String, BloomType>();
+ // use column family names having special characters
+ if (numCfs-- > 0) {
+ familyToBloomType.put("Family1!@#!@#&", BloomType.ROW);
+ }
+ if (numCfs-- > 0) {
+ familyToBloomType.put("Family2=asdads&!AASD",
+ BloomType.ROWCOL);
+ }
+ if (numCfs-- > 0) {
+ familyToBloomType.put("Family3", BloomType.NONE);
+ }
+ return familyToBloomType;
+ }
+
+ /**
+ * Test for {@link HFileOutputFormat#configureBlockSize(HTable, Configuration)}
+ * and {@link HFileOutputFormat#createFamilyBlockSizeMap(Configuration)}.
+ * Tests that the block size map is correctly serialized into
+ * and deserialized from configuration.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException {
+ for (int numCfs = 0; numCfs <= 3; numCfs++) {
+ Configuration conf = new Configuration(this.util.getConfiguration());
+ Map<String, Integer> familyToBlockSize =
+     getMockColumnFamiliesForBlockSize(numCfs);
+ HTable table = Mockito.mock(HTable.class);
+ setupMockColumnFamiliesForBlockSize(table,
+ familyToBlockSize);
+ HFileOutputFormat.configureBlockSize(table, conf);
+
+ // read back family specific block size settings from the
+ // configuration
+ Map<byte[], Integer> retrievedFamilyToBlockSizeMap =
+     HFileOutputFormat.createFamilyBlockSizeMap(conf);
+
+ // test that we have a value for all column families that matches with the
+ // used mock values
+ for (Entry<String, Integer> entry : familyToBlockSize.entrySet()) {
+ assertEquals("BlockSize configuration incorrect for column family:"
+ + entry.getKey(), entry.getValue(),
+ retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes()));
+ }
+ }
+ }
+
+ private void setupMockColumnFamiliesForBlockSize(HTable table,
+     Map<String, Integer> familyToBlockSize) throws IOException {
+ HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
+ for (Entry<String, Integer> entry : familyToBlockSize.entrySet()) {
+ mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
+ .setMaxVersions(1)
+ .setBlocksize(entry.getValue())
+ .setBlockCacheEnabled(false)
+ .setTimeToLive(0));
+ }
+ Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+ }
+
+ /**
+ * @return a map from column family names to block sizes for
+ * testing column family block size settings. Column family names have special characters
+ */
+ private Map<String, Integer>
+     getMockColumnFamiliesForBlockSize(int numCfs) {
+ Map<String, Integer> familyToBlockSize =
+     new HashMap<String, Integer>();
+ // use column family names having special characters
+ if (numCfs-- > 0) {
+ familyToBlockSize.put("Family1!@#!@#&", 1234);
+ }
+ if (numCfs-- > 0) {
+ familyToBlockSize.put("Family2=asdads&!AASD",
+ Integer.MAX_VALUE);
+ }
+ if (numCfs-- > 0) {
+ familyToBlockSize.put("Family3", 0);
+ }
+ return familyToBlockSize;
+ }
+
+ /**
+ * Test for {@link HFileOutputFormat#configureDataBlockEncoding(HTable, Configuration)}
+ * and {@link HFileOutputFormat#createFamilyDataBlockEncodingMap(Configuration)}.
+ * Tests that the data block encoding map is correctly serialized into
+ * and deserialized from configuration.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException {
+ for (int numCfs = 0; numCfs <= 3; numCfs++) {
+ Configuration conf = new Configuration(this.util.getConfiguration());
+ Map<String, DataBlockEncoding> familyToDataBlockEncoding =
+     getMockColumnFamiliesForDataBlockEncoding(numCfs);
+ HTable table = Mockito.mock(HTable.class);
+ setupMockColumnFamiliesForDataBlockEncoding(table,
+ familyToDataBlockEncoding);
+ HFileOutputFormat.configureDataBlockEncoding(table, conf);
+
+ // read back family specific data block encoding settings from the
+ // configuration
+ Map<byte[], DataBlockEncoding> retrievedFamilyToDataBlockEncodingMap =
+     HFileOutputFormat.createFamilyDataBlockEncodingMap(conf);
+
+ // test that we have a value for all column families that matches with the
+ // used mock values
+ for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
+ assertEquals("DataBlockEncoding configuration incorrect for column family:"
+ + entry.getKey(), entry.getValue(),
+ retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes()));
+ }
+ }
+ }
+
+ private void setupMockColumnFamiliesForDataBlockEncoding(HTable table,
+     Map<String, DataBlockEncoding> familyToDataBlockEncoding) throws IOException {
+ HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
+ for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
+ mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
+ .setMaxVersions(1)
+ .setDataBlockEncoding(entry.getValue())
+ .setBlockCacheEnabled(false)
+ .setTimeToLive(0));
+ }
+ Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+ }
+
+ /**
+ * @return a map from column family names to data block encodings for
+ * testing column family data block encoding settings. Column family names have special characters
+ */
+ private Map<String, DataBlockEncoding>
+     getMockColumnFamiliesForDataBlockEncoding(int numCfs) {
+ Map<String, DataBlockEncoding> familyToDataBlockEncoding =
+     new HashMap<String, DataBlockEncoding>();
+ // use column family names having special characters
+ if (numCfs-- > 0) {
+ familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF);
+ }
+ if (numCfs-- > 0) {
+ familyToDataBlockEncoding.put("Family2=asdads&!AASD",
+ DataBlockEncoding.FAST_DIFF);
+ }
+ if (numCfs-- > 0) {
+ familyToDataBlockEncoding.put("Family2=asdads&!AASD",
+ DataBlockEncoding.PREFIX);
+ }
+ if (numCfs-- > 0) {
+ familyToDataBlockEncoding.put("Family3", DataBlockEncoding.NONE);
+ }
+ return familyToDataBlockEncoding;
+ }
+
+ private void setupMockStartKeys(HTable table) throws IOException {
+ byte[][] mockKeys = new byte[][] {
+ HConstants.EMPTY_BYTE_ARRAY,
+ Bytes.toBytes("aaa"),
+ Bytes.toBytes("ggg"),
+ Bytes.toBytes("zzz")
+ };
+ Mockito.doReturn(mockKeys).when(table).getStartKeys();
+ }
+
+ /**
* Test that {@link HFileOutputFormat} RecordWriter uses compression and
* bloom filter settings from the column family descriptor
*/