Index: src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (revision 231069) +++ src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (revision ) @@ -19,20 +19,6 @@ */ package org.apache.hadoop.hbase.mapreduce; -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URLDecoder; -import java.net.URLEncoder; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.UUID; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -66,6 +52,20 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLDecoder; +import java.net.URLEncoder; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.UUID; + /** * Writes HFiles. Passed KeyValues must arrive in order. 
* Currently, can only write files to a single column family at a @@ -78,6 +78,7 @@ public class HFileOutputFormat extends FileOutputFormat { static Log LOG = LogFactory.getLog(HFileOutputFormat.class); static final String COMPRESSION_CONF_KEY = "hbase.hfileoutputformat.families.compression"; + static final String COMPRESSION_BASED_ON_CFNAME = "hbase.hfileoutputformat.compression.baseon.families"; TimeRangeTracker trt = new TimeRangeTracker(); private static final String DATABLOCK_ENCODING_CONF_KEY = "hbase.mapreduce.hfileoutputformat.datablock.encoding"; @@ -369,7 +370,9 @@ DistributedCache.createSymlink(conf); // Set compression algorithms based on column families + if (conf.getBoolean(COMPRESSION_BASED_ON_CFNAME, true)) { - configureCompression(table, conf); + configureCompression(table, conf); + } TableMapReduceUtil.addDependencyJars(job); LOG.info("Incremental table output configured.");