commit 060d443bc454ea36108d032c9a057cda2a7ac1ae
Author: Todd Lipcon
Date:   Mon Mar 21 19:00:57 2011 -0700

    Make NMapInputFormat use a different config param

diff --git src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java
index 563ee57..f4b3f65 100644
--- src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java
+++ src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java
@@ -25,6 +25,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapreduce.InputFormat;
@@ -34,13 +35,13 @@ import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 
 /**
- * Input format that creates as many map tasks as configured in
- * mapred.map.tasks, each provided with a single row of
- * NullWritables. This can be useful when trying to write mappers
- * which don't have any real input (eg when the mapper is simply
- * producing random data as output)
+ * Input format that creates a configurable number of map tasks,
+ * each provided with a single row of NullWritables. This can be
+ * useful when trying to write mappers which don't have any real
+ * input (e.g. when the mapper is simply producing random data as output).
  */
 public class NMapInputFormat extends InputFormat<NullWritable, NullWritable> {
+  private static final String NMAPS_KEY = "nmapinputformat.num.maps";
 
   @Override
   public RecordReader<NullWritable, NullWritable> createRecordReader(
@@ -53,7 +54,7 @@ public class NMapInputFormat extends InputFormat<NullWritable, NullWritable> {
   @Override
   public List<InputSplit> getSplits(JobContext context) throws IOException,
       InterruptedException {
-    int count = context.getConfiguration().getInt("mapred.map.tasks", 1);
+    int count = getNumMapTasks(context.getConfiguration());
     List<InputSplit> splits = new ArrayList<InputSplit>(count);
     for (int i = 0; i < count; i++) {
       splits.add(new NullInputSplit());
@@ -61,6 +62,14 @@ public class NMapInputFormat extends InputFormat<NullWritable, NullWritable> {
     return splits;
   }
 
+  public static void setNumMapTasks(Configuration conf, int numTasks) {
+    conf.setInt(NMAPS_KEY, numTasks);
+  }
+
+  public static int getNumMapTasks(Configuration conf) {
+    return conf.getInt(NMAPS_KEY, 1);
+  }
+
   private static class NullInputSplit extends InputSplit implements Writable {
     @Override
     public long getLength() throws IOException, InterruptedException {
diff --git src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
index 5760d48..ecf7223 100644
--- src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
+++ src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
@@ -62,8 +62,7 @@ import org.mockito.Mockito;
  * Simple test for {@link KeyValueSortReducer} and {@link HFileOutputFormat}.
  * Sets up and runs a mapreduce job that writes hfile output.
  * Creates a few inner classes to implement splits and an inputformat that
- * emits keys and values like those of {@link PerformanceEvaluation}. Makes
- * as many splits as "mapred.map.tasks" maps.
+ * emits keys and values like those of {@link PerformanceEvaluation}.
  */
 public class TestHFileOutputFormat {
   private final static int ROWSPERSPLIT = 1024;
@@ -325,7 +324,7 @@ public class TestHFileOutputFormat {
     new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
 
     // Ensure data shows up
-    int expectedRows = conf.getInt("mapred.map.tasks", 1) * ROWSPERSPLIT;
+    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
     assertEquals("LoadIncrementalHFiles should put expected data in table",
         expectedRows, util.countRows(table));
     Scan scan = new Scan();
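
Note: below is a minimal usage sketch of the new API, not part of the commit. The job name, the RandomDataMapper class, and the choice of NullOutputFormat are illustrative assumptions. Callers that previously set "mapred.map.tasks" to control the split count now go through NMapInputFormat.setNumMapTasks(), which writes the dedicated key "nmapinputformat.num.maps" instead.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

    Configuration conf = HBaseConfiguration.create();

    // Request 4 map tasks; getSplits() will return 4 NullInputSplits.
    // Before this commit the equivalent was conf.setInt("mapred.map.tasks", 4),
    // which also doubled as a scheduling hint to the MapReduce framework itself.
    NMapInputFormat.setNumMapTasks(conf, 4);

    Job job = new Job(conf, "random-data-writer");    // illustrative job name
    job.setInputFormatClass(NMapInputFormat.class);
    job.setMapperClass(RandomDataMapper.class);       // hypothetical mapper: ignores its
                                                      // NullWritable input, emits random data
    job.setNumReduceTasks(0);                         // map-only job
    job.setOutputFormatClass(NullOutputFormat.class); // discard output for this sketch
    job.waitForCompletion(true);

Because getNumMapTasks() defaults to 1 when the key is unset, behavior matches the old conf.getInt("mapred.map.tasks", 1) fallback: TestHFileOutputFormat still expects 1 * ROWSPERSPLIT rows unless setNumMapTasks() is called. And since "nmapinputformat.num.maps" is private to this input format, tuning it no longer risks colliding with the framework's own interpretation of "mapred.map.tasks".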