diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
index d6442a4..ce5411b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
@@ -68,6 +68,8 @@ implements Configurable {
   public static final String SCAN_CACHEBLOCKS = "hbase.mapreduce.scan.cacheblocks";
   /** The number of rows for caching that will be passed to scanners. */
   public static final String SCAN_CACHEDROWS = "hbase.mapreduce.scan.cachedrows";
+  /** Set the maximum number of values to return for each call to next(). */
+  public static final String SCAN_BATCHSIZE = "hbase.mapreduce.scan.batchsize";
 
   /** The configuration. */
   private Configuration conf = null;
@@ -143,8 +145,12 @@ implements Configurable {
       scan.setMaxVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
     }
 
     if (conf.get(SCAN_CACHEDROWS) != null) {
       scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
     }
 
+    if (conf.get(SCAN_BATCHSIZE) != null) {
+      scan.setBatch(Integer.parseInt(conf.get(SCAN_BATCHSIZE)));
+    }
+
     // false by default, full table scans generate too much BC churn