diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
index f8d13bd..69f9276 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
@@ -147,7 +147,12 @@ public class TableRecordReaderImpl {
       InterruptedException {
     if (context != null) {
       this.context = context;
-      getCounter = retrieveGetCounterWithStringsParams(context);
+      // Only resolve the counter-retrieval method when scan metrics are enabled;
+      // leaving getCounter null suppresses all counter updates downstream.
+      if (context.getConfiguration().getBoolean(TableSnapshotInputFormatImpl.SNAPSHOT_SCAN_METRICS_KEY,
+          TableSnapshotInputFormatImpl.DEFAULT_SNAPSHOT_SCAN_METRICS)) {
+        getCounter = retrieveGetCounterWithStringsParams(context);
+      }
     }
     restart(scan.getStartRow());
   }
@@ -247,5 +252,8 @@ public class TableRecordReaderImpl {
       }
-      updateCounters();
+      // Skip counter updates entirely when metrics were disabled in initialize().
+      if (getCounter != null) {
+        updateCounters();
+      }
       return false;
     } catch (IOException ioe) {
       if (logScannerActivity) {
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index 1795ce1..241ead6 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -77,6 +77,18 @@ public class TableSnapshotInputFormatImpl {
       "hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
   private static final float DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
 
+  /** Whether to collect scan metrics during snapshot scans; on by default for compatibility. */
+  protected static final String SNAPSHOT_SCAN_METRICS_KEY =
+      "hbase.TableSnapshotInputFormat.scan.metrics";
+
+  protected static final boolean DEFAULT_SNAPSHOT_SCAN_METRICS = true;
+
+  /** Whether to append a random UUID to the restore dir so concurrent jobs do not collide. */
+  protected static final String SNAPSHOT_UUID_IN_RESTORE_DIR_KEY =
+      "hbase.TableSnapshotInputFormat.uuid.in.restore.dir";
+
+  protected static final boolean DEFAULT_SNAPSHOT_UUID_IN_RESTORE_DIR = true;
+
   /**
    * For MapReduce jobs running multiple mappers per region, determines
    * what split algorithm we should be using to find split points for scanners.
@@ -231,6 +243,8 @@ public class TableSnapshotInputFormatImpl {
       scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
       // disable caching of data blocks
       scan.setCacheBlocks(false);
-      scan.setScanMetricsEnabled(true);
+      // Honor the job-level metrics switch instead of unconditionally enabling metrics.
+      scan.setScanMetricsEnabled(
+          conf.getBoolean(SNAPSHOT_SCAN_METRICS_KEY, DEFAULT_SNAPSHOT_SCAN_METRICS));
 
       scanner = new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
@@ -525,7 +539,10 @@ public class TableSnapshotInputFormatImpl {
     Path rootDir = FSUtils.getRootDir(conf);
     FileSystem fs = rootDir.getFileSystem(conf);
 
-    restoreDir = new Path(restoreDir, UUID.randomUUID().toString());
+    // Randomize the restore dir (the default) so concurrent jobs get distinct directories.
+    if (conf.getBoolean(SNAPSHOT_UUID_IN_RESTORE_DIR_KEY, DEFAULT_SNAPSHOT_UUID_IN_RESTORE_DIR)) {
+      restoreDir = new Path(restoreDir, UUID.randomUUID().toString());
+    }
 
     // TODO: restore from record readers to parallelize.
     RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index ac1af91..c44b64a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -131,6 +131,13 @@ public class CacheConfig {
       "hbase.rs.prefetchblocksonopen";
 
   /**
+   * Configuration key that, when set, overrides both {@link #PREFETCH_BLOCKS_ON_OPEN_KEY} and
+   * the column family descriptor's prefetch-blocks-on-open setting for every file opened.
+   */
+  public static final String PREFETCH_BLOCKS_ON_OPEN_OVERRIDE_KEY =
+      "hbase.rs.prefetchblocksonopen.override";
+
+  /**
    * The target block size used by blockcache instances. Defaults to
    * {@link HConstants#DEFAULT_BLOCKSIZE}.
    * TODO: this config point is completely wrong, as it's used to determine the
@@ -232,4 +239,6 @@ public class CacheConfig {
         conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE) || family.isEvictBlocksOnClose(),
         conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
-        conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen(),
+        // The override key, when present, beats both the site config and the CF descriptor.
+        conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_OVERRIDE_KEY,
+            conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN) || family.isPrefetchBlocksOnOpen()),
         conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
@@ -256,7 +265,8 @@ public class CacheConfig {
       conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE),
       conf.getBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, DEFAULT_EVICT_ON_CLOSE),
       conf.getBoolean(CACHE_DATA_BLOCKS_COMPRESSED_KEY, DEFAULT_CACHE_DATA_COMPRESSED),
-      conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN),
+      conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_OVERRIDE_KEY,
+          conf.getBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, DEFAULT_PREFETCH_ON_OPEN)),
       conf.getBoolean(DROP_BEHIND_CACHE_COMPACTION_KEY, DROP_BEHIND_CACHE_COMPACTION_DEFAULT)
     );
     LOG.info("Created cacheConfig: " + this);