Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java	(revision 11252)
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java	(working copy)
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 
@@ -44,6 +45,7 @@
 
     HBaseTestingUtility htu = new HBaseTestingUtility();
     Configuration conf = htu.getConfiguration();
+    conf.setBoolean("hbase.rs.cacheblocksonwrite", true);
     FileSystem fs = FileSystem.get(conf);
     byte [] table = Bytes.toBytes("table");
     byte [][] families = new byte [][] {
@@ -71,6 +73,12 @@
     }
     HRegion region = HRegion.createHRegion(hri, path, conf);
 
+    // LRU should be empty
+    LruBlockCache lru = (LruBlockCache)StoreFile.getBlockCache(conf);
+    assertEquals(0, lru.size());
+    assertEquals(0, lru.getStats().getHitCount());
+    assertEquals(0, lru.getStats().getMissCount());
+
     Increment odd = new Increment(rows[0]);
     Increment even = new Increment(rows[0]);
     Increment all = new Increment(rows[0]);
@@ -84,6 +92,11 @@
     for (int i=0;i<5;i++) region.increment(odd, null, false);
     region.flushcache();
 
+    // verify one block cached
+    assertEquals(0, lru.getStats().getHitCount());
+    assertEquals(0, lru.getStats().getMissCount());
+    assertEquals(1, lru.size());
+
     // increment even qualifiers 5 times
     for (int i=0;i<5;i++) region.increment(even, null, false);
 
@@ -96,5 +109,10 @@
       assertTrue(Bytes.equals(kvs[i].getQualifier(), qualifiers[i]));
       assertEquals(6, Bytes.toLong(kvs[i].getValue()));
     }
+
+    // should still only have one block cached and no misses w/ 2 hits
+    assertEquals(1, lru.size());
+    assertEquals(2, lru.getStats().getHitCount());
+    assertEquals(0, lru.getStats().getMissCount());
   }
 }
Index: src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/Store.java	(revision 11252)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/Store.java	(working copy)
@@ -464,7 +464,8 @@
       final long logCacheFlushId,
       TimeRangeTracker snapshotTimeRangeTracker)
   throws IOException {
-    StoreFile.Writer writer = null;
+    StoreFile.Writer writer;
+    String fileName;
     long flushed = 0;
     // Don't flush if there are no entries.
     if (set.size() == 0) {
@@ -478,6 +479,7 @@
     // A. Write the map out to the disk
     writer = createWriterInTmp(set.size());
     writer.setTimeRangeTracker(snapshotTimeRangeTracker);
+    fileName = writer.getPath().getName();
     int entries = 0;
     try {
       for (KeyValue kv: set) {
@@ -496,7 +498,7 @@
     }
 
     // Write-out finished successfully, move into the right spot
-    Path dstPath = StoreFile.getUniqueFile(fs, homedir);
+    Path dstPath = new Path(homedir, fileName);
     LOG.info("Renaming flushed file at " + writer.getPath() + " to " + dstPath);
     if (!fs.rename(writer.getPath(), dstPath)) {
       LOG.warn("Unable to rename " + writer.getPath() + " to " + dstPath);
@@ -1050,15 +1052,15 @@
     // be if all cells were expired or deleted).
     StoreFile result = null;
     if (compactedFile != null) {
-      Path p = null;
-      try {
-        p = StoreFile.rename(this.fs, compactedFile.getPath(),
-          StoreFile.getRandomFilename(fs, this.homedir));
-      } catch (IOException e) {
-        LOG.error("Failed move of compacted file " + compactedFile.getPath(), e);
-        return null;
+      // Move file into the right spot
+      Path origPath = compactedFile.getPath();
+      Path dstPath = new Path(homedir, origPath.getName());
+      LOG.info("Renaming compacted file at " + origPath + " to " + dstPath);
+      if (!fs.rename(origPath, dstPath)) {
+        LOG.error("Failed move of compacted file " + origPath + " to " +
+          dstPath);
       }
-      result = new StoreFile(this.fs, p, blockcache, this.conf,
+      result = new StoreFile(this.fs, dstPath, blockcache, this.conf,
         this.family.getBloomFilterType(), this.inMemory);
       result.createReader();
     }
Index: src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java	(revision 11252)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java	(working copy)
@@ -33,6 +33,7 @@
 import java.util.Map;
 import java.util.Random;
 import java.util.SortedSet;
+import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -161,7 +162,7 @@
    * this files id. Group 2 the referenced region name, etc.
    */
   private static final Pattern REF_NAME_PARSER =
-    Pattern.compile("^(\\d+)(?:\\.(.+))?$");
+    Pattern.compile("^([0-9a-f]+)(?:\\.(.+))?$");
 
   // StoreFile.Reader
   private volatile Reader reader;
@@ -588,7 +589,7 @@
       throw new IOException("Expecting " + dir.toString() +
         " to be a directory");
     }
-    return fs.getFileStatus(dir).isDir()? getRandomFilename(fs, dir): dir;
+    return getRandomFilename(fs, dir);
   }
 
   /**
@@ -615,14 +616,8 @@
       final Path dir,
       final String suffix)
   throws IOException {
-    long id = -1;
-    Path p = null;
-    do {
-      id = Math.abs(rand.nextLong());
-      p = new Path(dir, Long.toString(id) +
-        ((suffix == null || suffix.length() <= 0)? "": suffix));
-    } while(fs.exists(p));
-    return p;
+    return new Path(dir, UUID.randomUUID().toString().replaceAll("-", "") +
+      ((suffix == null || suffix.length() <= 0) ? "" : suffix));
   }
 
   /**
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java	(revision 11252)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java	(working copy)
@@ -382,7 +382,7 @@
         null), blocksize, compress, comparator);
       this.closeOutputStream = true;
-      this.name = path.toString();
+      this.name = path.getName();
       this.path = path;
       this.blockCache = blockCache;
       this.maxBytesToCache = maxBytesToCache;
 
@@ -457,7 +457,7 @@
         byte [] bytes = baos.toByteArray();
         ByteBuffer blockToCache = ByteBuffer.wrap(bytes, DATABLOCKMAGIC.length,
           bytes.length - DATABLOCKMAGIC.length);
-        String blockName = path.toString() + blockNumber;
+        String blockName = name + blockNumber;
         blockCache.cacheBlock(blockName, blockToCache);
         baosDos.close();
         cachedBlocks++;
@@ -588,8 +588,8 @@
 
     @Override
     public String toString() {
-      return "writer=" + this.name + ", compression=" +
-        this.compressAlgo.getName();
+      return "writer=" + this.path.toString() + ", name=" + this.name +
+        ", compression=" + this.compressAlgo.getName();
     }
 
     /**
@@ -848,10 +848,9 @@
     // Whether blocks of file should be evicted on close of file
     private final boolean evictOnClose;
 
-    // Name for this object used when logging or in toString. Is either
-    // the result of a toString on the stream or else is toString of passed
-    // file Path plus metadata key/value pairs.
-    protected String name;
+    // Path of file and file name to be used for block names
+    private final Path path;
+    private final String name;
 
     /**
      * Opens a HFile. You must load the file info before you can
@@ -868,7 +867,6 @@
       this(path, fs.open(path), fs.getFileStatus(path).getLen(), cache,
         inMemory, evictOnClose);
       this.closeIStream = true;
-      this.name = path.toString();
     }
 
     /**
@@ -890,14 +888,15 @@
       this.fileSize = size;
       this.istream = fsdis;
       this.closeIStream = false;
-      this.name = path.toString();
       this.inMemory = inMemory;
       this.evictOnClose = evictOnClose;
+      this.path = path;
+      this.name = path.getName();
     }
 
     @Override
     public String toString() {
-      return "reader=" + this.name +
+      return "reader=" + this.path.toString() +
         (!isFileInfoLoaded()? "":
           ", compression=" + this.compressAlgo.getName() +
           ", inMemory=" + this.inMemory +
@@ -1345,6 +1344,10 @@
       return name;
     }
 
+    public Path getPath() {
+      return path;
+    }
+
     /*
      * Implementation of {@link HFileScanner} interface.
      */