Index: src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java	(revision 1203927)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java	(working copy)
@@ -115,11 +115,11 @@
   }
 
   /**
-   * @param region Encoded name of region to flush.
+   * @param encodedRegionName Encoded name of region to flush.
    */
-  private void scheduleFlush(final byte [] region) {
+  private void scheduleFlush(final byte [] encodedRegionName) {
     boolean scheduled = false;
-    HRegion r = this.services.getFromOnlineRegions(Bytes.toString(region));
+    HRegion r = this.services.getFromOnlineRegions(Bytes.toString(encodedRegionName));
     FlushRequester requester = null;
     if (r != null) {
       requester = this.services.getFlushRequester();
@@ -129,8 +129,9 @@
       }
     }
     if (!scheduled) {
-      LOG.warn("Failed to schedule flush of " +
-        Bytes.toString(region) + "r=" + r + ", requester=" + requester);
+      LOG.warn("Failed to schedule flush of " +
+        Bytes.toString(encodedRegionName) + ", region=" + r + ", requester=" +
+        requester);
     }
   }
 
Index: src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java	(revision 1203927)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java	(working copy)
@@ -183,8 +183,8 @@
     Collections.synchronizedSortedMap(new TreeMap<Long, Path>());
 
   /*
-   * Map of regions to most recent sequence/edit id in their memstore.
-   * Key is encoded region name.
+   * Map of encoded region names to their most recent sequence/edit id in their
+   * memstore.
    */
   private final ConcurrentSkipListMap<byte [], Long> lastSeqWritten =
     new ConcurrentSkipListMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
@@ -713,9 +713,8 @@
       // If too many log files, figure which regions we need to flush.
       // Array is an array of encoded region names.
       byte [][] regions = null;
-      int logCount = this.outputfiles.size();
-      if (logCount > this.maxLogs && this.outputfiles != null &&
-          this.outputfiles.size() > 0) {
+      int logCount = this.outputfiles == null? 0: this.outputfiles.size();
+      if (logCount > this.maxLogs && logCount > 0) {
         // This is an array of encoded region names.
         regions = findMemstoresWithEditsEqualOrOlderThan(this.outputfiles.firstKey(),
           this.lastSeqWritten);
@@ -737,7 +736,7 @@
    * Return regions (memstores) that have edits that are equal or less than
    * the passed oldestWALseqid.
    * @param oldestWALseqid
-   * @param regionsToSeqids
+   * @param regionsToSeqids Encoded region names to sequence ids
   * @return All regions whose seqid is < than oldestWALseqid (Not
   * necessarily in order).  Null if no regions found.
   */
@@ -748,6 +747,7 @@
     for (Map.Entry<byte [], Long> e: regionsToSeqids.entrySet()) {
       if (e.getValue().longValue() <= oldestWALseqid) {
         if (regions == null) regions = new ArrayList<byte []>();
+        // Key is encoded region name.
         regions.add(e.getKey());
       }
     }
@@ -770,6 +770,7 @@
     byte [] oldestRegion = null;
     for (Map.Entry<byte [], Long> e: this.lastSeqWritten.entrySet()) {
       if (e.getValue().longValue() == oldestOutstandingSeqNum.longValue()) {
+        // Key is encoded region name.
         oldestRegion = e.getKey();
         break;
       }
@@ -1015,9 +1016,9 @@
       // is greater than or equal to the value in lastSeqWritten.
       // Use encoded name. Its shorter, guaranteed unique and a subset of
       // actual name.
-      byte [] hriKey = info.getEncodedNameAsBytes();
-      this.lastSeqWritten.putIfAbsent(hriKey, seqNum);
-      HLogKey logKey = makeKey(hriKey, tableName, seqNum, now, clusterId);
+      byte [] encodedRegionName = info.getEncodedNameAsBytes();
+      this.lastSeqWritten.putIfAbsent(encodedRegionName, seqNum);
+      HLogKey logKey = makeKey(encodedRegionName, tableName, seqNum, now, clusterId);
       doWrite(info, logKey, edits, htd);
       this.numEntries.incrementAndGet();
       if (htd.isDeferredLogFlush()) {
@@ -1358,6 +1359,11 @@
       // Cleaning up of lastSeqWritten is in the finally clause because we
       // don't want to confuse getOldestOutstandingSeqNum()
       this.lastSeqWritten.remove(getSnapshotName(encodedRegionName));
+      Long l = this.lastSeqWritten.remove(encodedRegionName);
+      if (l != null) {
+        LOG.warn("Why is there a raw encodedRegionName in lastSeqWritten? name=" +
+          Bytes.toString(encodedRegionName) + ", seqid=" + l);
+      }
       this.cacheFlushLock.unlock();
     }
   }
Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(revision 1203927)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(working copy)
@@ -1169,7 +1169,7 @@
     long currentMemStoreSize = 0;
     List<StoreFlusher> storeFlushers = new ArrayList<StoreFlusher>(stores.size());
     try {
-      sequenceId = (wal == null)? myseqid :
+      sequenceId = (wal == null)? myseqid:
         wal.startCacheFlush(this.regionInfo.getEncodedNameAsBytes());
       completeSequenceId = this.getCompleteCacheFlushSequenceId(sequenceId);
 
@@ -1186,7 +1186,7 @@
     }
 
     status.setStatus("Flushing stores");
-    LOG.debug("Finished snapshotting, commencing flushing stores");
+    LOG.debug("Finished snapshotting " + this + ", commencing flushing stores");
 
     // Any failure from here on out will be catastrophic requiring server
     // restart so hlog content can be replayed and put back into the memstore.