diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 4706b7d..db02855 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -363,7 +363,7 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi int priority, CompactionRequest request) throws IOException { CompactionContext compaction = s.requestCompaction(priority, request); if (compaction == null) { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("Not compacting " + r.getRegionInfo().getRegionNameAsString() + " because compaction request was cancelled"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index d6724ba..37058f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1617,6 +1617,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } ByteString regionName = entries.get(0).getKey().getEncodedRegionName(); Region region = regionServer.getRegionByEncodedName(regionName.toStringUtf8()); + if (LOG.isDebugEnabled()) { + LOG.debug(region.getRegionInfo().getEncodedName() + + ", firstSequenceId=" + entries.get(0).getKey().getLogSequenceNumber() + + ", lastSequenceId=" + entries.get(entries.size() - 1).getKey().getLogSequenceNumber()); + } RegionCoprocessorHost coprocessorHost = ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo()) ? 
region.getCoprocessorHost() @@ -1653,7 +1658,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } walEntries.add(walEntry); } - if(edits!=null && !edits.isEmpty()) { + if (edits != null && !edits.isEmpty()) { long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ? entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber(); OperationStatus[] result = doReplayBatchOp(region, edits, replaySeqId); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java index eeffa8b..2d40ffe4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java @@ -120,7 +120,7 @@ public class SplitLogWorker implements Runnable { LOG.warn("log splitting of " + filename + " interrupted, resigning", e); return Status.RESIGNED; } - LOG.warn("log splitting of " + filename + " failed, returning error", e); + LOG.warn("log splitting of " + filename + " FAILED, returning error", e); return Status.ERR; } return Status.DONE; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java index 1314a4d..07b6c11 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java @@ -103,8 +103,7 @@ public class WALEditsReplaySink { } int batchSize = entries.size(); - Map> entriesByRegion = - new HashMap>(); + Map> entriesByRegion = new HashMap>(); HRegionLocation loc = null; Entry entry = null; List regionEntries = null; @@ -122,10 +121,11 @@ public class WALEditsReplaySink { } long startTime = EnvironmentEdgeManager.currentTime(); - + + HRegionInfo 
curRegion = null; // replaying edits by region for (Map.Entry> _entry : entriesByRegion.entrySet()) { - HRegionInfo curRegion = _entry.getKey(); + curRegion = _entry.getKey(); List allActions = _entry.getValue(); // send edits in chunks int totalActions = allActions.size(); @@ -141,8 +141,9 @@ public class WALEditsReplaySink { } long endTime = EnvironmentEdgeManager.currentTime() - startTime; - LOG.debug("number of rows:" + entries.size() + " are sent by batch! spent " + endTime - + "(ms)!"); + LOG.debug("Replayed " + batchSize + " edits in " + endTime + "ms" + + (loc == null? "": " into loc={" + loc + "}") + (entry == null? "": ", lastSequenceId=" + + entry.getKey().getLogSeqNum())); metrics.updateReplayTime(endTime); metrics.updateReplayBatchSize(batchSize); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 4958bde..f603df7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -287,8 +287,8 @@ public class WALSplitter { Reader in = null; try { long logLength = logfile.getLen(); - LOG.info("Splitting wal: " + logPath + ", length=" + logLength); - LOG.info("DistributedLogReplay = " + this.distributedLogReplay); + LOG.info("Splitting WAL (DistributedLogReplay=" + this.distributedLogReplay +"): " + + logPath + ", length=" + logLength); status.setStatus("Opening log file"); if (reporter != null && !reporter.progress()) { progress_failed = true; @@ -1887,7 +1887,7 @@ public class WALSplitter { Long cachedLastFlushedSequenceId = lastFlushedSequenceIds.get(loc.getRegionInfo().getEncodedName()); - // retrieve last flushed sequence Id from ZK. Because region postOpenDeployTasks will + // Retrieve last flushed sequence Id from ZK. 
Because region postOpenDeployTasks will // update the value for the region RegionStoreSequenceIds ids = csm.getSplitLogWorkerCoordination().getRegionFlushedSequenceId(failedServerName, @@ -1904,6 +1904,7 @@ if (cachedLastFlushedSequenceId == null || lastFlushedSequenceId > cachedLastFlushedSequenceId) { + LOG.debug("Update last flushed sequence id of region " + loc.getRegionInfo().getEncodedName() + " to " + lastFlushedSequenceId); lastFlushedSequenceIds.put(loc.getRegionInfo().getEncodedName(), lastFlushedSequenceId); } }