From 7883afa29e5393875f9a93f0e548623f25be2f99 Mon Sep 17 00:00:00 2001
From: Michael Stack <stack@apache.org>
Date: Thu, 15 Mar 2018 20:18:46 -0700
Subject: [PATCH] HBASE-20213 [LOGGING] Aligning formatting and logging less
 (compactions, in-memory compactions)

Log less. Log using the same format as used elsewhere in the log. Align
logging in HFileArchiver with how we format elsewhere. Removed redundant
'region' qualifiers and tightened up the emissions so the long lines are
easier to read.

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
 Add a label to each of the chunk creators we make (I was confused by the
 two chunk creator stats emissions in the log file -- couldn't tell that
 one was for data and the other for index).

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
 Formatting. Log less.

M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
 Make the emissions in here trace-level. When there are more than a few
 regions, the log fills up with this stuff.
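For reference, a minimal sketch of the SLF4J idiom this patch standardizes
on (illustrative only, not part of the patch; the class and method names
below are made up): parameterized messages replace guarded string
concatenation, and an explicit level check is kept only where building an
argument is itself expensive.

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class LoggingIdiomExample {
    private static final Logger LOG = LoggerFactory.getLogger(LoggingIdiomExample.class);

    void archive(String regionDir) {
      // Before: the guard exists only to avoid building the message
      // string when DEBUG is off.
      if (LOG.isDebugEnabled()) {
        LOG.debug("ARCHIVING " + regionDir);
      }
      // After: SLF4J formats the {} arguments only if DEBUG is enabled,
      // so no guard is needed for cheap arguments. Note each {} must be
      // matched by an argument.
      LOG.debug("ARCHIVING {}", regionDir);
      // A guard still pays off when computing an argument is expensive.
      if (LOG.isTraceEnabled()) {
        LOG.trace("Queue state={}", expensiveDump());
      }
    }

    private String expensiveDump() {
      return "..."; // stand-in for a costly computation
    }
  }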
---
 .../apache/hadoop/hbase/backup/HFileArchiver.java  | 46 +++++++++-----------
 .../master/procedure/MasterProcedureScheduler.java |  2 +-
 .../hadoop/hbase/regionserver/ChunkCreator.java    | 50 +++++++++++-----------
 .../hadoop/hbase/regionserver/CompactSplit.java    | 16 +++----
 .../hbase/regionserver/CompactingMemStore.java     |  5 +--
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  2 +-
 .../regionserver/MemStoreCompactionStrategy.java   |  8 ++--
 .../hbase/regionserver/MemStoreCompactor.java      |  6 +--
 .../hadoop/hbase/regionserver/StoreScanner.java    |  7 ++-
 .../hbase/regionserver/compactions/Compactor.java  | 17 ++++----
 10 files changed, 74 insertions(+), 85 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index dea6bf819d..0dcaf4c4d4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -109,9 +109,7 @@ public class HFileArchiver {
    */
   public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("ARCHIVING " + regionDir.toString());
-    }
+    LOG.debug("ARCHIVING {}", regionDir.toString());
 
     // otherwise, we archive the files
     // make sure we can archive
@@ -145,7 +143,7 @@ public class HFileArchiver {
     FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
     // if there no files, we can just delete the directory and return;
     if (storeDirs == null) {
-      LOG.debug("Region directory " + regionDir + " empty.");
+      LOG.debug("Directory " + regionDir + " empty.");
       return deleteRegionWithoutArchiving(fs, regionDir);
     }
 
@@ -194,7 +192,7 @@ public class HFileArchiver {
       RegionInfo parent, Path familyDir, byte[] family) throws IOException {
     FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
     if (storeFiles == null) {
-      LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
+      LOG.debug("No files to dispose of in " + parent.getRegionNameAsString() +
           ", family=" + Bytes.toString(family));
       return;
     }
@@ -230,7 +228,7 @@ public class HFileArchiver {
 
     // sometimes in testing, we don't have rss, so we need to check for that
     if (fs == null) {
-      LOG.warn("Passed filesystem is null, so just deleting the files without archiving for region:" +
+      LOG.warn("Passed filesystem is null, so just deleting files without archiving for " +
           Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family));
       deleteStoreFilesWithoutArchiving(compactedFiles);
       return;
@@ -238,7 +236,7 @@ public class HFileArchiver {
 
     // short circuit if we don't have any files to delete
     if (compactedFiles.isEmpty()) {
-      LOG.debug("No store files to dispose, done!");
+      LOG.debug("No files to dispose of, done!");
       return;
     }
 
@@ -255,7 +253,7 @@ public class HFileArchiver {
     }
 
     // otherwise we attempt to archive the store files
-    LOG.debug("Archiving compacted store files.");
+    LOG.debug("Archiving compacted files.");
 
     // Wrap the storefile into a File
     StoreToFile getStorePath = new StoreToFile(fs);
@@ -321,7 +319,7 @@ public class HFileArchiver {
     // short circuit if no files to move
     if (toArchive.isEmpty()) return Collections.emptyList();
 
-    if (LOG.isTraceEnabled()) LOG.trace("moving files to the archive directory: " + baseArchiveDir);
+    if (LOG.isTraceEnabled()) LOG.trace("Moving files to the archive directory: " + baseArchiveDir);
 
     // make sure the archive directory exists
     if (!fs.exists(baseArchiveDir)) {
@@ -337,7 +335,7 @@ public class HFileArchiver {
     for (File file : toArchive) {
       // if its a file archive it
       try {
-        if (LOG.isTraceEnabled()) LOG.trace("Archiving: " + file);
+        if (LOG.isTraceEnabled()) LOG.trace("Archiving " + file);
         if (file.isFile()) {
           // attempt to archive the file
           if (!resolveAndArchiveFile(baseArchiveDir, file, startTime)) {
@@ -355,7 +353,7 @@ public class HFileArchiver {
           failures.addAll(resolveAndArchive(fs, parentArchiveDir, children, start));
         }
       } catch (IOException e) {
-        LOG.warn("Failed to archive " + file, e);
+        LOG.warn("Failed to archive {}", file, e);
         failures.add(file);
       }
     }
@@ -385,10 +383,8 @@ public class HFileArchiver {
     // really, really unlikely situtation, where we get the same name for the existing file, but
     // is included just for that 1 in trillion chance.
     if (fs.exists(archiveFile)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("File:" + archiveFile + " already exists in archive, moving to "
-            + "timestamped backup and overwriting current.");
-      }
+      LOG.debug("{} already exists in archive, moving to timestamped backup and " +
+          "overwriting current.", archiveFile);
 
       // move the archive file to the stamped backup
       Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime);
@@ -406,7 +402,7 @@ public class HFileArchiver {
     }
 
     if (LOG.isTraceEnabled()) {
-      LOG.trace("No existing file in archive for: " + archiveFile +
+      LOG.trace("No existing file in archive for " + archiveFile +
           ", free to archive original file.");
     }
 
@@ -421,11 +417,11 @@ public class HFileArchiver {
     try {
       if (!fs.exists(archiveDir)) {
         if (fs.mkdirs(archiveDir)) {
-          LOG.debug("Created archive directory:" + archiveDir);
+          LOG.debug("Created archive directory " + archiveDir);
         }
       }
     } catch (IOException e) {
-      LOG.warn("Failed to create directory: " + archiveDir, e);
+      LOG.warn("Failed to create directory " + archiveDir, e);
     }
   }
 
@@ -446,9 +442,7 @@ public class HFileArchiver {
       return false;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Finished archiving from " + currentFile + ", to " + archiveFile);
-    }
+    LOG.debug("Archived from {} to {}", currentFile, archiveFile);
     return true;
   }
 
@@ -462,10 +456,10 @@ public class HFileArchiver {
   private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir)
       throws IOException {
     if (fs.delete(regionDir, true)) {
-      LOG.debug("Deleted " + regionDir);
+      LOG.debug("Deleted {}", regionDir);
       return true;
     }
-    LOG.debug("Failed to delete region directory:" + regionDir);
+    LOG.debug("Failed to delete directory {}", regionDir);
     return false;
   }
 
@@ -480,13 +474,13 @@ public class HFileArchiver {
    */
   private static void deleteStoreFilesWithoutArchiving(Collection<HStoreFile> compactedFiles)
       throws IOException {
-    LOG.debug("Deleting store files without archiving.");
+    LOG.debug("Deleting files without archiving.");
     List<IOException> errors = new ArrayList<>(0);
     for (HStoreFile hsf : compactedFiles) {
       try {
         hsf.deleteStoreFile();
       } catch (IOException e) {
-        LOG.error("Failed to delete store file:" + hsf.getPath());
+        LOG.error("Failed to delete {}", hsf.getPath());
         errors.add(e);
       }
     }
@@ -605,7 +599,7 @@ public class HFileArchiver {
 
     @Override
     public String toString() {
-      return this.getClass() + ", file:" + getPath().toString();
+      return this.getClass().getSimpleName() + ", " + getPath().toString();
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index eab06a2ea8..1a393073d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -633,7 +633,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     boolean hasLock = true;
     final LockAndQueue[] regionLocks = new LockAndQueue[regionInfo.length];
     for (int i = 0; i < regionInfo.length; ++i) {
-      LOG.info(procedure + ", " + regionInfo[i].getRegionNameAsString());
+      LOG.info("{} checking lock on {}", procedure, regionInfo[i].getEncodedName());
       assert table != null;
       assert regionInfo[i] != null;
       assert regionInfo[i].getTable() != null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
index 1f9f4b822d..518d3ed660 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.lang.ref.WeakReference;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.BlockingQueue;
@@ -38,8 +36,8 @@ import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.HeapMemoryTuneObse
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * Does the management of memstoreLAB chunk creations. A monotonically incrementing id is associated
@@ -100,13 +98,13 @@ public class ChunkCreator {
                float poolSizePercentage, float indexChunkSizePercentage,
                float initialCountPercentage, HeapMemoryManager heapMemoryManager) {
-    this.dataChunksPool = initializePool(globalMemStoreSize,
+    this.dataChunksPool = initializePool("data", globalMemStoreSize,
         (1 - indexChunkSizePercentage) * poolSizePercentage,
         initialCountPercentage, chunkSize, heapMemoryManager);
     // The index chunks pool is needed only when the index type is CCM.
     // Since the pools are not created at all when the index type isn't CCM,
     // we don't need to check it here.
-    this.indexChunksPool = initializePool(globalMemStoreSize,
+    this.indexChunksPool = initializePool("index", globalMemStoreSize,
         indexChunkSizePercentage * poolSizePercentage,
         initialCountPercentage, (int) (indexChunkSizePercentage * chunkSize),
         heapMemoryManager);
   }
@@ -333,8 +331,11 @@ public class ChunkCreator {
     private static final int statThreadPeriod = 60 * 5;
     private final AtomicLong chunkCount = new AtomicLong();
     private final LongAdder reusedChunkCount = new LongAdder();
+    private final String label;
 
-    MemStoreChunkPool(int chunkSize, int maxCount, int initialCount, float poolSizePercentage) {
+    MemStoreChunkPool(String label, int chunkSize, int maxCount, int initialCount,
+        float poolSizePercentage) {
+      this.label = label;
       this.chunkSize = chunkSize;
       this.maxCount = maxCount;
       this.poolSizePercentage = poolSizePercentage;
@@ -423,12 +424,10 @@ public class ChunkCreator {
       long created = chunkCount.get();
       long reused = reusedChunkCount.sum();
       long total = created + reused;
-      LOG.debug("Stats (chunk size=" + chunkSize + "): "
-          + "current pool size=" + reclaimedChunks.size()
-          + ",created chunk count=" + created
-          + ",reused chunk count=" + reused
-          + ",reuseRatio=" + (total == 0 ? "0" : StringUtils.formatPercent(
-              (float) reused / (float) total, 2)));
+      LOG.debug("{} Stats (chunk size={}): current pool size={}, created chunk count={}, " +
+          "reused chunk count={}, reuseRatio={}", label, chunkSize, reclaimedChunks.size(),
+          created, reused,
+          (total == 0? "0": StringUtils.formatPercent((float)reused/(float)total, 2)));
"0": StringUtils.formatPercent((float)reused/(float)total,2))); } } @@ -440,7 +439,7 @@ public class ChunkCreator { public void onHeapMemoryTune(long newMemstoreSize, long newBlockCacheSize) { // don't do any tuning in case of offheap memstore if (isOffheap()) { - LOG.warn("Not tuning the chunk pool as it is offheap"); + LOG.warn("{} not tuning the chunk pool as it is offheap", label); return; } int newMaxCount = @@ -450,12 +449,12 @@ public class ChunkCreator { if (newMaxCount > this.maxCount) { // Max chunks getting increased. Just change the variable. Later calls to getChunk() would // create and add them to Q - LOG.info("Max count for chunks increased from " + this.maxCount + " to " + newMaxCount); + LOG.info("{} max count for chunks increased from {} to {}", this.maxCount, newMaxCount); this.maxCount = newMaxCount; } else { // Max chunks getting decreased. We may need to clear off some of the pooled chunks now // itself. If the extra chunks are serving already, do not pool those when we get them back - LOG.info("Max count for chunks decreased from " + this.maxCount + " to " + newMaxCount); + LOG.info("{} max count for chunks decreased from {} to {}", this.maxCount, newMaxCount); this.maxCount = newMaxCount; if (this.reclaimedChunks.size() > newMaxCount) { synchronized (this) { @@ -474,11 +473,11 @@ public class ChunkCreator { chunkPoolDisabled = false; } - private MemStoreChunkPool initializePool(long globalMemStoreSize, float poolSizePercentage, - float initialCountPercentage, int chunkSize, - HeapMemoryManager heapMemoryManager) { + private MemStoreChunkPool initializePool(String label, long globalMemStoreSize, + float poolSizePercentage, float initialCountPercentage, int chunkSize, + HeapMemoryManager heapMemoryManager) { if (poolSizePercentage <= 0) { - LOG.info("PoolSizePercentage is less than 0. So not using pool"); + LOG.info("{} poolSizePercentage is less than 0. 
So not using pool", label); return null; } if (chunkPoolDisabled) { @@ -490,14 +489,13 @@ public class ChunkCreator { } int maxCount = (int) (globalMemStoreSize * poolSizePercentage / chunkSize); if (initialCountPercentage > 1.0 || initialCountPercentage < 0) { - throw new IllegalArgumentException( - MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY + " must be between 0.0 and 1.0"); + throw new IllegalArgumentException(label + " " + MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY + + " must be between 0.0 and 1.0"); } int initialCount = (int) (initialCountPercentage * maxCount); - LOG.info("Allocating MemStoreChunkPool with chunk size " - + StringUtils.byteDesc(chunkSize) + ", max count " + maxCount - + ", initial count " + initialCount); - MemStoreChunkPool memStoreChunkPool = new MemStoreChunkPool(chunkSize, maxCount, + LOG.info("{} allocating {} MemStoreChunkPool with chunk size {}, max count {}, initial count {}", + label, StringUtils.byteDesc(chunkSize),maxCount, initialCount); + MemStoreChunkPool memStoreChunkPool = new MemStoreChunkPool(label, chunkSize, maxCount, initialCount, poolSizePercentage); if (heapMemoryManager != null && memStoreChunkPool != null) { // Register with Heap Memory manager diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java index 04a553ea2c..7a4403b2ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java @@ -158,10 +158,10 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati @Override public String toString() { - return "compaction_queue=(" - + longCompactions.getQueue().size() + ":" + return "compactionQueue=(longCompactions=" + + longCompactions.getQueue().size() + ":shortCompactions=" + shortCompactions.getQueue().size() + ")" - + ", split_queue=" + splits.getQueue().size(); + + ", splitQueue=" + splits.getQueue().size(); } public String dumpQueue() { @@ -528,10 +528,10 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati @Override public String toString() { if (compaction != null) { - return "Request = " + compaction.getRequest(); + return "Request=" + compaction.getRequest(); } else { - return "regionName = " + region.toString() + ", storeName = " + store.toString() + - ", priority = " + queuedPriority + ", time = " + time; + return "region=" + region.toString() + ", storeName=" + store.toString() + + ", priority=" + queuedPriority + ", startTime=" + time; } } @@ -591,7 +591,7 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati boolean completed = region.compact(c, store, compactionThroughputController, user); long now = EnvironmentEdgeManager.currentTime(); - LOG.info(((completed) ? "Completed" : "Aborted") + " compaction: " + + LOG.info(((completed) ? 
"Completed" : "Aborted") + " compaction " + this + "; duration=" + StringUtils.formatTimeDiff(now, start)); if (completed) { // degenerate case: blocked regions require recursive enqueues @@ -619,7 +619,7 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati tracker.afterExecution(store); completeTracker.completed(store); region.decrementCompactionsQueuedCount(); - LOG.debug("CompactSplitThread Status: " + CompactSplit.this); + LOG.debug("Status: " + CompactSplit.this); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index d60b04924c..efdc8ab591 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -427,9 +427,8 @@ public class CompactingMemStore extends AbstractMemStore { // compaction is in progress compactor.start(); } catch (IOException e) { - LOG.warn("Unable to run memstore compaction. region " - + getRegionServices().getRegionInfo().getRegionNameAsString() + "store: " - + getFamilyName(), e); + LOG.warn("Unable to run in-memory compaction on {}/{}; exception={}", + getRegionServices().getRegionInfo().getEncodedName(), getFamilyName(), e); } } finally { inMemoryFlushInProgress.set(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index ba7ab5b82b..31ebcc0cea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -2077,7 +2077,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return false; } } - LOG.info("Starting compaction on " + store + " in region " + this + LOG.info("Starting compaction of " + store + " in " + this + (compaction.getRequest().isOffPeak()?" 
     doRegionCompactionPrep();
     try {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
index fbb5f75a46..c80b912605 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java
@@ -85,12 +85,12 @@ public abstract class MemStoreCompactionStrategy {
     int numOfSegments = versionedList.getNumOfSegments();
     if (numOfSegments > pipelineThreshold) {
       // to avoid too many segments, merge now
-      LOG.debug("{} {}; merging {} segments", strategy, cfName, numOfSegments);
+      LOG.trace("Strategy={}, store={}; merging {} segments", strategy, cfName, numOfSegments);
       return getMergingAction();
     }
 
     // just flatten a segment
-    LOG.debug("{} {}; flattening a segment", strategy, cfName);
+    LOG.trace("Strategy={}, store={}; flattening a segment", strategy, cfName);
     return getFlattenAction();
   }
 
@@ -104,8 +104,8 @@ public abstract class MemStoreCompactionStrategy {
 
   protected Action compact(VersionedSegmentsList versionedList, String strategyInfo) {
     int numOfSegments = versionedList.getNumOfSegments();
-    LOG.debug(strategyInfo+" memory compaction for store " + cfName
-        + " compacting " + numOfSegments + " segments");
+    LOG.trace("{} in-memory compaction for store={} compacting {} segments", strategyInfo,
+        cfName, numOfSegments);
     return Action.COMPACT;
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
index 5c908e5bff..1f8578d322 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
@@ -92,7 +92,7 @@ public class MemStoreCompactor {
     // get a snapshot of the list of the segments from the pipeline,
     // this local copy of the list is marked with specific version
     versionedList = compactingMemStore.getImmutableSegments();
-    LOG.debug("Starting on {}/{}",
+    LOG.trace("Speculative compaction starting on {}/{}",
         compactingMemStore.getStore().getHRegion().getRegionInfo().getEncodedName(),
         compactingMemStore.getStore().getColumnFamilyName());
     HStore store = compactingMemStore.getStore();
@@ -177,8 +177,8 @@ public class MemStoreCompactor {
         }
       }
     } catch (IOException e) {
-      LOG.debug("Interrupting the MemStore in-memory compaction for store "
-          + compactingMemStore.getFamilyName());
+      LOG.trace("Interrupting in-memory compaction for store={}",
+          compactingMemStore.getFamilyName());
       Thread.currentThread().interrupt();
     } finally {
       // For the MERGE case, if the result was created, but swap didn't happen,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 9f6a015351..d87f310b90 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hbase.thirdparty.io.netty.util.internal.StringUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -980,10 +981,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
         heap.peek() == null || bytesRead < preadMaxBytes) {
       return;
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Switch to stream read because we have already read " + bytesRead +
-          " bytes from this scanner");
-    }
+    LOG.debug("Switch to stream read (scanned={} bytes) of {}", bytesRead,
+        this.store.getColumnFamilyName());
     scanUsePread = false;
     Cell lastTop = heap.peek();
     List<KeyValueScanner> memstoreScanners = new ArrayList<>();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 056f076e2e..4650cca215 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -199,15 +199,14 @@ public abstract class Compactor<T extends CellSink> {
       }
       tmp = fileInfo.get(TIMERANGE_KEY);
       fd.latestPutTs = tmp == null ? HConstants.LATEST_TIMESTAMP: TimeRangeTracker.parseFrom(tmp).getMax();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Compacting " + file +
-            ", keycount=" + keyCount +
-            ", bloomtype=" + r.getBloomFilterType().toString() +
-            ", size=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1) +
-            ", encoding=" + r.getHFileReader().getDataBlockEncoding() +
-            ", seqNum=" + seqNum +
-            (allFiles ? ", earliestPutTs=" + earliestPutTs: ""));
-      }
+      LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, encoding={}, seqNum={}{}",
+          ((file == null || file.getPath() == null)? null: file.getPath().getName()),
+          keyCount,
+          r.getBloomFilterType().toString(),
+          TraditionalBinaryPrefix.long2String(r.length(), "", 1),
+          r.getHFileReader().getDataBlockEncoding(),
+          seqNum,
+          (allFiles? ", earliestPutTs=" + earliestPutTs: ""));
     }
     return fd;
   }
-- 
2.11.0 (Apple Git-81)