diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 433c87d..05ef3dc 100644
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -247,7 +247,6 @@ public class HFile {
   public static final WriterFactory getWriterFactory(Configuration conf,
       CacheConfig cacheConf) {
     int version = getFormatVersion(conf);
-    LOG.debug("Using HFile format version " + version);
     switch (version) {
     case 1:
       return new HFileWriterV1.WriterFactoryV1(conf, cacheConf);
diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index cdc7beb..ccc748d 100644
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.CompoundBloomFilterWriter;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * Provides functionality to write ({@link BlockIndexWriter}) and read
@@ -750,12 +751,15 @@ public class HFileBlockIndex {
       totalBlockUncompressedSize +=
           blockWriter.getUncompressedSizeWithoutHeader();
 
-      LOG.info("Wrote a " + numLevels + "-level index with root level at pos "
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Wrote a " + numLevels + "-level index with root level at pos "
           + out.getPos() + ", " + rootChunk.getNumEntries()
           + " root-level entries, " + totalNumEntries + " total entries, "
-          + totalBlockOnDiskSize + " bytes total on-disk size, "
-          + totalBlockUncompressedSize + " bytes total uncompressed size.");
-
+          + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) +
+          " on-disk size, "
+          + StringUtils.humanReadableInt(totalBlockUncompressedSize) +
+          " total uncompressed size.");
+      }
       return rootLevelIndexPos;
     }
 
diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index f92bb19..d690ed1 100644
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -301,7 +301,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
     if (evictOnClose && cacheConf.isBlockCacheEnabled()) {
       int numEvicted = cacheConf.getBlockCache().evictBlocksByPrefix(name
           + HFile.CACHE_KEY_SEPARATOR);
-      LOG.debug("On close of file " + name + " evicted " + numEvicted
+      LOG.debug("On close, file=" + name + " evicted=" + numEvicted
           + " block(s)");
     }
     if (closeIStream && istream != null) {
diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
index 069eedf..6c1817c 100644
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
@@ -178,7 +178,7 @@ public class HFileWriterV2 extends AbstractHFileWriter {
 
     // Meta data block index writer
     metaBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter();
-    LOG.debug("HFileWriter initialized with " + cacheConf);
+    LOG.debug("Initialized with " + cacheConf);
   }
 
   /**
diff --git src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
index 028e37c..6a99618 100644
--- src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
+++ src/main/java/org/apache/hadoop/hbase/master/DefaultLoadBalancer.java
@@ -229,20 +229,6 @@ public class DefaultLoadBalancer implements LoadBalancer {
     NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad =
       new TreeMap<ServerAndLoad, List<HRegionInfo>>();
     int numRegions = 0;
-    StringBuilder strBalanceParam = new StringBuilder("Server information: ");
-    // Iterate so we can count regions as we build the map
-    for (Map.Entry<ServerName, List<HRegionInfo>> server: clusterState.entrySet()) {
-      List<HRegionInfo> regions = server.getValue();
-      int sz = regions.size();
-      if (sz == 0) emptyRegionServerPresent = true;
-      numRegions += sz;
-      serversByLoad.put(new ServerAndLoad(server.getKey(), sz), regions);
-      strBalanceParam.append(server.getKey().getServerName()).append("=").
-        append(server.getValue().size()).append(", ");
-    }
-    strBalanceParam.delete(strBalanceParam.length() - 2,
-      strBalanceParam.length());
-    LOG.debug(strBalanceParam.toString());
 
     // Check if we even need to do any load balancing
     float average = (float)numRegions / numServers; // for logging
@@ -262,13 +248,13 @@ public class DefaultLoadBalancer implements LoadBalancer {
     int min = numRegions / numServers;
     int max = numRegions % numServers == 0 ? min : min + 1;
 
-    // Using to check banance result.
-    strBalanceParam.delete(0, strBalanceParam.length());
+    // Using to check balance result.
+    StringBuilder strBalanceParam = new StringBuilder();
     strBalanceParam.append("Balance parameter: numRegions=").append(numRegions)
       .append(", numServers=").append(numServers).append(", max=").append(max)
       .append(", min=").append(min);
     LOG.debug(strBalanceParam.toString());
-
+
     // Balance the cluster
     // TODO: Look at data block locality or a more complex load to do this
     MinMaxPriorityQueue<RegionPlan> regionsToMove =
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
index 36f0ae9..b361af6 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
@@ -85,7 +85,7 @@ class LogRoller extends HasThread implements WALActionsListener {
           LOG.debug("Hlog roll period " + this.rollperiod + "ms elapsed");
         }
       } else if (LOG.isDebugEnabled()) {
-        LOG.debug("HLog roll manually triggered");
+        LOG.debug("HLog roll requested");
       }
       rollLock.lock(); // FindBugs UL_UNRELEASED_LOCK_EXCEPTION_PATH
       try {
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 94c5f1e..c9ad4a9 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -173,25 +173,27 @@ class MemStoreFlusher extends HasThread implements FlushRequester {
 
     HRegion regionToFlush;
     if (bestFlushableRegion != null &&
-      bestAnyRegion.memstoreSize.get() > 2 * bestFlushableRegion.memstoreSize.get()) {
+        bestAnyRegion.memstoreSize.get() > 2 * bestFlushableRegion.memstoreSize.get()) {
       // Even if it's not supposed to be flushed, pick a region if it's more than twice
       // as big as the best flushable one - otherwise when we're under pressure we make
       // lots of little flushes and cause lots of compactions, etc, which just makes
       // life worse!
- LOG.info("Under global heap pressure: " + + if (LOG.isDebugEnabled()) { + LOG.debug("Under global heap pressure: " + "Region " + bestAnyRegion.getRegionNameAsString() + " has too many " + "store files, but is " + StringUtils.humanReadableInt(bestAnyRegion.memstoreSize.get()) + " vs best flushable region's " + StringUtils.humanReadableInt(bestFlushableRegion.memstoreSize.get()) + ". Choosing the bigger."); - regionToFlush = bestAnyRegion; + } + regionToFlush = bestAnyRegion; } else { - if (bestFlushableRegion == null) { - regionToFlush = bestAnyRegion; - } else { - regionToFlush = bestFlushableRegion; - } + if (bestFlushableRegion == null) { + regionToFlush = bestAnyRegion; + } else { + regionToFlush = bestFlushableRegion; + } } Preconditions.checkState(regionToFlush.memstoreSize.get() > 0); @@ -216,7 +218,8 @@ class MemStoreFlusher extends HasThread implements FlushRequester { fqe = flushQueue.poll(threadWakeFrequency, TimeUnit.MILLISECONDS); if (fqe == null || fqe instanceof WakeupFlushThread) { if (isAboveLowWaterMark()) { - LOG.info("Flush thread woke up with memory above low water."); + LOG.debug("Flush thread woke up because memory above low water=" + + StringUtils.humanReadableInt(this.globalMemStoreLimitLowMark)); if (!flushOneForGlobalPressure()) { // Wasn't able to flush any region, but we're above low water mark // This is unlikely to happen, but might happen when closing the diff --git src/main/java/org/apache/hadoop/hbase/regionserver/Store.java src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 7761c42..fcbcf42 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -370,7 +370,7 @@ public class Store implements HeapSize { } Path dstPath = StoreFile.getRandomFilename(fs, homedir); - LOG.info("Renaming bulk load file " + srcPath + " to " + dstPath); + LOG.debug("Renaming bulk load file " + srcPath + " to " + dstPath); StoreFile.rename(fs, srcPath, dstPath); StoreFile sf = new StoreFile(fs, dstPath, this.conf, this.cacheConf, @@ -524,7 +524,7 @@ public class Store implements HeapSize { Path dstPath = new Path(homedir, fileName); validateStoreFile(writer.getPath()); String msg = "Renaming flushed file at " + writer.getPath() + " to " + dstPath; - LOG.info(msg); + LOG.debug(msg); status.setStatus("Flushing " + this + ": " + msg); if (!fs.rename(writer.getPath(), dstPath)) { LOG.warn("Unable to rename " + writer.getPath() + " to " + dstPath); @@ -536,7 +536,7 @@ public class Store implements HeapSize { StoreFile.Reader r = sf.createReader(); this.storeSize += r.length(); this.totalUncompressedBytes += r.getTotalUncompressedBytes(); - if(LOG.isInfoEnabled()) { + if (LOG.isInfoEnabled()) { LOG.info("Added " + sf + ", entries=" + r.getEntries() + ", sequenceid=" + logCacheFlushId + ", memsize=" + StringUtils.humanReadableInt(flushed) + @@ -664,7 +664,7 @@ public class Store implements HeapSize { LOG.info("Starting compaction of " + filesToCompact.size() + " file(s) in " + this.storeNameStr + " of " + this.region.getRegionInfo().getRegionNameAsString() - + " into " + region.getTmpDir() + ", seqid=" + maxId + ", totalSize=" + + " into tmpdir=" + region.getTmpDir() + ", seqid=" + maxId + ", totalSize=" + StringUtils.humanReadableInt(cr.getSize())); StoreFile sf = null; @@ -685,8 +685,9 @@ public class Store implements HeapSize { LOG.info("Completed" + (cr.isMajor() ? 
" major " : " ") + "compaction of " + filesToCompact.size() + " file(s) in " + this.storeNameStr + " of " + this.region.getRegionInfo().getRegionNameAsString() - + "; new storefile name=" + (sf == null ? "none" : sf.toString()) - + ", size=" + (sf == null ? "none" : + + " into " + + (sf == null ? "none" : sf.getPath().getName()) + + ", size=" + (sf == null ? "none" : StringUtils.humanReadableInt(sf.getReader().length())) + "; total size for store is " + StringUtils.humanReadableInt(storeSize)); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index 63b9a95..b245025 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; -import java.util.Date; import java.util.List; import java.util.concurrent.RejectedExecutionHandler; import java.util.concurrent.ThreadPoolExecutor; @@ -199,7 +198,7 @@ public class CompactionRequest implements Comparable, server.checkFileSystem(); } finally { s.finishRequest(this); - LOG.debug("CompactSplitThread Status: " + server.compactSplitThread); + LOG.debug("CompactSplitThread status: " + server.compactSplitThread); } } diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java index b16fd6b..8591864 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java @@ -553,9 +553,6 @@ public class HLog implements Syncable { long currentFilenum = this.filenum; this.filenum = System.currentTimeMillis(); Path newPath = computeFilename(); - if (LOG.isDebugEnabled()) { - LOG.debug("Enabling new writer for "+FSUtils.getPath(newPath)); - } HLog.Writer nextWriter = this.createWriterInstance(fs, newPath, conf); // Can we get at the dfsclient outputstream? If an instance of // SFLW, it'll have done the necessary reflection to get at the @@ -582,7 +579,7 @@ public class HLog implements Syncable { this.numEntries.get() + ", filesize=" + this.fs.getFileStatus(oldFile).getLen() + ". ": "") + - "New hlog " + FSUtils.getPath(newPath)); + " for " + FSUtils.getPath(newPath)); this.numEntries.set(0); } // Can we delete any of the old log files? 
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
index 833e840..2fee321 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
@@ -93,8 +93,9 @@ public class SequenceFileLogWriter implements HLog.Writer {
     this.writer_out = getSequenceFilePrivateFSDataOutputStreamAccessible();
     this.syncFs = getSyncFs();
     this.hflush = getHFlush();
-    String msg =
-      "syncFs=" + (this.syncFs != null) + ", hflush=" + (this.hflush != null);
+    String msg = "Path=" + path +
+      ", syncFs=" + (this.syncFs != null) +
+      ", hflush=" + (this.hflush != null);
     if (this.syncFs != null || this.hflush != null) {
       LOG.debug(msg);
     } else {
diff --git src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
index 753b86e..33ecfb3 100644
--- src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
+++ src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
@@ -139,12 +139,12 @@ public final class BloomFilterFactory {
       CacheConfig cacheConf, BloomType bloomType, int maxKeys,
       HFile.Writer writer) {
     if (!isBloomEnabled(conf)) {
-      LOG.info("Bloom filters are disabled by configuration for "
+      LOG.debug("Bloom filters are disabled by configuration for "
          + writer.getPath()
          + (conf == null ? " (configuration is null)" : ""));
      return null;
    } else if (bloomType == BloomType.NONE) {
-      LOG.info("Bloom filter is turned off for the column family");
+      LOG.debug("Bloom filter is turned off for the column family");
      return null;
    }