diff --git src/main/java/org/apache/hadoop/hbase/HRegionInfo.java src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index 0536b6e..0c1fa3f 100644 --- src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -655,9 +655,8 @@ implements WritableComparable { */ @Override public String toString() { - return "REGION => {" + HConstants.NAME + " => '" + + return "{" + HConstants.NAME + " => '" + this.regionNameStr - + "', TableName => '" + Bytes.toStringBinary(this.tableName) + "', STARTKEY => '" + Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + Bytes.toStringBinary(this.endKey) + diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java index dc1e872..19fee5c 100644 --- src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java +++ src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java @@ -289,8 +289,9 @@ public class MetaEditor { delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER); delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER); deleteMetaTable(catalogTracker, delete); - LOG.info("Deleted daughters references, qualifier=" + Bytes.toStringBinary(HConstants.SPLITA_QUALIFIER) + " and qualifier=" - + Bytes.toStringBinary(HConstants.SPLITA_QUALIFIER) + ", from parent " + parent.getRegionNameAsString()); + LOG.info("Deleted daughters references, qualifier=" + Bytes.toStringBinary(HConstants.SPLITA_QUALIFIER) + + " and qualifier=" + Bytes.toStringBinary(HConstants.SPLITB_QUALIFIER) + + ", from parent " + parent.getRegionNameAsString()); } public static HRegionInfo getHRegionInfo( @@ -317,4 +318,4 @@ public class MetaEditor { Bytes.toBytes(sn.getStartcode())); return p; } -} \ No newline at end of file +} diff --git src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 
e4de22a..6af1f82 100644 --- src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -1816,7 +1816,7 @@ public class HConnectionManager { } else { close(true); } - LOG.debug("The connection to " + this.zooKeeper + " has been closed."); + if (LOG.isTraceEnabled()) LOG.trace("" + this.zooKeeper + " closed."); } /** diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 8cf220b..3f6ccb6 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -746,8 +746,8 @@ public class HFileBlockIndex { totalBlockUncompressedSize += blockWriter.getUncompressedSizeWithoutHeader(); - if (LOG.isDebugEnabled()) { - LOG.debug("Wrote a " + numLevels + "-level index with root level at pos " + if (LOG.isTraceEnabled()) { + LOG.trace("Wrote a " + numLevels + "-level index with root level at pos " + out.getPos() + ", " + rootChunk.getNumEntries() + " root-level entries, " + totalNumEntries + " total entries, " + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) + @@ -782,9 +782,11 @@ public class HFileBlockIndex { rootChunk = curInlineChunk; curInlineChunk = new BlockIndexChunk(); - LOG.info("Wrote a single-level " + description + " index with " + if (LOG.isTraceEnabled()) { + LOG.trace("Wrote a single-level " + description + " index with " + rootChunk.getNumEntries() + " entries, " + rootChunk.getRootSize() + " bytes"); + } rootChunk.writeRoot(out); } diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index 1bae261..c1f304e 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -331,8 +331,10 @@ public class HFileReaderV2
extends AbstractHFileReader { if (evictOnClose && cacheConf.isBlockCacheEnabled()) { int numEvicted = cacheConf.getBlockCache().evictBlocksByPrefix(name + HFile.CACHE_KEY_SEPARATOR); - LOG.debug("On close, file=" + name + " evicted=" + numEvicted + if (LOG.isTraceEnabled()) { + LOG.trace("On close, file=" + name + " evicted=" + numEvicted + " block(s)"); + } } if (closeIStream && istream != null) { istream.close(); diff --git src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java index f381234..2d544dd 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/SplitRegionHandler.java @@ -110,7 +110,7 @@ public class SplitRegionHandler extends EventHandler implements TotesHRegionInfo parent.getEncodedName() + ")", e); } } - LOG.info("Handled SPLIT report); parent=" + + LOG.info("Handled SPLIT event; parent=" + this.parent.getRegionNameAsString() + " daughter a=" + this.daughters.get(0).getRegionNameAsString() + "daughter b=" + this.daughters.get(1).getRegionNameAsString()); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 6f37b84..94a8c1d 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1580,7 +1580,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, // Add to online regions if all above was successful. 
addToOnlineRegions(r); - LOG.info("addToOnlineRegions is done" + r.getRegionInfo()); // Update ZK, ROOT or META if (r.getRegionInfo().isRootRegion()) { RootLocationEditor.setRootLocation(getZooKeeper(), @@ -1598,7 +1597,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, this.serverNameFromMasterPOV); } } - LOG.info("Done with post open deploy taks for region=" + + LOG.info("Done with post open deploy task for region=" + r.getRegionNameAsString() + ", daughter=" + daughter); } diff --git src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index 0cc2f63..08b7de3 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -393,7 +393,7 @@ public class SplitTransaction { // that it's possible for the master to miss an event. do { if (spins % 10 == 0) { - LOG.info("Still waiting on the master to process the split for " + + LOG.debug("Still waiting on the master to process the split for " + this.parent.getRegionInfo().getEncodedName()); } Thread.sleep(100); diff --git src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java index 79c7220..418bd16 100644 --- src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java +++ src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java @@ -173,12 +173,12 @@ public final class BloomFilterFactory { CacheConfig cacheConf, BloomType bloomType, int maxKeys, HFile.Writer writer) { if (!isGeneralBloomEnabled(conf)) { - LOG.debug("Bloom filters are disabled by configuration for " + LOG.trace("Bloom filters are disabled by configuration for " + writer.getPath() + (conf == null ? 
" (configuration is null)" : "")); return null; } else if (bloomType == BloomType.NONE) { - LOG.debug("Bloom filter is turned off for the column family"); + LOG.trace("Bloom filter is turned off for the column family"); return null; }