diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index 346ab5c..241a300 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -592,15 +592,15 @@ private static int countMaxStreams(Area area) {
       long stripeOffset, boolean hasFileId, IdentityHashMap<ByteBuffer, Boolean> toRelease) throws IOException {
     DiskRangeList.MutateHelper toRead = new DiskRangeList.MutateHelper(listToRead);
-    if (LOG.isInfoEnabled()) {
-      LOG.info("Resulting disk ranges to read (file " + fileKey + "): "
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Resulting disk ranges to read (file " + fileKey + "): "
           + RecordReaderUtils.stringifyDiskRanges(toRead.next));
     }
     BooleanRef isAllInCache = new BooleanRef();
     if (hasFileId) {
       cacheWrapper.getFileData(fileKey, toRead.next, stripeOffset, CC_FACTORY, isAllInCache);
-      if (LOG.isInfoEnabled()) {
-        LOG.info("Disk ranges after cache (found everything " + isAllInCache.value + "; file "
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Disk ranges after cache (found everything " + isAllInCache.value + "; file "
             + fileKey + ", base offset " + stripeOffset + "): "
             + RecordReaderUtils.stringifyDiskRanges(toRead.next));
       }
@@ -2078,8 +2078,8 @@ private DiskRangeList preReadUncompressedStreams(long stripeOffset, ReadContext[
       releaseBuffers(toRelease.keySet(), true);
       toRelease.clear();
     }
-    if (LOG.isInfoEnabled()) {
-      LOG.info("Disk ranges after pre-read (file " + fileKey + ", base offset "
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Disk ranges after pre-read (file " + fileKey + ", base offset "
           + stripeOffset + "): " + RecordReaderUtils.stringifyDiskRanges(toRead.next));
     }
     iter = toRead.next; // Reset the iter to start.
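
Each hunk above is a straight log-level demotion: the per-read disk-range dumps move from INFO to DEBUG, with the isXxxEnabled() guard updated to match. The guard is what makes the demotion pay off, since the message concatenates RecordReaderUtils.stringifyDiskRanges(), which walks the whole range list; with the guard, that formatting work is skipped entirely when DEBUG is off. Below is a minimal standalone sketch of the pattern, assuming an slf4j Logger as Hive uses; the class name and stringify() helper are hypothetical stand-ins, not Hive code.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedLoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(GuardedLoggingSketch.class);

      // Hypothetical stand-in for an expensive formatter such as
      // RecordReaderUtils.stringifyDiskRanges(): building the string costs
      // time and allocations even if the message is ultimately discarded.
      static String stringify(long[] ranges) {
        StringBuilder sb = new StringBuilder();
        for (long r : ranges) {
          sb.append('[').append(r).append(']');
        }
        return sb.toString();
      }

      static void logRanges(long[] ranges) {
        // The guard ensures stringify() only runs when DEBUG is enabled, so
        // demoting the level from INFO to DEBUG also removes the formatting
        // cost from the default (INFO-level) read path, not just the log noise.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Disk ranges: " + stringify(ranges));
        }
      }
    }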