diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index 346ab5c..aa608b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -593,14 +593,14 @@ private static int countMaxStreams(Area area) {
       throws IOException {
     DiskRangeList.MutateHelper toRead = new DiskRangeList.MutateHelper(listToRead);
     if (LOG.isInfoEnabled()) {
-      LOG.info("Resulting disk ranges to read (file " + fileKey + "): "
+      LOG.debug("Resulting disk ranges to read (file " + fileKey + "): "
          + RecordReaderUtils.stringifyDiskRanges(toRead.next));
     }
     BooleanRef isAllInCache = new BooleanRef();
     if (hasFileId) {
       cacheWrapper.getFileData(fileKey, toRead.next, stripeOffset, CC_FACTORY, isAllInCache);
       if (LOG.isInfoEnabled()) {
-        LOG.info("Disk ranges after cache (found everything " + isAllInCache.value + "; file "
+        LOG.debug("Disk ranges after cache (found everything " + isAllInCache.value + "; file "
            + fileKey + ", base offset " + stripeOffset + "): "
            + RecordReaderUtils.stringifyDiskRanges(toRead.next));
       }
@@ -2079,7 +2079,7 @@ private DiskRangeList preReadUncompressedStreams(long stripeOffset, ReadContext[
         toRelease.clear();
       }
       if (LOG.isInfoEnabled()) {
-        LOG.info("Disk ranges after pre-read (file " + fileKey + ", base offset "
+        LOG.debug("Disk ranges after pre-read (file " + fileKey + ", base offset "
            + stripeOffset + "): " + RecordReaderUtils.stringifyDiskRanges(toRead.next));
       }
       iter = toRead.next; // Reset the iter to start.
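
Note: the patch lowers these disk-range messages from INFO to DEBUG, but the surrounding
LOG.isInfoEnabled() guards are left as context lines, so the guard level no longer matches
the call level. Below is a minimal sketch, not part of this patch, of what an aligned guard
would look like so that the RecordReaderUtils.stringifyDiskRanges(...) string concatenation
is only performed when DEBUG is actually enabled; fileKey, toRead, and LOG (assumed to be an
org.slf4j.Logger, as elsewhere in Hive) are taken from the patched method.

    // Hypothetical follow-up, aligning the guard with the new log level:
    if (LOG.isDebugEnabled()) {
      LOG.debug("Resulting disk ranges to read (file " + fileKey + "): "
          + RecordReaderUtils.stringifyDiskRanges(toRead.next));
    }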