diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetadataReader.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetadataReader.java
index cdc0372..43d2933 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetadataReader.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetadataReader.java
@@ -82,8 +82,7 @@ public MetadataReader(FSDataInputStream file,
       }
       if ((included == null || included[col]) && indexes[col] == null) {
         byte[] buffer = new byte[len];
-        file.seek(offset);
-        file.readFully(buffer);
+        file.readFully(offset, buffer, 0, buffer.length);
         ByteBuffer[] bb = new ByteBuffer[] {ByteBuffer.wrap(buffer)};
         indexes[col] = OrcProto.RowIndex.parseFrom(InStream.create("index",
             bb, new long[]{0}, stream.getLength(), codec, bufferSize));
@@ -108,8 +107,7 @@ public MetadataReader(FSDataInputStream file,
 
     // read the footer
     ByteBuffer tailBuf = ByteBuffer.allocate(tailLength);
-    file.seek(offset);
-    file.readFully(tailBuf.array(), tailBuf.arrayOffset(), tailLength);
+    file.readFully(offset, tailBuf.array(), tailBuf.arrayOffset(), tailLength);
     return OrcProto.StripeFooter.parseFrom(InStream.create("footer",
         Lists.newArrayList(new BufferChunk(tailBuf, 0)), tailLength, codec,
         bufferSize));
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
index c990d85..6589e41 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
@@ -243,9 +243,8 @@ static void ensureOrcFooter(FSDataInputStream in,
     if (!Text.decode(array, offset, len).equals(OrcFile.MAGIC)) {
       // If it isn't there, this may be the 0.11.0 version of ORC.
       // Read the first 3 bytes of the file to check for the header
-      in.seek(0);
       byte[] header = new byte[len];
-      in.readFully(header, 0, len);
+      in.readFully(0, header, 0, len);
       // if it isn't there, this isn't an ORC file
       if (!Text.decode(header, 0 , len).equals(OrcFile.MAGIC)) {
         throw new FileFormatException("Malformed ORC file " + path +
@@ -365,10 +364,9 @@ private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs,
 
     //read last bytes into buffer to get PostScript
     int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS);
-    file.seek(size - readSize);
     ByteBuffer buffer = ByteBuffer.allocate(readSize);
-    file.readFully(buffer.array(), buffer.arrayOffset() + buffer.position(),
-        buffer.remaining());
+    file.readFully((size - readSize),
+        buffer.array(), buffer.arrayOffset(), readSize);
 
     //read the PostScript
     //get length of PostScript
@@ -408,10 +406,9 @@ private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs,
     int extra = Math.max(0, psLen + 1 + footerSize + metadataSize - readSize);
     if (extra > 0) {
       //more bytes need to be read, seek back to the right place and read extra bytes
-      file.seek(size - readSize - extra);
       ByteBuffer extraBuf = ByteBuffer.allocate(extra + readSize);
-      file.readFully(extraBuf.array(),
-          extraBuf.arrayOffset() + extraBuf.position(), extra);
+      file.readFully((size - readSize - extra), extraBuf.array(),
+          extraBuf.arrayOffset() + extraBuf.position(), extra);
       extraBuf.position(extra);
       //append with already read bytes
       extraBuf.put(buffer);
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
index ded3979..9c9a1c0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderUtils.java
@@ -246,8 +246,8 @@ static DiskRangeList readDiskRanges(FSDataInputStream file,
       }
       int len = (int) (range.getEnd() - range.getOffset());
       long off = range.getOffset();
-      file.seek(base + off);
       if (zcr != null) {
+        file.seek(base + off);
         boolean hasReplaced = false;
         while (len > 0) {
           ByteBuffer partial = zcr.readBuffer(len, false);
@@ -264,12 +264,13 @@ static DiskRangeList readDiskRanges(FSDataInputStream file,
           off += read;
         }
       } else if (doForceDirect) {
+        file.seek(base + off);
         ByteBuffer directBuf = ByteBuffer.allocateDirect(len);
         readDirect(file, len, directBuf);
         range = range.replaceSelfWith(new BufferChunk(directBuf, range.getOffset()));
       } else {
         byte[] buffer = new byte[len];
-        file.readFully(buffer, 0, buffer.length);
+        file.readFully((base + off), buffer, 0, buffer.length);
         range = range.replaceSelfWith(new BufferChunk(ByteBuffer.wrap(buffer), range.getOffset()));
       }
       range = range.next;
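Every hunk above applies the same pattern: a stateful seek() followed by a relative readFully() is replaced by Hadoop's positioned read, FSDataInputStream.readFully(long position, byte[] buffer, int offset, int length), inherited from PositionedReadable. A positioned read (pread) fetches bytes at an absolute file offset without moving the stream's current position, so independent readers of the same stream no longer need to synchronize around the shared seek pointer. A minimal sketch of the before/after pattern; the path, offset, and buffer size here are illustrative, not taken from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PositionedReadSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Illustrative path and sizes only.
        try (FSDataInputStream in = fs.open(new Path("/tmp/example.orc"))) {
          byte[] buffer = new byte[1024];
          long offset = 4096L;

          // Before: two stateful calls. seek() moves the stream's shared
          // position, so concurrent readers of this stream must lock.
          in.seek(offset);
          in.readFully(buffer, 0, buffer.length);

          // After: a single positioned read (pread). It reads at an
          // absolute offset and leaves the stream position unchanged.
          in.readFully(offset, buffer, 0, buffer.length);
        }
      }
    }

The one place the patch keeps an explicit seek() is RecordReaderUtils.readDiskRanges: the zero-copy (zcr) and forced-direct branches still read relative to the current position, so the seek is pushed down into those branches while the plain byte-array branch switches to a positioned read.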