diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffInputStream.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffInputStream.java
deleted file mode 100644
index 4f6b3c2..0000000
--- hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBuffInputStream.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io;
-
-import java.io.InputStream;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.nio.ByteBuff;
-
-/**
- * Not thread safe!
- *
- * Please note that the reads will cause position movement on wrapped ByteBuff.
- */
-@InterfaceAudience.Private
-public class ByteBuffInputStream extends InputStream {
-
- private ByteBuff buf;
-
- public ByteBuffInputStream(ByteBuff buf) {
- this.buf = buf;
- }
-
- /**
- * Reads the next byte of data from this input stream. The value byte is returned as an
- * int in the range 0 to 255. If no byte is available
- * because the end of the stream has been reached, the value -1 is returned.
- * @return the next byte of data, or -1 if the end of the stream has been reached.
- */
- public int read() {
- if (this.buf.hasRemaining()) {
- return (this.buf.get() & 0xff);
- }
- return -1;
- }
-
- /**
- * Reads up to next len bytes of data from buffer into passed array(starting from
- * given offset).
- * @param b the array into which the data is read.
- * @param off the start offset in the destination array b
- * @param len the maximum number of bytes to read.
- * @return the total number of bytes actually read into the buffer, or -1 if not even
- * 1 byte can be read because the end of the stream has been reached.
- */
- public int read (byte b[], int off, int len) {
- int avail = available();
- if (avail <= 0) {
- return -1;
- }
- if (len <= 0) {
- return 0;
- }
-
- if (len > avail) {
- len = avail;
- }
- this.buf.get(b, off, len);
- return len;
- }
-
- /**
- * Skips n bytes of input from this input stream. Fewer bytes might be skipped if the
- * end of the input stream is reached. The actual number k of bytes to be skipped is
- * equal to the smaller of n and remaining bytes in the stream.
- * @param n the number of bytes to be skipped.
- * @return the actual number of bytes skipped.
- */
- public long skip(long n) {
- long k = Math.min(n, available());
- if (k <= 0) {
- return 0;
- }
- this.buf.skip((int) k);
- return k;
- }
-
- /**
- * @return the number of remaining bytes that can be read (or skipped
- * over) from this input stream.
- */
- public int available() {
- return this.buf.remaining();
- }
-}
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferInputStream.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferInputStream.java
new file mode 100644
index 0000000..11675ff
--- /dev/null
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferInputStream.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io;
+
+import java.io.InputStream;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+
+/**
+ * Not thread safe!
+ *
+ * Please note that the reads will cause position movement on wrapped ByteBuff.
+ */
+@InterfaceAudience.Private
+public class ByteBufferInputStream extends InputStream {
+
+ private ByteBuff buf;
+
+ public ByteBufferInputStream(ByteBuff buf) {
+ this.buf = buf;
+ }
+
+ /**
+ * Reads the next byte of data from this input stream. The value byte is returned as an
+ * int in the range 0 to 255. If no byte is available
+ * because the end of the stream has been reached, the value -1 is returned.
+ * @return the next byte of data, or -1 if the end of the stream has been reached.
+ */
+ public int read() {
+ if (this.buf.hasRemaining()) {
+ return (this.buf.get() & 0xff);
+ }
+ return -1;
+ }
+
+ /**
+ * Reads up to next len bytes of data from buffer into the passed array (starting from
+ * given offset).
+ * @param b the array into which the data is read.
+ * @param off the start offset in the destination array b
+ * @param len the maximum number of bytes to read.
+ * @return the total number of bytes actually read into the buffer, or -1 if not even
+ * 1 byte can be read because the end of the stream has been reached.
+ */
+ public int read (byte b[], int off, int len) {
+ int avail = available();
+ if (avail <= 0) {
+ return -1;
+ }
+ if (len <= 0) {
+ return 0;
+ }
+
+ if (len > avail) {
+ len = avail;
+ }
+ this.buf.get(b, off, len);
+ return len;
+ }
+
+ /**
+ * Skips n bytes of input from this input stream. Fewer bytes might be skipped if the
+ * end of the input stream is reached. The actual number k of bytes to be skipped is
+ * equal to the smaller of n and remaining bytes in the stream.
+ * @param n the number of bytes to be skipped.
+ * @return the actual number of bytes skipped.
+ */
+ public long skip(long n) {
+ long k = Math.min(n, available());
+ if (k <= 0) {
+ return 0;
+ }
+ this.buf.skip((int) k);
+ return k;
+ }
+
+ /**
+ * @return the number of remaining bytes that can be read (or skipped
+ * over) from this input stream.
+ */
+ public int available() {
+ return this.buf.remaining();
+ }
+}
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java
index 30382d9..417e9f5 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/HFileBlockDefaultDecodingContext.java
@@ -22,7 +22,7 @@ import java.io.InputStream;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.ByteBuffInputStream;
+import org.apache.hadoop.hbase.io.ByteBufferInputStream;
import org.apache.hadoop.hbase.io.TagCompressionContext;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Cipher;
@@ -52,7 +52,7 @@ public class HFileBlockDefaultDecodingContext implements
@Override
public void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader,
ByteBuff blockBufferWithoutHeader, ByteBuff onDiskBlock) throws IOException {
- InputStream in = new DataInputStream(new ByteBuffInputStream(onDiskBlock));
+ InputStream in = new DataInputStream(new ByteBufferInputStream(onDiskBlock));
Encryption.Context cryptoContext = fileContext.getEncryptionContext();
if (cryptoContext != Encryption.Context.NONE) {
diff --git hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestMultiByteBuffInputStream.java hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestMultiByteBuffInputStream.java
index ed96e87..8c6c65c 100644
--- hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestMultiByteBuffInputStream.java
+++ hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestMultiByteBuffInputStream.java
@@ -50,7 +50,7 @@ public class TestMultiByteBuffInputStream {
// bbis contains 19 bytes
// 1 byte, 4 bytes int, 4 bytes string, 8 bytes long and 2 bytes short
- ByteBuffInputStream bbis = new ByteBuffInputStream(new MultiByteBuff(bb));
+ ByteBufferInputStream bbis = new ByteBufferInputStream(new MultiByteBuff(bb));
assertEquals(15 + s.length(), bbis.available());
assertEquals(1, bbis.read());
byte[] ib = new byte[4];
@@ -74,7 +74,7 @@ public class TestMultiByteBuffInputStream {
bbis.close();
bb = ByteBuffer.wrap(bos.toByteArray());
- bbis = new ByteBuffInputStream(new MultiByteBuff(bb));
+ bbis = new ByteBufferInputStream(new MultiByteBuff(bb));
DataInputStream dis = new DataInputStream(bbis);
dis.read();
assertEquals(i, dis.readInt());
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 0a25825..4c77782 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-import org.apache.hadoop.hbase.io.ByteBuffInputStream;
+import org.apache.hadoop.hbase.io.ByteBufferInputStream;
import org.apache.hadoop.hbase.io.ByteBufferSupportDataOutputStream;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext;
@@ -658,7 +658,7 @@ public class HFileBlock implements Cacheable {
public DataInputStream getByteStream() {
ByteBuff dup = this.buf.duplicate();
dup.position(this.headerSize());
- return new DataInputStream(new ByteBuffInputStream(dup));
+ return new DataInputStream(new ByteBufferInputStream(dup));
}
@Override