.META..
- *
+ *
* This class is "read-only" in that the locations of the catalog tables cannot
* be explicitly set. Instead, ZooKeeper is used to learn of the availability
* and location of .META..
@@ -65,7 +65,7 @@ public class CatalogTracker {
// servers when they needed to know of meta movement but also by
// client-side (inside in HTable) so rather than figure meta
// locations on fault, the client would instead get notifications out of zk.
- //
+ //
// But this original intent is frustrated by the fact that this class has to
// read an hbase table, the -ROOT- table, to figure out the .META. region
// location which means we depend on an HConnection. HConnection will do
@@ -110,13 +110,6 @@ public class CatalogTracker {
private boolean instantiatedzkw = false;
private Abortable abortable;
- /*
- * Do not clear this address once set. Its needed when we do
- * server shutdown processing -- we need to know who had .META. last. If you
- * want to know if the address is good, rely on {@link #metaAvailable} value.
- */
- private ServerName metaLocation;
-
private boolean stopped = false;
static final byte [] META_REGION_NAME =
@@ -147,7 +140,7 @@ public class CatalogTracker {
* @param abortable If fatal exception we'll call abort on this. May be null.
* If it is we'll use the Connection associated with the passed
* {@link Configuration} as our Abortable.
- * @throws IOException
+ * @throws IOException
*/
public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
Abortable abortable)
@@ -193,7 +186,7 @@ public class CatalogTracker {
* Determines current availability of catalog tables and ensures all further
* transitions of either region are tracked.
* @throws IOException
- * @throws InterruptedException
+ * @throws InterruptedException
*/
public void start() throws IOException, InterruptedException {
LOG.debug("Starting catalog tracker " + this);
@@ -235,7 +228,7 @@ public class CatalogTracker {
* not currently available.
* @return {@link ServerName} for server hosting .META. or null
* if none available
- * @throws InterruptedException
+ * @throws InterruptedException
*/
public ServerName getMetaLocation() throws InterruptedException {
return this.metaRegionTracker.getMetaRegionLocation();
@@ -309,8 +302,6 @@ public class CatalogTracker {
LOG.info(".META. still not available, sleeping and retrying." +
" Reason: " + e.getMessage());
}
- } catch (IOException e) {
- LOG.info("Retrying", e);
}
}
}
@@ -356,7 +347,7 @@ public class CatalogTracker {
} else {
throw ioe;
}
-
+
}
return protocol;
}
@@ -406,7 +397,7 @@ public class CatalogTracker {
}
}
LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) +
- " at address=" + address + "; " + t);
+ " at address=" + address + ", exception=" + t);
return false;
}
@@ -416,7 +407,7 @@ public class CatalogTracker {
* the internal call to {@link #waitForMetaServerConnection(long)}.
* @return True if the .META. location is healthy.
* @throws IOException
- * @throws InterruptedException
+ * @throws InterruptedException
*/
public boolean verifyMetaRegionLocation(final long timeout)
throws InterruptedException, IOException {
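For orientation, a caller-side sketch of the tracker API touched above. The constructor, start(), getMetaLocation() and verifyMetaRegionLocation(long) appear in these hunks; stop() and the exception handling are assumed for illustration only.

// Hypothetical usage sketch of CatalogTracker; stop() is assumed,
// InterruptedException/IOException handling elided for brevity.
CatalogTracker ct = new CatalogTracker(zkWatcher, conf, null);
ct.start();
try {
  ServerName meta = ct.getMetaLocation();            // null if unassigned
  if (meta != null && ct.verifyMetaRegionLocation(10000)) {
    // .META. location is live; safe to route requests to 'meta'
  }
} finally {
  ct.stop();                                         // assumed shutdown call
}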
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java
index ba14702..9a41fcb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java
@@ -63,6 +63,11 @@ public class Action
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -66,10 +65,21 @@ public class Append extends Mutation {
   * Create an Append operation for the specified row.
   * <p>
   * At least one column must be appended to.
-  * @param row row key
+  * @param row row key; makes a local copy of passed in array.
   */
  public Append(byte[] row) {
-    this.row = Arrays.copyOf(row, row.length);
+    this.row = Bytes.copy(row);
+  }
+
+  /**
+   * Create an Append operation for the specified row.
+   * <p>
+   * At least one column must be appended to.
+   * @param rowArray Makes a copy out of this buffer.
+   * @param rowOffset offset into rowArray at which the row key starts
+   * @param rowLength length of the row key in rowArray
+   */
+  public Append(final byte [] rowArray, final int rowOffset, final int rowLength) {
+    this.row = Bytes.copy(rowArray, rowOffset, rowLength);
   }

  /**
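For illustration, how the two constructors above differ; a sketch assuming the usual Append.add(family, qualifier, value) API.

// Hedged sketch: the new constructor copies just the row slice
// [rowOffset, rowOffset + rowLength) out of a larger buffer.
byte [] buf = Bytes.toBytes("xx-row1-yy");
Append a = new Append(buf, 3, 4);                    // row key is "row1"
a.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("tail"));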
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 0db0167..f22c4ed 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.Map;
@@ -96,6 +97,25 @@ public class Delete extends Mutation implements Comparable<Row> {
   }

   /**
+   * Create a Delete operation for the specified row and timestamp.
+   * <p>
+   * If no further operations are done, this will delete all columns in all
+   * families of the specified row with a timestamp less than or equal to the
+   * specified timestamp.
+   * <p>
+   * This timestamp is ONLY used for a delete row operation. If specifying
+   * families or columns, you must specify each timestamp individually.
+   * @param rowArray We make a local copy of this passed in row.
+   * @param rowOffset offset into rowArray at which the row key starts
+   * @param rowLength length of the row key in rowArray
+   * @param ts maximum version timestamp (only for delete row)
+   */
+  public Delete(final byte [] rowArray, final int rowOffset, final int rowLength, long ts) {
+    this.row = Bytes.copy(rowArray, rowOffset, rowLength);
+    this.ts = ts;
+ }
+
+ /**
* @param d Delete to clone.
*/
public Delete(final Delete d) {
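A worked sketch of the constructor just added: the row is copied out of a larger buffer, and the timestamp applies because this remains a whole-row delete.

// Deletes all columns in all families of "row-22" with ts <= now.
byte [] buf = Bytes.toBytes("hdr|row-22|crc");
long now = System.currentTimeMillis();
Delete d = new Delete(buf, 4, 6, now);               // copies "row-22"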
@@ -121,10 +141,8 @@ public class Delete extends Mutation implements Comparable<Row> {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
@@ ... @@ public class Increment implements Row {
   /**
    * Create an Increment operation for the specified row.
    * <p>
    * At least one column must be incremented.
-   * @param row row key
+   * @param row row key (we will make a copy of this).
    */
   public Increment(byte [] row) {
-    if (row == null || row.length > HConstants.MAX_ROW_LENGTH) {
+    this(row, 0, row.length);
+  }
+
+  /**
+   * Create an Increment operation for the specified row.
+   * <p>
+   * At least one column must be incremented.
+   * @param row row key (we will make a copy of this).
+   * @param offset offset into row at which the key starts
+   * @param length length of the row key
+   */
+  public Increment(final byte [] row, final int offset, final int length) {
+    if (row == null || length <= 0 || length > HConstants.MAX_ROW_LENGTH) {
       throw new IllegalArgumentException("Row key is invalid");
     }
-    this.row = Arrays.copyOf(row, row.length);
+    this.row = Bytes.copy(row, offset, length);
+  }
+
+ /**
+ * Add the specified KeyValue to this operation.
+ * @param cell individual Cell
+ * @return this
+ * @throws java.io.IOException e
+ */
+ @SuppressWarnings("unchecked")
+ public Increment add(Cell cell) throws IOException{
+ KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+ byte [] family = kv.getFamily();
+    List<? extends Cell> list = getCellList(family);
+ //Checking that the row of the kv is the same as the put
+ int res = Bytes.compareTo(this.row, 0, row.length,
+ kv.getBuffer(), kv.getRowOffset(), kv.getRowLength());
+ if (res != 0) {
+ throw new WrongRowIOException("The row in " + kv.toString() +
+ " doesn't match the original one " + Bytes.toStringBinary(this.row));
+ }
+    ((List<KeyValue>)list).add(kv);
+    return this;
+  }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
@@ ... @@
+ * <p>We compare and equate mutations based off their row so be careful putting RowMutations
+ * into Sets or using them as keys in Maps.
  */
@InterfaceAudience.Public
@InterfaceStability.Evolving
@@ -89,6 +92,16 @@ public class RowMutations implements Row {
}
@Override
+ public boolean equals(Object obj) {
+ if (obj == this) return true;
+ if (obj instanceof RowMutations) {
+ RowMutations other = (RowMutations)obj;
+ return compareTo(other) == 0;
+ }
+ return false;
+ }
+
+ @Override
public byte[] getRow() {
return row;
}
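The caveat added to the class comment, in practice; a sketch (checked IOException from add() elided).

// Equality is by row only, so content-distinct batches compare equal --
// careful when keying Maps or filling Sets with RowMutations.
RowMutations a = new RowMutations(Bytes.toBytes("r1"));
a.add(new Put(Bytes.toBytes("r1")));
RowMutations b = new RowMutations(Bytes.toBytes("r1"));
b.add(new Delete(Bytes.toBytes("r1")));
boolean same = a.equals(b);                          // true: same row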
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/WrongRowIOException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/WrongRowIOException.java
new file mode 100644
index 0000000..a44dc8a
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/WrongRowIOException.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.exceptions.HBaseIOException;
+
+public class WrongRowIOException extends HBaseIOException {
+ private static final long serialVersionUID = -5849522209440123059L;
+
+ public WrongRowIOException(final String msg) {
+ super(msg);
+ }
+}
\ No newline at end of file
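Where the new exception fires: the Increment.add(Cell) added earlier in this patch rejects a cell whose row differs from the operation's row. A sketch:

// Client-side row check, before any RPC is sent.
Increment inc = new Increment(Bytes.toBytes("row-a"));
KeyValue kv = new KeyValue(Bytes.toBytes("row-b"), Bytes.toBytes("f"),
    Bytes.toBytes("q"), Bytes.toBytes(1L));
try {
  inc.add(kv);                                       // rows differ
} catch (WrongRowIOException e) {
  // cell rejected: its row does not match "row-a"
}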
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/AccessDeniedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/AccessDeniedException.java
index 3d759e8..4594964 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/AccessDeniedException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/AccessDeniedException.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.exceptions;
-import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
/**
* Exception thrown by access-related methods.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CoprocessorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CoprocessorException.java
index a66591e..5e5409a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CoprocessorException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CoprocessorException.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
/**
* Thrown if a coprocessor encounters any exception.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CorruptHFileException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CorruptHFileException.java
index 61288ea..3b92b35 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CorruptHFileException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CorruptHFileException.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
/**
* This exception is thrown when attempts to read an HFile fail due to corruption or truncation
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/DoNotRetryIOException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/DoNotRetryIOException.java
index a539a66..8973233 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/DoNotRetryIOException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/DoNotRetryIOException.java
@@ -56,4 +56,4 @@ public class DoNotRetryIOException extends HBaseIOException {
public DoNotRetryIOException(Throwable cause) {
super(cause);
}
-}
+}
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LeaseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LeaseException.java
index 1d1cece..0ba7650 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LeaseException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LeaseException.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
/**
* Reports a problem with a lease
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java
index b262bd1..12008e2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java
@@ -23,6 +23,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
/**
@@ -48,6 +49,11 @@ public class NullComparator extends ByteArrayComparable {
}
@Override
+ public int hashCode() {
+ return 0;
+ }
+
+ @Override
public int compareTo(byte[] value, int offset, int length) {
throw new UnsupportedOperationException();
}
@@ -69,9 +75,9 @@ public class NullComparator extends ByteArrayComparable {
*/
public static NullComparator parseFrom(final byte [] pbBytes)
throws DeserializationException {
- ComparatorProtos.NullComparator proto;
try {
- proto = ComparatorProtos.NullComparator.parseFrom(pbBytes);
+ @SuppressWarnings("unused")
+ ComparatorProtos.NullComparator proto = ComparatorProtos.NullComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java
index 26ab35d..c223fa5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java
@@ -46,7 +46,7 @@ public enum AuthMethod {
private static final int FIRST_CODE = values()[0].code;
/** Return the object represented by the code. */
- private static AuthMethod valueOf(byte code) {
+ public static AuthMethod valueOf(byte code) {
final int i = (code & 0xff) - FIRST_CODE;
return i < 0 || i >= values().length ? null : values()[i];
}
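What the widened access enables: code outside the package can map the wire byte back to the enum. A sketch that assumes the enum exposes its one-byte code, as Hadoop's equivalent does:

// Hypothetical round trip of the SASL auth byte.
byte wire = AuthMethod.KERBEROS.code;                // assumed public field
AuthMethod m = AuthMethod.valueOf(wire);
if (m == null) {
  throw new IllegalArgumentException("Unknown auth code: " + wire);
}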
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index ea0b255..f147299 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -1587,7 +1587,7 @@ public class ZKUtil {
try {
getReplicationZnodesDump(zkw, sb);
} catch (KeeperException ke) {
- LOG.warn("Couldn't get the replication znode dump." + ke.getStackTrace());
+ LOG.warn("Couldn't get the replication znode dump", ke);
}
sb.append("\nQuorum Server Statistics:");
String[] servers = zkw.getQuorum().split(",");
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
index 2e9e4f2..2993dd5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
@@ -34,6 +34,9 @@ import com.google.common.primitives.Longs;
* regionname, from row. See KeyValue for how it has a special comparator to do .META. cells
* and yet another for -ROOT-.
*/
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(
+ value="UNKNOWN",
+ justification="Findbugs doesn't like the way we are negating the result of a compare in below")
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class CellComparator implements Comparator<Cell> {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ ... @@
 * Integer.MAX_SIZE. The column does not contain the family/qualifier delimiter,
* {@link #COLUMN_FAMILY_DELIMITER}
*/
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(
+ value="CN_IDIOM_NO_SUPER_CALL",
+ justification="Its PITA calling the super.clone")
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class KeyValue implements Cell, HeapSize, Cloneable {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
index 3743069..0edb781 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
@@ -22,12 +22,12 @@ import java.io.InputStream;
import org.apache.hadoop.hbase.Cell;
-abstract class BaseDecoder implements Codec.Decoder {
- final InputStream in;
+public abstract class BaseDecoder implements Codec.Decoder {
+ protected final InputStream in;
private boolean hasNext = true;
private Cell current = null;
- BaseDecoder(final InputStream in) {
+ public BaseDecoder(final InputStream in) {
this.in = in;
}
@@ -50,7 +50,7 @@ abstract class BaseDecoder implements Codec.Decoder {
* @return extract a Cell
* @throws IOException
*/
- abstract Cell parseCell() throws IOException;
+ protected abstract Cell parseCell() throws IOException;
@Override
public Cell current() {
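What the public/protected changes buy: decoders outside org.apache.hadoop.hbase.codec can now subclass. A minimal sketch reusing KeyValue.iscreate (seen in KeyValueCodec below):

// Minimal third-party decoder enabled by the visibility changes.
public class MyKeyValueDecoder extends BaseDecoder {
  public MyKeyValueDecoder(final InputStream in) {
    super(in);                                       // 'in' now protected
  }
  @Override
  protected Cell parseCell() throws IOException {
    return KeyValue.iscreate(this.in);
  }
}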
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java
index c7a4aab..941fb0e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java
@@ -22,7 +22,7 @@ import java.io.OutputStream;
import org.apache.hadoop.hbase.Cell;
-abstract class BaseEncoder implements Codec.Encoder {
+public abstract class BaseEncoder implements Codec.Encoder {
protected final OutputStream out;
// This encoder is 'done' once flush has been called.
protected boolean flushed = false;
@@ -34,7 +34,7 @@ abstract class BaseEncoder implements Codec.Encoder {
@Override
public abstract void write(Cell cell) throws IOException;
- void checkFlushed() throws CodecException {
+ protected void checkFlushed() throws CodecException {
if (this.flushed) throw new CodecException("Flushed; done");
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
index e3b7972..2acf9de 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
@@ -77,7 +77,7 @@ public class CellCodec implements Codec {
super(in);
}
- Cell parseCell() throws IOException {
+ protected Cell parseCell() throws IOException {
byte [] row = readByteArray(this.in);
byte [] family = readByteArray(in);
byte [] qualifier = readByteArray(in);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java
index 33a21b8..99db830 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java
@@ -66,7 +66,7 @@ public class KeyValueCodec implements Codec {
super(in);
}
- Cell parseCell() throws IOException {
+ protected Cell parseCell() throws IOException {
return KeyValue.iscreate(in);
}
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
new file mode 100644
index 0000000..ec48bdb
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
@@ -0,0 +1,137 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.WritableByteChannel;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Not thread safe!
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ByteBufferOutputStream extends OutputStream {
+
+ protected ByteBuffer buf;
+
+ public ByteBufferOutputStream(int capacity) {
+ this(capacity, false);
+ }
+
+ public ByteBufferOutputStream(int capacity, boolean useDirectByteBuffer) {
+ if (useDirectByteBuffer) {
+ buf = ByteBuffer.allocateDirect(capacity);
+ } else {
+ buf = ByteBuffer.allocate(capacity);
+ }
+ }
+
+ public int size() {
+ return buf.position();
+ }
+
+ /**
+ * This flips the underlying BB so be sure to use it _last_!
+ * @return ByteBuffer
+ */
+ public ByteBuffer getByteBuffer() {
+ buf.flip();
+ return buf;
+ }
+
+ private void checkSizeAndGrow(int extra) {
+ if ( (buf.position() + extra) > buf.limit()) {
+ // size calculation is complex, because we could overflow negative,
+ // and/or not allocate enough space. this fixes that.
+ int newSize = (int)Math.min((((long)buf.capacity()) * 2),
+ (long)(Integer.MAX_VALUE));
+ newSize = Math.max(newSize, buf.position() + extra);
+
+ ByteBuffer newBuf = ByteBuffer.allocate(newSize);
+ buf.flip();
+ newBuf.put(buf);
+ buf = newBuf;
+ }
+ }
+
+ // OutputStream
+ @Override
+ public void write(int b) throws IOException {
+ checkSizeAndGrow(Bytes.SIZEOF_BYTE);
+
+ buf.put((byte)b);
+ }
+
+ /**
+ * Writes the complete contents of this byte buffer output stream to
+ * the specified output stream argument.
+ *
+ * @param out the output stream to which to write the data.
+ * @exception IOException if an I/O error occurs.
+ */
+ public synchronized void writeTo(OutputStream out) throws IOException {
+ WritableByteChannel channel = Channels.newChannel(out);
+ ByteBuffer bb = buf.duplicate();
+ bb.flip();
+ channel.write(bb);
+ }
+
+ @Override
+ public void write(byte[] b) throws IOException {
+ checkSizeAndGrow(b.length);
+
+ buf.put(b);
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ checkSizeAndGrow(len);
+
+ buf.put(b, off, len);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ // noop
+ }
+
+ @Override
+ public void close() throws IOException {
+ // noop again. heh
+ }
+
+ public byte[] toByteArray(int offset, int length) {
+ ByteBuffer bb = buf.duplicate();
+ bb.flip();
+
+ byte[] chunk = new byte[length];
+
+ bb.position(offset);
+ bb.get(chunk, 0, length);
+ return chunk;
+ }
+}
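Usage sketch for the new stream; note the one-shot getByteBuffer() contract called out in its javadoc.

// Buffer grows as needed; the final flip hands back a read-ready buffer.
ByteBufferOutputStream bbos = new ByteBufferOutputStream(8);
bbos.write(Bytes.toBytes("hello "));
bbos.write(Bytes.toBytes("world"), 0, 5);
ByteBuffer bb = bbos.getByteBuffer();                // flips; use it last
int written = bb.remaining();                        // 11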
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
index 039187d..4c29120 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
@@ -209,8 +209,9 @@ public class DiffKeyDeltaEncoder extends BufferedDataBlockEncoder {
state.familyNameWithSize =
new byte[(state.familyLength & 0xff) + KeyValue.FAMILY_LENGTH_SIZE];
state.familyNameWithSize[0] = state.familyLength;
- source.read(state.familyNameWithSize, KeyValue.FAMILY_LENGTH_SIZE,
+ int read = source.read(state.familyNameWithSize, KeyValue.FAMILY_LENGTH_SIZE,
state.familyLength);
+ assert read == state.familyLength;
}
// read flag
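The assert above only guards debug runs, and InputStream.read may legitimately return fewer bytes than asked. Where partial reads are possible, the usual remedy is a read-fully loop; a generic sketch, not what this patch does:

// Generic helper: loop until the requested slice is completely filled.
static void readFully(InputStream in, byte [] b, int off, int len)
    throws IOException {
  int n = 0;
  while (n < len) {
    int r = in.read(b, off + n, len - n);
    if (r < 0) throw new EOFException("Premature end of stream");
    n += r;
  }
}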
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java
index 925801a..590baf5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java
@@ -16,14 +16,10 @@
*/
package org.apache.hadoop.hbase.io.encoding;
-import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
-import java.io.FilterOutputStream;
import java.io.IOException;
-import java.lang.reflect.Field;
import java.nio.ByteBuffer;
-import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index e804ece..30fa08b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -1673,6 +1673,21 @@ public class Bytes {
}
/**
+   * Copy a slice of the byte array given in parameter and return an
+   * instance of a new byte array with the designated length and content.
+   * @param bytes the byte array to copy from
+   * @param offset the offset in bytes at which to start copying
+   * @param length the number of bytes to copy
+   * @return a copy of the designated slice of the given byte array
+ */
+ public static byte [] copy(byte [] bytes, final int offset, final int length) {
+ if (bytes == null) return null;
+ byte [] result = new byte[length];
+ System.arraycopy(bytes, offset, result, 0, length);
+ return result;
+ }
+
+ /**
* Search sorted array "a" for byte "key". I can't remember if I wrote this or copied it from
* somewhere. (mcorgan)
* @param a Array to search. Entries must be sorted and unique.
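The helper in action; it is what the new Append/Delete/Increment constructors above delegate to.

// Copies the slice [offset, offset + length) into a fresh array.
byte [] src = Bytes.toBytes("abcdef");
byte [] mid = Bytes.copy(src, 2, 3);                 // { 'c', 'd', 'e' }
byte [] nil = Bytes.copy(null, 0, 0);                // null in, null out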
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java
index e3d8cb2..e3cf837 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java
@@ -34,6 +34,9 @@ import com.google.common.primitives.Bytes;
* Generate list of key values which are very useful to test data block encoding
* and compression.
*/
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(
+ value="RV_ABSOLUTE_VALUE_OF_RANDOM_INT",
+ justification="Should probably fix")
public class RedundantKVGenerator {
// row settings
static byte[] DEFAULT_COMMON_PREFIX = new byte[0];
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
index d3eaf7d..1af5f62 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
@@ -183,7 +183,6 @@ public class DemoClient {
client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(valid), mutations, dummyAttributes);
// non-utf8 is now allowed in row names because HBase stores values as binary
- ByteBuffer bf = ByteBuffer.wrap(invalid);
mutations = new ArrayList<Mutation>();