Index: hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (working copy)
@@ -48,7 +48,7 @@
/**
* Tracks the availability of the catalog tables
* .META..
- *
+ *
* This class is "read-only" in that the locations of the catalog tables cannot
* be explicitly set. Instead, ZooKeeper is used to learn of the availability
* and location of .META..
@@ -65,7 +65,7 @@
// servers when they needed to know of meta movement but also by
// client-side (inside in HTable) so rather than figure meta
// locations on fault, the client would instead get notifications out of zk.
- //
+ //
// But this original intent is frustrated by the fact that this class has to
// read an hbase table, the -ROOT- table, to figure out the .META. region
// location which means we depend on an HConnection. HConnection will do
@@ -110,13 +110,6 @@
private boolean instantiatedzkw = false;
private Abortable abortable;
- /*
- * Do not clear this address once set. Its needed when we do
- * server shutdown processing -- we need to know who had .META. last. If you
- * want to know if the address is good, rely on {@link #metaAvailable} value.
- */
- private ServerName metaLocation;
-
private boolean stopped = false;
static final byte [] META_REGION_NAME =
@@ -147,7 +140,7 @@
* @param abortable If fatal exception we'll call abort on this. May be null.
* If it is we'll use the Connection associated with the passed
* {@link Configuration} as our Abortable.
- * @throws IOException
+ * @throws IOException
*/
public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
Abortable abortable)
@@ -193,7 +186,7 @@
* Determines current availability of catalog tables and ensures all further
* transitions of either region are tracked.
* @throws IOException
- * @throws InterruptedException
+ * @throws InterruptedException
*/
public void start() throws IOException, InterruptedException {
LOG.debug("Starting catalog tracker " + this);
@@ -235,7 +228,7 @@
* not currently available.
* @return {@link ServerName} for server hosting .META. or null
* if none available
- * @throws InterruptedException
+ * @throws InterruptedException
*/
public ServerName getMetaLocation() throws InterruptedException {
return this.metaRegionTracker.getMetaRegionLocation();
@@ -309,8 +302,6 @@
LOG.info(".META. still not available, sleeping and retrying." +
" Reason: " + e.getMessage());
}
- } catch (IOException e) {
- LOG.info("Retrying", e);
}
}
}
@@ -356,7 +347,7 @@
} else {
throw ioe;
}
-
+
}
return protocol;
}
@@ -406,7 +397,7 @@
}
}
LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) +
- " at address=" + address + "; " + t);
+ " at address=" + address + ", exception=" + t);
return false;
}
@@ -416,7 +407,7 @@
* the internal call to {@link #waitForMetaServerConnection(long)}.
* @return True if the .META. location is healthy.
* @throws IOException
- * @throws InterruptedException
+ * @throws InterruptedException
*/
public boolean verifyMetaRegionLocation(final long timeout)
throws InterruptedException, IOException {
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java (working copy)
@@ -63,6 +63,11 @@
}
@Override
+ public int hashCode() {
+ return this.action.hashCode();
+ }
+
+ @Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null || getClass() != obj.getClass()) return false;
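Overriding hashCode() wherever equals() is overridden keeps Action safe to use as a key in hash-based collections. A minimal sketch of the restored contract, assuming this branch's Action<R>(Row, int) constructor (the snippet itself is illustrative, not part of the patch):

    Action<Object> a1 = new Action<Object>(new Get(Bytes.toBytes("r")), 0);
    Action<Object> a2 = new Action<Object>(new Get(Bytes.toBytes("r")), 0);
    assert a1.equals(a2);                  // row-based comparison via compareTo
    assert a1.hashCode() == a2.hashCode(); // now consistent with equals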
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java (working copy)
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hbase.client;
+import java.util.ArrayList;
+import java.util.List;
+
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.Cell;
@@ -24,10 +27,6 @@
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.util.Bytes;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
/**
* Performs Append operations on a single row.
*
@@ -66,12 +65,24 @@
* Create a Append operation for the specified row.
*
* At least one column must be appended to.
- * @param row row key
+ * @param row row key; makes a local copy of the passed-in array.
*/
public Append(byte[] row) {
- this.row = Arrays.copyOf(row, row.length);
+ this(row, 0, row.length);
}
+ /** Create an Append operation for the specified row.
+ *
+ * At least one column must be appended to.
+ * @param rowArray source buffer; a copy is made of the row it holds.
+ * @param rowOffset offset of the row within rowArray
+ * @param rowLength length of the row
+ */
+ public Append(final byte [] rowArray, final int rowOffset, final int rowLength) {
+ checkRow(rowArray, rowOffset, rowLength);
+ this.row = Bytes.copy(rowArray, rowOffset, rowLength);
+ }
+
/**
* Add the specified column and value to this Append operation.
* @param family family name
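The new offset/length constructor lets callers pass a row that sits inside a larger shared buffer; only the named slice is copied. A usage sketch (buffer contents hypothetical):

    byte [] buf = Bytes.toBytes("prefix-row1-suffix");
    Append append = new Append(buf, 7, 4); // copies bytes 7..10, i.e. "row1"
    append.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));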
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java (working copy)
@@ -19,6 +19,11 @@
package org.apache.hadoop.hbase.client;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.Cell;
@@ -26,11 +31,6 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
/**
* Used to perform Delete operations on a single row.
*
@@ -91,11 +91,30 @@
* @param timestamp maximum version timestamp (only for delete row)
*/
public Delete(byte [] row, long timestamp) {
- this.row = row;
- this.ts = timestamp;
+ this(row, 0, row.length, timestamp);
}
/**
+ * Create a Delete operation for the specified row and timestamp.
+ *
+ * If no further operations are done, this will delete all columns in all
+ * families of the specified row with a timestamp less than or equal to the
+ * specified timestamp.
+ *
+ * This timestamp is ONLY used for a delete row operation. If specifying
+ * families or columns, you must specify each timestamp individually.
+ * @param rowArray source buffer; we make a local copy of the passed-in row.
+ * @param rowOffset offset of the row within rowArray
+ * @param rowLength length of the row
+ * @param ts maximum version timestamp (only for delete row)
+ */
+ public Delete(final byte [] rowArray, final int rowOffset, final int rowLength, long ts) {
+ checkRow(rowArray, rowOffset, rowLength);
+ this.row = Bytes.copy(rowArray, rowOffset, rowLength);
+ this.ts = ts;
+ }
+
+ /**
* @param d Delete to clone.
*/
public Delete(final Delete d) {
@@ -121,10 +140,8 @@
}
if (Bytes.compareTo(this.row, 0, row.length, kv.getBuffer(),
kv.getRowOffset(), kv.getRowLength()) != 0) {
- throw new IOException("The row in the recently added KeyValue "
- + Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(),
- kv.getRowLength()) + " doesn't match the original one "
- + Bytes.toStringBinary(this.row));
+ throw new WrongRowIOException("The row in " + kv.toString() +
+ " doesn't match the original one " + Bytes.toStringBinary(this.row));
}
byte [] family = kv.getFamily();
List<? extends Cell> list = familyMap.get(family);
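The matching Delete constructor follows the same slice-copying pattern, with checkRow rejecting a null, empty, or over-long row before the copy is taken. A usage sketch:

    byte [] buf = Bytes.toBytes("aaa-row2-zzz");
    Delete delete = new Delete(buf, 4, 4, HConstants.LATEST_TIMESTAMP); // row "row2"
    delete.deleteFamily(Bytes.toBytes("cf"));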
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java (working copy)
@@ -19,14 +19,6 @@
package org.apache.hadoop.hbase.client;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.util.Bytes;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
@@ -37,6 +29,14 @@
import java.util.TreeMap;
import java.util.TreeSet;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.util.Bytes;
+
/**
* Used to perform Get operations on a single row.
*
@@ -83,6 +83,7 @@
* @param row row key
*/
public Get(byte [] row) {
+ Mutation.checkRow(row);
this.row = row;
}
@@ -388,10 +389,18 @@
//Row
@Override
public int compareTo(Row other) {
+ // TODO: This is wrong. Can't have two gets the same just because on same row.
return Bytes.compareTo(this.getRow(), other.getRow());
}
@Override
+ public int hashCode() {
+ // TODO: This is wrong. Can't have two gets the same just because on same row. But it
+ // matches how equals works currently and gets rid of the findbugs warning.
+ return Bytes.hashCode(this.getRow());
+ }
+
+ @Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
@@ -400,6 +409,7 @@
return false;
}
Row other = (Row) obj;
+ // TODO: This is wrong. Can't have two gets the same just because on same row.
return compareTo(other) == 0;
}
-}
+}
\ No newline at end of file
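As the TODOs note, comparison, equality, and now hashing for Get are all row-only, so hash collections deduplicate Gets by row regardless of their column selections. A sketch of the consequence:

    Get g1 = new Get(Bytes.toBytes("row"));
    Get g2 = new Get(Bytes.toBytes("row"));
    g2.addFamily(Bytes.toBytes("cf"));     // different selection, same row
    assert g1.equals(g2);                  // true: row-only comparison
    assert g1.hashCode() == g2.hashCode(); // consistent with equals, but coarse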
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (working copy)
@@ -1350,7 +1350,7 @@
throws IOException, InterruptedException {
compact(tableNameOrRegionName, null, false);
}
-
+
/**
* Compact a column family within a table or region.
* Asynchronous operation.
@@ -1404,7 +1404,7 @@
throws IOException, InterruptedException {
compact(tableNameOrRegionName, null, true);
}
-
+
/**
* Major compact a column family within a table or region.
* Asynchronous operation.
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java (working copy)
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.client;
import java.io.IOException;
-import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
@@ -28,7 +27,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.TimeRange;
@@ -52,17 +50,47 @@
private TimeRange tr = new TimeRange();
/**
- * Create a Increment operation for the specified row, using an existing row
- * lock.
+ * Create an Increment operation for the specified row.
*
* At least one column must be incremented.
- * @param row row key
+ * @param row row key (we will make a copy of this).
*/
public Increment(byte [] row) {
- if (row == null || row.length > HConstants.MAX_ROW_LENGTH) {
- throw new IllegalArgumentException("Row key is invalid");
+ this(row, 0, row.length);
+ }
+
+ /**
+ * Create an Increment operation for the specified row.
+ *
+ * At least one column must be incremented.
+ * @param row row key (we will make a copy of this).
+ * @param offset offset of the row within the buffer
+ * @param length length of the row
+ */
+ public Increment(final byte [] row, final int offset, final int length) {
+ checkRow(row, offset, length);
+ this.row = Bytes.copy(row, offset, length);
+ }
+
+ /**
+ * Add the specified KeyValue to this operation.
+ * @param cell individual Cell
+ * @return this
+ * @throws java.io.IOException e
+ */
+ @SuppressWarnings("unchecked")
+ public Increment add(Cell cell) throws IOException {
+ KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
+ byte [] family = kv.getFamily();
+ List<? extends Cell> list = getCellList(family);
+ //Checking that the row of the kv is the same as the put
+ int res = Bytes.compareTo(this.row, 0, row.length,
+ kv.getBuffer(), kv.getRowOffset(), kv.getRowLength());
+ if (res != 0) {
+ throw new WrongRowIOException("The row in " + kv.toString() +
+ " doesn't match the original one " + Bytes.toStringBinary(this.row));
}
- this.row = Arrays.copyOf(row, row.length);
+ ((List)list).add(kv);
+ familyMap.put(family, list);
+ return this;
}
/**
@@ -204,11 +232,20 @@
@Override
public int compareTo(Row i) {
+ // TODO: This is wrong. Can't have two the same just because on same row.
return Bytes.compareTo(this.getRow(), i.getRow());
}
@Override
+ public int hashCode() {
+ // TODO: This is wrong. Can't have two gets the same just because on same row. But it
+ // matches how equals works currently and gets rid of the findbugs warning.
+ return Bytes.hashCode(this.getRow());
+ }
+
+ @Override
public boolean equals(Object obj) {
+ // TODO: This is wrong. Can't have two the same just because on same row.
if (this == obj) {
return true;
}
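The new Increment.add(Cell) mirrors Put.add: the Cell's row must match the Increment's row or a WrongRowIOException is thrown. A usage sketch:

    Increment inc = new Increment(Bytes.toBytes("row"));
    KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), 1L, Bytes.toBytes(1L));
    inc.add(kv);    // accepted: rows match
    KeyValue other = new KeyValue(Bytes.toBytes("other"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), 1L, Bytes.toBytes(1L));
    inc.add(other); // throws WrongRowIOException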
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java (working copy)
@@ -250,6 +250,7 @@
}
/**
+ * Number of KeyValues carried by this Mutation.
* @return the total number of KeyValues
*/
public int size() {
@@ -299,4 +300,36 @@
heapsize += getAttributeSize();
return heapsize;
}
-}
+
+ /**
+ * @param row Row to check
+ * @throws IllegalArgumentException Thrown if row is empty or null or
+ * > {@link HConstants#MAX_ROW_LENGTH}
+ * @return row
+ */
+ static byte [] checkRow(final byte [] row) {
+ return checkRow(row, 0, row == null? 0: row.length);
+ }
+
+ /**
+ * @param row Row to check
+ * @param offset offset of the row within the buffer
+ * @param length length of the row
+ * @throws IllegalArgumentException Thrown if row is empty or null or
+ * > {@link HConstants#MAX_ROW_LENGTH}
+ * @return row
+ */
+ static byte [] checkRow(final byte [] row, final int offset, final int length) {
+ if (row == null) {
+ throw new IllegalArgumentException("Row buffer is null");
+ }
+ if (length == 0) {
+ throw new IllegalArgumentException("Row length is 0");
+ }
+ if (length > HConstants.MAX_ROW_LENGTH) {
+ throw new IllegalArgumentException("Row length " + length + " is > " +
+ HConstants.MAX_ROW_LENGTH);
+ }
+ return row;
+ }
+}
\ No newline at end of file
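checkRow centralizes the validation Put and Increment previously did inline. Within the package (the methods are package-private) it behaves as follows, a sketch with the outcomes as comments:

    Mutation.checkRow(Bytes.toBytes("ok"));  // returns the row unchanged
    Mutation.checkRow(null);                 // IllegalArgumentException: null buffer
    Mutation.checkRow(new byte[0]);          // IllegalArgumentException: length 0
    Mutation.checkRow(new byte[HConstants.MAX_ROW_LENGTH + 1]); // too long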
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java (working copy)
@@ -57,14 +57,23 @@
/**
* Create a Put operation for the specified row, using a given timestamp.
*
- * @param row row key
+ * @param row row key; we make a copy of what we are passed to keep local.
* @param ts timestamp
*/
public Put(byte[] row, long ts) {
- if(row == null || row.length > HConstants.MAX_ROW_LENGTH) {
- throw new IllegalArgumentException("Row key is invalid");
- }
- this.row = Arrays.copyOf(row, row.length);
+ this(row, 0, row.length, ts);
+ }
+
+ /**
+ * We make a copy of the passed-in row key to keep local.
+ * @param rowArray source buffer holding the row key
+ * @param rowOffset offset of the row key within rowArray
+ * @param rowLength length of the row key
+ * @param ts timestamp
+ */
+ public Put(byte [] rowArray, int rowOffset, int rowLength, long ts) {
+ checkRow(rowArray, rowOffset, rowLength);
+ this.row = Bytes.copy(rowArray, rowOffset, rowLength);
this.ts = ts;
}
@@ -125,11 +134,9 @@
//Checking that the row of the kv is the same as the put
int res = Bytes.compareTo(this.row, 0, row.length,
kv.getBuffer(), kv.getRowOffset(), kv.getRowLength());
- if(res != 0) {
- throw new IOException("The row in the recently added KeyValue " +
- Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(),
- kv.getRowLength()) + " doesn't match the original one " +
- Bytes.toStringBinary(this.row));
+ if (res != 0) {
+ throw new WrongRowIOException("The row in " + kv.toString() +
+ " doesn't match the original one " + Bytes.toStringBinary(this.row));
}
((List)list).add(kv);
familyMap.put(family, list);
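Together with checkRow, the delegating constructors mean every Put row is validated and copied exactly once. A sketch of the new offset/length form:

    byte [] buf = Bytes.toBytes("xx-row3-yy");
    Put put = new Put(buf, 3, 4, System.currentTimeMillis()); // row "row3"
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));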
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java (working copy)
@@ -17,23 +17,26 @@
*/
package org.apache.hadoop.hbase.client;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.Bytes;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+
/**
* Performs multiple mutations atomically on a single row.
* Currently {@link Put} and {@link Delete} are supported.
*
* The mutations are performed in the order in which they
* were added.
+ *
+ * We compare and equate mutations based off their row so be careful putting RowMutations
+ * into Sets or using them as keys in Maps.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
@@ -89,6 +92,16 @@
}
@Override
+ public boolean equals(Object obj) {
+ if (obj == this) return true;
+ if (obj instanceof RowMutations) {
+ RowMutations other = (RowMutations)obj;
+ return compareTo(other) == 0;
+ }
+ return false;
+ }
+
+ @Override
public byte[] getRow() {
return row;
}
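Per the new class comment, equality here is row-only as well, so two RowMutations carrying different edits against the same row compare equal. A sketch:

    RowMutations rm1 = new RowMutations(Bytes.toBytes("row"));
    rm1.add(new Put(Bytes.toBytes("row")));
    RowMutations rm2 = new RowMutations(Bytes.toBytes("row"));
    assert rm1.equals(rm2); // equal despite carrying different mutations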
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/WrongRowIOException.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/WrongRowIOException.java (revision 0)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/WrongRowIOException.java (working copy)
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.exceptions.HBaseIOException;
+
+public class WrongRowIOException extends HBaseIOException {
+ private static final long serialVersionUID = -5849522209440123059L;
+
+ public WrongRowIOException(final String msg) {
+ super(msg);
+ }
+}
\ No newline at end of file
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/AccessDeniedException.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/AccessDeniedException.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/AccessDeniedException.java (working copy)
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.exceptions;
-import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
/**
* Exception thrown by access-related methods.
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CoprocessorException.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CoprocessorException.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CoprocessorException.java (working copy)
@@ -20,7 +20,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
/**
* Thrown if a coprocessor encounters any exception.
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CorruptHFileException.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CorruptHFileException.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CorruptHFileException.java (working copy)
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
/**
* This exception is thrown when attempts to read an HFile fail due to corruption or truncation
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/DoNotRetryIOException.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/DoNotRetryIOException.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/DoNotRetryIOException.java (working copy)
@@ -56,4 +56,4 @@
public DoNotRetryIOException(Throwable cause) {
super(cause);
}
-}
+}
\ No newline at end of file
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LeaseException.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LeaseException.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LeaseException.java (working copy)
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
/**
* Reports a problem with a lease
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java (working copy)
@@ -23,6 +23,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
/**
@@ -48,6 +49,11 @@
}
@Override
+ public int hashCode() {
+ return 0;
+ }
+
+ @Override
public int compareTo(byte[] value, int offset, int length) {
throw new UnsupportedOperationException();
}
@@ -69,9 +75,9 @@
*/
public static NullComparator parseFrom(final byte [] pbBytes)
throws DeserializationException {
- ComparatorProtos.NullComparator proto;
try {
- proto = ComparatorProtos.NullComparator.parseFrom(pbBytes);
+ @SuppressWarnings("unused")
+ ComparatorProtos.NullComparator proto = ComparatorProtos.NullComparator.parseFrom(pbBytes);
} catch (InvalidProtocolBufferException e) {
throw new DeserializationException(e);
}
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java (working copy)
@@ -46,7 +46,7 @@
private static final int FIRST_CODE = values()[0].code;
/** Return the object represented by the code. */
- private static AuthMethod valueOf(byte code) {
+ public static AuthMethod valueOf(byte code) {
final int i = (code & 0xff) - FIRST_CODE;
return i < 0 || i >= values().length ? null : values()[i];
}
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (revision 1457026)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (working copy)
@@ -1587,7 +1587,7 @@
try {
getReplicationZnodesDump(zkw, sb);
} catch (KeeperException ke) {
- LOG.warn("Couldn't get the replication znode dump." + ke.getStackTrace());
+ LOG.warn("Couldn't get the replication znode dump", ke);
}
sb.append("\nQuorum Server Statistics:");
String[] servers = zkw.getQuorum().split(",");
Index: hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java
===================================================================
--- hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java (revision 1457026)
+++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java (working copy)
@@ -29,9 +29,10 @@
@Category(SmallTests.class)
public class TestAttributes {
+ private static final byte [] ROW = new byte [] {'r'};
@Test
public void testPutAttributes() {
- Put put = new Put(new byte [] {});
+ Put put = new Put(ROW);
Assert.assertTrue(put.getAttributesMap().isEmpty());
Assert.assertNull(put.getAttribute("absent"));
@@ -79,7 +80,7 @@
@Test
public void testDeleteAttributes() {
- Delete del = new Delete(new byte [] {});
+ Delete del = new Delete(new byte [] {'r'});
Assert.assertTrue(del.getAttributesMap().isEmpty());
Assert.assertNull(del.getAttribute("absent"));
@@ -126,7 +127,7 @@
@Test
public void testGetId() {
- Get get = new Get(null);
+ Get get = new Get(ROW);
Assert.assertNull("Make sure id is null if unset", get.toMap().get("id"));
get.setId("myId");
Assert.assertEquals("myId", get.toMap().get("id"));
@@ -134,7 +135,7 @@
@Test
public void testAppendId() {
- Append append = new Append(Bytes.toBytes("testRow"));
+ Append append = new Append(ROW);
Assert.assertNull("Make sure id is null if unset", append.toMap().get("id"));
append.setId("myId");
Assert.assertEquals("myId", append.toMap().get("id"));
@@ -142,7 +143,7 @@
@Test
public void testDeleteId() {
- Delete delete = new Delete(new byte [] {});
+ Delete delete = new Delete(ROW);
Assert.assertNull("Make sure id is null if unset", delete.toMap().get("id"));
delete.setId("myId");
Assert.assertEquals("myId", delete.toMap().get("id"));
@@ -150,7 +151,7 @@
@Test
public void testPutId() {
- Put put = new Put(new byte [] {});
+ Put put = new Put(ROW);
Assert.assertNull("Make sure id is null if unset", put.toMap().get("id"));
put.setId("myId");
Assert.assertEquals("myId", put.toMap().get("id"));
@@ -163,6 +164,4 @@
scan.setId("myId");
Assert.assertEquals("myId", scan.toMap().get("id"));
}
-
-}
-
+}
\ No newline at end of file
Index: hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java
===================================================================
--- hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java (revision 1457026)
+++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java (working copy)
@@ -34,6 +34,7 @@
// TODO: cover more test cases
@Category(SmallTests.class)
public class TestGet {
+ private static final byte [] ROW = new byte [] {'r'};
@Test
public void testAttributesSerialization() throws IOException {
Get get = new Get(Bytes.toBytes("row"));
@@ -53,7 +54,7 @@
@Test
public void testGetAttributes() {
- Get get = new Get(null);
+ Get get = new Get(ROW);
Assert.assertTrue(get.getAttributesMap().isEmpty());
Assert.assertNull(get.getAttribute("absent"));
@@ -100,11 +101,10 @@
@Test
public void testNullQualifier() {
- Get get = new Get(null);
+ Get get = new Get(ROW);
byte[] family = Bytes.toBytes("family");
get.addColumn(family, null);
Set<byte[]> qualifiers = get.getFamilyMap().get(family);
Assert.assertEquals(1, qualifiers.size());
}
-}
-
+}
\ No newline at end of file
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java (revision 1457026)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java (working copy)
@@ -34,6 +34,9 @@
* regionname, from row. See KeyValue for how it has a special comparator to do .META. cells
* and yet another for -ROOT-.
*/
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(
+ value="UNKNOWN",
+ justification="Findbugs doesn't like the way we are negating the result of a compare in below")
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class CellComparator implements Comparator<Cell>, Serializable {
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java (revision 1457026)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java (working copy)
@@ -872,9 +872,11 @@
/**
* Clones a KeyValue. This creates a copy, re-allocating the buffer.
* @return Fully copied clone of this KeyValue
+ * @throws CloneNotSupportedException
*/
@Override
- public KeyValue clone() {
+ public KeyValue clone() throws CloneNotSupportedException {
+ super.clone();
byte [] b = new byte[this.length];
System.arraycopy(this.bytes, this.offset, b, 0, this.length);
KeyValue ret = new KeyValue(b, 0, b.length);
@@ -886,15 +888,6 @@
}
/**
- * Creates a deep copy of this KeyValue, re-allocating the buffer.
- * Same function as {@link #clone()}. Added for clarity vs shallowCopy()
- * @return Deep copy of this KeyValue
- */
- public KeyValue deepCopy() {
- return clone();
- }
-
- /**
* Creates a shallow copy of this KeyValue, reusing the data byte buffer.
* http://en.wikipedia.org/wiki/Object_copy
* @return Shallow copy of this KeyValue
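With clone() now calling super.clone() and declaring CloneNotSupportedException (and deepCopy() removed), callers take on a checked exception; the usual pattern is to wrap it, as KeyValueSortReducer does later in this patch. A sketch:

    try {
      KeyValue copy = kv.clone(); // deep copy: the backing buffer is re-allocated
    } catch (CloneNotSupportedException e) {
      throw new IOException(e);   // wrap, as KeyValueSortReducer now does
    }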
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java (revision 1457026)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java (working copy)
@@ -22,12 +22,12 @@
import org.apache.hadoop.hbase.Cell;
-abstract class BaseDecoder implements Codec.Decoder {
- final InputStream in;
+public abstract class BaseDecoder implements Codec.Decoder {
+ protected final InputStream in;
private boolean hasNext = true;
private Cell current = null;
- BaseDecoder(final InputStream in) {
+ public BaseDecoder(final InputStream in) {
this.in = in;
}
@@ -50,7 +50,7 @@
* @return extract a Cell
* @throws IOException
*/
- abstract Cell parseCell() throws IOException;
+ protected abstract Cell parseCell() throws IOException;
@Override
public Cell current() {
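Widening BaseDecoder (and BaseEncoder below) to public, with protected members, lets codec implementations live outside this package. A minimal subclass sketch under those assumptions (the class name is hypothetical):

    public class PassThroughDecoder extends BaseDecoder {
      public PassThroughDecoder(InputStream in) {
        super(in); // 'in' is protected, so subclasses may read it directly
      }
      @Override
      protected Cell parseCell() throws IOException {
        return KeyValue.iscreate(in); // e.g. reuse KeyValue's stream format
      }
    }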
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java (revision 1457026)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java (working copy)
@@ -22,7 +22,7 @@
import org.apache.hadoop.hbase.Cell;
-abstract class BaseEncoder implements Codec.Encoder {
+public abstract class BaseEncoder implements Codec.Encoder {
protected final OutputStream out;
// This encoder is 'done' once flush has been called.
protected boolean flushed = false;
@@ -34,7 +34,7 @@
@Override
public abstract void write(Cell cell) throws IOException;
- void checkFlushed() throws CodecException {
+ protected void checkFlushed() throws CodecException {
if (this.flushed) throw new CodecException("Flushed; done");
}
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java (revision 1457026)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java (working copy)
@@ -77,7 +77,7 @@
super(in);
}
- Cell parseCell() throws IOException {
+ protected Cell parseCell() throws IOException {
byte [] row = readByteArray(this.in);
byte [] family = readByteArray(in);
byte [] qualifier = readByteArray(in);
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java (revision 1457026)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java (working copy)
@@ -66,7 +66,7 @@
super(in);
}
- Cell parseCell() throws IOException {
+ protected Cell parseCell() throws IOException {
return KeyValue.iscreate(in);
}
}
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java (revision 0)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java (working copy)
@@ -0,0 +1,137 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.Channels;
+import java.nio.channels.WritableByteChannel;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Not thread safe!
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ByteBufferOutputStream extends OutputStream {
+
+ protected ByteBuffer buf;
+
+ public ByteBufferOutputStream(int capacity) {
+ this(capacity, false);
+ }
+
+ public ByteBufferOutputStream(int capacity, boolean useDirectByteBuffer) {
+ if (useDirectByteBuffer) {
+ buf = ByteBuffer.allocateDirect(capacity);
+ } else {
+ buf = ByteBuffer.allocate(capacity);
+ }
+ }
+
+ public int size() {
+ return buf.position();
+ }
+
+ /**
+ * This flips the underlying BB so be sure to use it _last_!
+ * @return ByteBuffer
+ */
+ public ByteBuffer getByteBuffer() {
+ buf.flip();
+ return buf;
+ }
+
+ private void checkSizeAndGrow(int extra) {
+ if ( (buf.position() + extra) > buf.limit()) {
+ // size calculation is complex, because we could overflow negative,
+ // and/or not allocate enough space. this fixes that.
+ int newSize = (int)Math.min((((long)buf.capacity()) * 2),
+ (long)(Integer.MAX_VALUE));
+ newSize = Math.max(newSize, buf.position() + extra);
+
+ ByteBuffer newBuf = ByteBuffer.allocate(newSize);
+ buf.flip();
+ newBuf.put(buf);
+ buf = newBuf;
+ }
+ }
+
+ // OutputStream
+ @Override
+ public void write(int b) throws IOException {
+ checkSizeAndGrow(Bytes.SIZEOF_BYTE);
+
+ buf.put((byte)b);
+ }
+
+ /**
+ * Writes the complete contents of this byte buffer output stream to
+ * the specified output stream argument.
+ *
+ * @param out the output stream to which to write the data.
+ * @exception IOException if an I/O error occurs.
+ */
+ public synchronized void writeTo(OutputStream out) throws IOException {
+ WritableByteChannel channel = Channels.newChannel(out);
+ ByteBuffer bb = buf.duplicate();
+ bb.flip();
+ channel.write(bb);
+ }
+
+ @Override
+ public void write(byte[] b) throws IOException {
+ checkSizeAndGrow(b.length);
+
+ buf.put(b);
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ checkSizeAndGrow(len);
+
+ buf.put(b, off, len);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ // noop
+ }
+
+ @Override
+ public void close() throws IOException {
+ // noop again. heh
+ }
+
+ public byte[] toByteArray(int offset, int length) {
+ ByteBuffer bb = buf.duplicate();
+ bb.flip();
+
+ byte[] chunk = new byte[length];
+
+ bb.position(offset);
+ bb.get(chunk, 0, length);
+ return chunk;
+ }
+}
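The class moves verbatim from hbase-server's util package into hbase-common's io package (the old copy is deleted below) so HBaseServer and client-side code can share it. Usage is unchanged; a sketch:

    ByteBufferOutputStream bbos = new ByteBufferOutputStream(8); // grows by doubling
    bbos.write(Bytes.toBytes("hello world"));
    ByteBuffer bb = bbos.getByteBuffer(); // flips the buffer: call this last
    byte [] out = new byte[bb.remaining()];
    bb.get(out);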
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java (revision 1457026)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java (working copy)
@@ -209,8 +209,9 @@
state.familyNameWithSize =
new byte[(state.familyLength & 0xff) + KeyValue.FAMILY_LENGTH_SIZE];
state.familyNameWithSize[0] = state.familyLength;
- source.read(state.familyNameWithSize, KeyValue.FAMILY_LENGTH_SIZE,
+ int read = source.read(state.familyNameWithSize, KeyValue.FAMILY_LENGTH_SIZE,
state.familyLength);
+ assert read == state.familyLength;
}
// read flag
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java (revision 1457026)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java (working copy)
@@ -16,14 +16,10 @@
*/
package org.apache.hadoop.hbase.io.encoding;
-import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
-import java.io.FilterOutputStream;
import java.io.IOException;
-import java.lang.reflect.Field;
import java.nio.ByteBuffer;
-import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.KeyValue;
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java (revision 1457026)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java (working copy)
@@ -1673,6 +1673,21 @@
}
/**
+ * Copy the specified range of the given byte array and return it as a
+ * new byte array of the requested length with the same content.
+ * @param bytes the byte array to copy from
+ * @param offset where in the source array to start copying
+ * @param length how many bytes to copy
+ * @return a copy of the designated range of the given byte array
+ */
+ public static byte [] copy(byte [] bytes, final int offset, final int length) {
+ if (bytes == null) return null;
+ byte [] result = new byte[length];
+ System.arraycopy(bytes, offset, result, 0, length);
+ return result;
+ }
+
+ /**
* Search sorted array "a" for byte "key". I can't remember if I wrote this or copied it from
* somewhere. (mcorgan)
* @param a Array to search. Entries must be sorted and unique.
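Bytes.copy backs the new row-copying constructors above. A sketch of its semantics:

    byte [] src = Bytes.toBytes("abcdef");
    byte [] mid = Bytes.copy(src, 2, 3); // new array {'c','d','e'}
    src[2] = 'X';                        // mutating the source leaves the copy intact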
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java (revision 1457026)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java (working copy)
@@ -34,6 +34,9 @@
* Generate list of key values which are very useful to test data block encoding
* and compression.
*/
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(
+ value="RV_ABSOLUTE_VALUE_OF_RANDOM_INT",
+ justification="Should probably fix")
public class RedundantKVGenerator {
// row settings
static byte[] DEFAULT_COMMON_PREFIX = new byte[0];
Index: hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
===================================================================
--- hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java (revision 1457026)
+++ hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java (working copy)
@@ -183,7 +183,6 @@
client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(valid), mutations, dummyAttributes);
// non-utf8 is now allowed in row names because HBase stores values as binary
- ByteBuffer bf = ByteBuffer.wrap(invalid);
mutations = new ArrayList<Mutation>();
mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), writeToWal));
Index: hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
===================================================================
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java (revision 1457026)
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java (working copy)
@@ -137,6 +137,13 @@
.compareTo(impl.regionWrapper.getRegionName());
}
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == this) return true;
+ if (!(obj instanceof MetricsRegionSourceImpl)) return false;
+ return compareTo((MetricsRegionSourceImpl)obj) == 0;
+ }
+
void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
if (closed) return;
Index: hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
===================================================================
--- hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java (revision 1457026)
+++ hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java (working copy)
@@ -36,6 +36,9 @@
* This class need to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used
* are package private.
*/
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(
+ value="LI_LAZY_INIT_STATIC",
+ justification="Yeah, its weird but its what we want")
public class JmxCacheBuster {
private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
private static Object lock = new Object();
Index: hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
===================================================================
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java (revision 1457026)
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java (working copy)
@@ -20,6 +20,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionSourceImpl;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
@@ -138,6 +139,13 @@
.compareTo(impl.regionWrapper.getRegionName());
}
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == this) return true;
+ if (!(obj instanceof MetricsRegionSourceImpl)) return false;
+ return compareTo((MetricsRegionSourceImpl)obj) == 0;
+ }
+
void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
if (closed) return;
Index: hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java
===================================================================
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java (revision 1457026)
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java (working copy)
@@ -35,6 +35,9 @@
* This class need to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used
* are package private.
*/
+@edu.umd.cs.findbugs.annotations.SuppressWarnings(
+ value="LI_LAZY_INIT_STATIC",
+ justification="Yeah, its weird but its what we want")
public class JmxCacheBuster {
private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
private static Object lock = new Object();
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (revision 1457026)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java (working copy)
@@ -70,6 +70,7 @@
import org.apache.hadoop.hbase.IpcProtocol;
import org.apache.hadoop.hbase.exceptions.CallerDisconnectedException;
import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader;
@@ -86,7 +87,6 @@
import org.apache.hadoop.hbase.security.SaslStatus;
import org.apache.hadoop.hbase.security.SaslUtil;
import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.ByteBufferOutputStream;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java (revision 1457026)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java (working copy)
@@ -100,9 +100,9 @@
private int timestampKeyColumnIndex = DEFAULT_TIMESTAMP_COLUMN_INDEX;
- public static String ROWKEY_COLUMN_SPEC = "HBASE_ROW_KEY";
+ public static final String ROWKEY_COLUMN_SPEC = "HBASE_ROW_KEY";
- public static String TIMESTAMPKEY_COLUMN_SPEC = "HBASE_TS_KEY";
+ public static final String TIMESTAMPKEY_COLUMN_SPEC = "HBASE_TS_KEY";
/**
* @param columnsSpecification the list of columns to parser out, comma separated.
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java (revision 1457026)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java (working copy)
@@ -41,7 +41,11 @@
throws java.io.IOException, InterruptedException {
TreeSet<KeyValue> map = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
for (KeyValue kv: kvs) {
- map.add(kv.clone());
+ try {
+ map.add(kv.clone());
+ } catch (CloneNotSupportedException e) {
+ throw new java.io.IOException(e);
+ }
}
context.setStatus("Read " + map.getClass());
int index = 0;
@@ -50,4 +54,4 @@
if (index > 0 && index % 100 == 0) context.setStatus("Wrote " + index);
}
}
-}
+}
\ No newline at end of file
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (revision 1457026)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (working copy)
@@ -105,15 +105,16 @@
LOG.error("Couldn't delete " + tempTableDir);
}
- LOG.debug("Table '" + tableName + "' archived!");
+ LOG.debug("Table '" + Bytes.toString(tableName) + "' archived!");
} finally {
+ String tableNameStr = Bytes.toString(tableName);
// 6. Update table descriptor cache
- LOG.debug("Removing '" + tableName + "' descriptor.");
+ LOG.debug("Removing '" + tableNameStr + "' descriptor.");
this.masterServices.getTableDescriptors().remove(Bytes.toString(tableName));
// 7. If entry for this table in zk, and up in AssignmentManager, remove it.
- LOG.debug("Marking '" + tableName + "' as deleted.");
- am.getZKTable().setDeletedTable(Bytes.toString(tableName));
+ LOG.debug("Marking '" + tableNameStr + "' as deleted.");
+ am.getZKTable().setDeletedTable(tableNameStr);
}
if (cpHost != null) {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1457026)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy)
@@ -19,8 +19,9 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.EOFException;
+import java.io.IOException;
import java.io.FileNotFoundException;
-import java.io.IOException;
+import org.apache.hadoop.fs.permission.FsPermission;
import java.io.InterruptedIOException;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.Constructor;
@@ -69,24 +70,17 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.exceptions.DroppedSnapshotException;
-import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException;
-import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
-import org.apache.hadoop.hbase.exceptions.RegionTooBusyException;
-import org.apache.hadoop.hbase.exceptions.UnknownScannerException;
+import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
@@ -100,6 +94,13 @@
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
+import org.apache.hadoop.hbase.exceptions.DroppedSnapshotException;
+import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
+import org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException;
+import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
+import org.apache.hadoop.hbase.exceptions.RegionTooBusyException;
+import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
+import org.apache.hadoop.hbase.exceptions.UnknownScannerException;
import org.apache.hadoop.hbase.exceptions.WrongRegionException;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -112,7 +113,6 @@
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.ipc.HBaseServer;
import org.apache.hadoop.hbase.ipc.RpcCallContext;
-import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
@@ -1826,6 +1826,10 @@
}
/**
+ * Row needed by below method.
+ */
+ private static final byte [] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
+ /**
* This is used only by unit tests. Not required to be a public API.
* @param familyMap map of family to edits for the given family.
* @param writeToWAL
@@ -1833,7 +1837,7 @@
*/
void delete(NavigableMap<byte[], List<? extends Cell>> familyMap, UUID clusterId,
boolean writeToWAL) throws IOException {
- Delete delete = new Delete(HConstants.EMPTY_BYTE_ARRAY);
+ Delete delete = new Delete(FOR_UNIT_TESTS_ONLY);
delete.setFamilyMap(familyMap);
delete.setClusterId(clusterId);
delete.setWriteToWAL(writeToWAL);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java (revision 1457026)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java (working copy)
@@ -51,7 +51,6 @@
* thrift server dies or is shut down before everything in the queue is drained.
*
*/
-
public class IncrementCoalescer implements IncrementCoalescerMBean {
/**
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/ByteBufferOutputStream.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/ByteBufferOutputStream.java (revision 1457026)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/ByteBufferOutputStream.java (working copy)
@@ -1,136 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.ByteBuffer;
-import java.nio.channels.Channels;
-import java.nio.channels.WritableByteChannel;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Not thread safe!
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class ByteBufferOutputStream extends OutputStream {
-
- protected ByteBuffer buf;
-
- public ByteBufferOutputStream(int capacity) {
- this(capacity, false);
- }
-
- public ByteBufferOutputStream(int capacity, boolean useDirectByteBuffer) {
- if (useDirectByteBuffer) {
- buf = ByteBuffer.allocateDirect(capacity);
- } else {
- buf = ByteBuffer.allocate(capacity);
- }
- }
-
- public int size() {
- return buf.position();
- }
-
- /**
- * This flips the underlying BB so be sure to use it _last_!
- * @return ByteBuffer
- */
- public ByteBuffer getByteBuffer() {
- buf.flip();
- return buf;
- }
-
- private void checkSizeAndGrow(int extra) {
- if ( (buf.position() + extra) > buf.limit()) {
- // size calculation is complex, because we could overflow negative,
- // and/or not allocate enough space. this fixes that.
- int newSize = (int)Math.min((((long)buf.capacity()) * 2),
- (long)(Integer.MAX_VALUE));
- newSize = Math.max(newSize, buf.position() + extra);
-
- ByteBuffer newBuf = ByteBuffer.allocate(newSize);
- buf.flip();
- newBuf.put(buf);
- buf = newBuf;
- }
- }
-
- // OutputStream
- @Override
- public void write(int b) throws IOException {
- checkSizeAndGrow(Bytes.SIZEOF_BYTE);
-
- buf.put((byte)b);
- }
-
- /**
- * Writes the complete contents of this byte buffer output stream to
- * the specified output stream argument.
- *
- * @param out the output stream to which to write the data.
- * @exception IOException if an I/O error occurs.
- */
- public synchronized void writeTo(OutputStream out) throws IOException {
- WritableByteChannel channel = Channels.newChannel(out);
- ByteBuffer bb = buf.duplicate();
- bb.flip();
- channel.write(bb);
- }
-
- @Override
- public void write(byte[] b) throws IOException {
- checkSizeAndGrow(b.length);
-
- buf.put(b);
- }
-
- @Override
- public void write(byte[] b, int off, int len) throws IOException {
- checkSizeAndGrow(len);
-
- buf.put(b, off, len);
- }
-
- @Override
- public void flush() throws IOException {
- // noop
- }
-
- @Override
- public void close() throws IOException {
- // noop again. heh
- }
-
- public byte[] toByteArray(int offset, int length) {
- ByteBuffer bb = buf.duplicate();
- bb.flip();
-
- byte[] chunk = new byte[length];
-
- bb.position(offset);
- bb.get(chunk, 0, length);
- return chunk;
- }
-}
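[Editor's note] The file above is deleted wholesale; nothing in this hunk says where it went, but the deletion reads like a move rather than a loss of the feature. For reference, a usage sketch of the stream as defined in the removed code, assuming the class remains available somewhere in the tree (exception handling elided):

    // Grow-on-demand buffer: write freely, then flip exactly once at the end.
    ByteBufferOutputStream bbos = new ByteBufferOutputStream(64);
    bbos.write(Bytes.toBytes("hello"));             // checkSizeAndGrow doubles capacity as needed
    java.nio.ByteBuffer bb = bbos.getByteBuffer();  // flips the buffer; use it last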
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java (revision 1457026)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java (working copy)
@@ -261,8 +261,9 @@
HTable ipMeta = new HTable(otherConf, HConstants.META_TABLE_NAME);
// dummy, just to open the connection
- localMeta.exists(new Get(HConstants.LAST_ROW));
- ipMeta.exists(new Get(HConstants.LAST_ROW));
+ final byte [] row = new byte [] {'r'};
+ localMeta.exists(new Get(row));
+ ipMeta.exists(new Get(row));
// make sure they aren't the same
ZooKeeperWatcher z1 =
@@ -359,8 +360,26 @@
"testMasterAddressManagerFromZK", null);
// Save the previous ACL
- Stat s = new Stat();
- List<ACL> oldACL = zk.getACL("/", s);
+ Stat s = null;
+ List<ACL> oldACL = null;
+ while (true) {
+ try {
+ s = new Stat();
+ oldACL = zk.getACL("/", s);
+ break;
+ } catch (KeeperException e) {
+ switch (e.code()) {
+ case CONNECTIONLOSS:
+ case SESSIONEXPIRED:
+ case OPERATIONTIMEOUT:
+ LOG.warn("Possibly transient ZooKeeper exception", e);
+ Threads.sleep(100);
+ break;
+ default:
+ throw e;
+ }
+ }
+ }
// I set this acl after the attempted creation of the cluster home node.
// Add retries in case of retryable zk exceptions.
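[Editor's note] The retry loop added above generalizes to any ZooKeeper read: retry the transient codes, rethrow everything else. A sketch of that shape with a hypothetical helper name (ZooKeeper's getACL also declares InterruptedException, so the helper must as well):

    import java.util.List;
    import org.apache.hadoop.hbase.util.Threads;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.ACL;
    import org.apache.zookeeper.data.Stat;

    static List<ACL> getAclWithRetries(final ZooKeeper zk, final Stat stat)
        throws KeeperException, InterruptedException {
      while (true) {
        try {
          return zk.getACL("/", stat);   // the call being guarded
        } catch (KeeperException e) {
          switch (e.code()) {
          case CONNECTIONLOSS:
          case SESSIONEXPIRED:
          case OPERATIONTIMEOUT:
            Threads.sleep(100);          // possibly transient; back off and retry
            break;
          default:
            throw e;                     // not retryable; surface it
          }
        }
      }
    }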
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java (revision 1457026)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java (working copy)
@@ -33,6 +33,9 @@
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.ScannerCallable;
+import org.apache.hadoop.hbase.ipc.HBaseClient;
+import org.apache.hadoop.hbase.ipc.HBaseServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -40,6 +43,8 @@
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.log4j.Level;
/**
* Test {@link MetaReader}, {@link MetaEditor}.
@@ -63,10 +68,12 @@
public boolean isAborted() {
return abort.get();
}
-
};
@BeforeClass public static void beforeClass() throws Exception {
+ ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.ALL);
+ ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
UTIL.startMiniCluster(3);
Configuration c = new Configuration(UTIL.getConfiguration());
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (revision 1457026)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (working copy)
@@ -4306,6 +4306,8 @@
fail("Should have thrown IllegalArgumentException");
} catch (IllegalArgumentException iax) {
// success
+ } catch (NullPointerException npe) {
+ // success
}
// try null family
try {
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java (revision 1457026)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java (working copy)
@@ -22,6 +22,7 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.util.ArrayList;
import java.util.Arrays;
@@ -388,20 +389,19 @@
table.put(put);
table.flushCommits();
- //Try getting the row with an empty row key and make sure the other base cases work as well
- Result res = table.get(new Get(new byte[0]));
- assertTrue(res.isEmpty() == true);
+ //Try getting the row with an empty row key
+ Result res = null;
+ try {
+ res = table.get(new Get(new byte[0]));
+ fail();
+ } catch (IllegalArgumentException e) {
+ // Expected.
+ }
+ assertNull(res);
res = table.get(new Get(Bytes.toBytes("r1-not-exist")));
assertTrue(res.isEmpty() == true);
res = table.get(new Get(ROW_BYTES));
assertTrue(Arrays.equals(res.getValue(FAMILY, COL_QUAL), VAL_BYTES));
-
- //Now actually put in a row with an empty row key
- put = new Put(new byte[0]);
- put.add(FAMILY, COL_QUAL, VAL_BYTES);
- table.put(put);
- table.flushCommits();
- res = table.get(new Get(new byte[0]));
- assertTrue(Arrays.equals(res.getValue(FAMILY, COL_QUAL), VAL_BYTES));
+ table.close();
}
}
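[Editor's note] The try/fail/catch added above can also be written with JUnit's expected-exception form. A hedged sketch; TABLE and TEST_UTIL are assumed from the surrounding test class:

    // Same contract as the rewritten assertion: an empty row key is rejected.
    @Test (expected = IllegalArgumentException.class)
    public void testGetWithEmptyRowThrows() throws Exception {
      HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
      try {
        table.get(new Get(new byte[0]));  // zero-length row; must throw
      } finally {
        table.close();
      }
    }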
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java (revision 1457026)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java (working copy)
@@ -19,11 +19,12 @@
*/
package org.apache.hadoop.hbase.client;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
import java.util.ArrayList;
import java.util.List;
-import junit.framework.Assert;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -83,10 +84,12 @@
// SinglePut case
for (int i = 0; i < NUM_REGIONS; i++) {
- Put put = new Put(startRows[i]);
+ byte [] row = startRows[i];
+ if (row == null || row.length <= 0) continue;
+ Put put = new Put(row);
put.add(FAMILY, QUALIFIER, VALUE1);
success = multiplexer.put(TABLE, put);
- Assert.assertTrue(success);
+ assertTrue(success);
// ensure the buffer has been flushed
verifyAllBufferedPutsHaveFlushed(status);
@@ -99,32 +102,35 @@
do {
r = ht.get(get);
} while (r == null || r.getValue(FAMILY, QUALIFIER) == null);
- Assert.assertEquals(0, Bytes.compareTo(VALUE1, r.getValue(FAMILY, QUALIFIER)));
+ assertEquals(0, Bytes.compareTo(VALUE1, r.getValue(FAMILY, QUALIFIER)));
}
// MultiPut case
List<Put> multiput = new ArrayList<Put>();
for (int i = 0; i < NUM_REGIONS; i++) {
- Put put = new Put(endRows[i]);
+ byte [] row = endRows[i];
+ if (row == null || row.length <= 0) continue;
+ Put put = new Put(row);
put.add(FAMILY, QUALIFIER, VALUE2);
multiput.add(put);
}
failedPuts = multiplexer.put(TABLE, multiput);
- Assert.assertTrue(failedPuts == null);
+ assertTrue(failedPuts == null);
// ensure the buffer has been flushed
verifyAllBufferedPutsHaveFlushed(status);
// verify that the Get returns the correct result
for (int i = 0; i < NUM_REGIONS; i++) {
- Get get = new Get(endRows[i]);
+ byte [] row = endRows[i];
+ if (row == null || row.length <= 0) continue;
+ Get get = new Get(row);
get.addColumn(FAMILY, QUALIFIER);
Result r;
do {
r = ht.get(get);
} while (r == null || r.getValue(FAMILY, QUALIFIER) == null);
- Assert.assertEquals(0,
- Bytes.compareTo(VALUE2, r.getValue(FAMILY, QUALIFIER)));
+ assertEquals(0, Bytes.compareTo(VALUE2, r.getValue(FAMILY, QUALIFIER)));
}
}
@@ -141,7 +147,7 @@
}
} while (status.getTotalBufferedCounter() != 0 && tries != retries);
- Assert.assertEquals("There are still some buffered puts left in the queue",
+ assertEquals("There are still some buffered puts left in the queue",
0, status.getTotalBufferedCounter());
}
}
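[Editor's note] The guard repeated in the three loops above is one predicate: region boundary rows may be empty (the first region's start key, the last region's end key), and an empty row is no longer a usable Put/Get target. Pulled into a helper for readability (name hypothetical):

    // True for boundary rows that cannot serve as row keys.
    static boolean isEmptyRow(final byte [] row) {
      return row == null || row.length == 0;
    }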
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java (revision 0)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java (working copy)
@@ -0,0 +1,149 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import java.util.ConcurrentModificationException;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test that the client Actions that carry Cells can be iterated (Get does not carry Cells).
+ */
+@Category(SmallTests.class)
+public class TestPutDeleteEtcCellIteration {
+ private static final byte [] ROW = new byte [] {'r'};
+ private static final long TIMESTAMP = System.currentTimeMillis();
+ private static final int COUNT = 10;
+
+ @Test
+ public void testPutIteration() {
+ Put p = new Put(ROW);
+ for (int i = 0; i < COUNT; i++) {
+ byte [] bytes = Bytes.toBytes(i);
+ p.add(bytes, bytes, TIMESTAMP, bytes);
+ }
+ int index = 0;
+ for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) {
+ Cell cell = cellScanner.current();
+ byte [] bytes = Bytes.toBytes(index++);
+ assertTrue(cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes)));
+ }
+ assertEquals(COUNT, index);
+ }
+
+ @Test (expected = ConcurrentModificationException.class)
+ public void testPutConcurrentModificationOnIteration() {
+ Put p = new Put(ROW);
+ for (int i = 0; i < COUNT; i++) {
+ byte [] bytes = Bytes.toBytes(i);
+ p.add(bytes, bytes, TIMESTAMP, bytes);
+ }
+ int index = 0;
+ int trigger = 3;
+ for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) {
+ Cell cell = cellScanner.current();
+ byte [] bytes = Bytes.toBytes(index++);
+ // When we hit the trigger index, insert a new KV mid-iteration; the next advance should throw
+ if (index == trigger) p.add(bytes, bytes, TIMESTAMP, bytes);
+ assertTrue(cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes)));
+ }
+ assertEquals(COUNT, index);
+ }
+
+ @Test
+ public void testDeleteIteration() {
+ Delete d = new Delete(ROW);
+ for (int i = 0; i < COUNT; i++) {
+ byte [] bytes = Bytes.toBytes(i);
+ d.deleteColumn(bytes, bytes, TIMESTAMP);
+ }
+ int index = 0;
+ for (CellScanner cellScanner = d.cellScanner(); cellScanner.advance();) {
+ Cell cell = cellScanner.current();
+ byte [] bytes = Bytes.toBytes(index++);
+ // deleteColumn adds a Type.Delete marker for the exact version, not DeleteColumn
+ assertTrue(cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, KeyValue.Type.Delete)));
+ }
+ assertEquals(COUNT, index);
+ }
+
+ @Test
+ public void testAppendIteration() {
+ Append a = new Append(ROW);
+ for (int i = 0; i < COUNT; i++) {
+ byte [] bytes = Bytes.toBytes(i);
+ a.add(bytes, bytes, bytes);
+ }
+ int index = 0;
+ for (CellScanner cellScanner = a.cellScanner(); cellScanner.advance();) {
+ Cell cell = cellScanner.current();
+ byte [] bytes = Bytes.toBytes(index++);
+ KeyValue kv = (KeyValue)cell;
+ assertTrue(Bytes.equals(kv.getFamily(), bytes));
+ assertTrue(Bytes.equals(kv.getValue(), bytes));
+ }
+ assertEquals(COUNT, index);
+ }
+
+ @Test
+ public void testIncrementIteration() {
+ Increment increment = new Increment(ROW);
+ for (int i = 0; i < COUNT; i++) {
+ byte [] bytes = Bytes.toBytes(i);
+ increment.addColumn(bytes, bytes, i);
+ }
+ int index = 0;
+ for (CellScanner cellScanner = increment.cellScanner(); cellScanner.advance();) {
+ Cell cell = cellScanner.current();
+ int value = index;
+ byte [] bytes = Bytes.toBytes(index++);
+ KeyValue kv = (KeyValue)cell;
+ assertTrue(Bytes.equals(kv.getFamily(), bytes));
+ long a = Bytes.toLong(kv.getValue());
+ assertEquals(value, a);
+ }
+ assertEquals(COUNT, index);
+ }
+
+ @Test
+ public void testResultIteration() {
+ Cell [] cells = new Cell[COUNT];
+ for(int i = 0; i < COUNT; i++) {
+ byte [] bytes = Bytes.toBytes(i);
+ cells[i] = new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes);
+ }
+ Result r = new Result(Arrays.asList(cells));
+ int index = 0;
+ for (CellScanner cellScanner = r.cellScanner(); cellScanner.advance();) {
+ Cell cell = cellScanner.current();
+ byte [] bytes = Bytes.toBytes(index++);
+ assertTrue(cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes)));
+ }
+ assertEquals(COUNT, index);
+ }
+}
\ No newline at end of file
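[Editor's note] The new test pins down the CellScanner contract: advance() steps to the next Cell and returns false when the scanner is exhausted; current() returns the Cell under the cursor. The idiom in isolation, as a sketch:

    // Iterate the Cells buffered in a client-side Put.
    Put p = new Put(Bytes.toBytes("r"));
    p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    for (CellScanner scanner = p.cellScanner(); scanner.advance();) {
      Cell cell = scanner.current();
      // ... inspect cell ...
    }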
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java (revision 1457026)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java (working copy)
@@ -318,7 +318,12 @@
// now have all Environments fail
for (int i = 0; i < regions.length; i++) {
try {
- Get g = new Get(regions[i].getStartKey());
+ byte [] r = regions[i].getStartKey();
+ if (r == null || r.length <= 0) {
+ // It's the start row. Can't ask for an empty row; ask for the minimal key instead.
+ r = new byte [] {0};
+ }
+ Get g = new Get(r);
regions[i].get(g);
fail();
} catch (org.apache.hadoop.hbase.exceptions.DoNotRetryIOException xc) {
@@ -342,7 +347,8 @@
findCoprocessor(CoprocessorII.class.getName());
// new map and object created, hence the reference is different
// hence the old entry was indeed removed by the GC and new one has been created
- assertFalse(((CoprocessorII)c2).getSharedData().get("test2") == o2);
+ Object o3 = ((CoprocessorII)c2).getSharedData().get("test2");
+ assertFalse(o3 == o2);
}
public void testCoprocessorInterface() throws IOException {
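[Editor's note] Where the test must issue a Get against every region and cannot simply skip the empty boundary row, the substitution above uses the smallest legal key: a single zero byte sorts before any real row, so the request still lands in the first region. In sketch form (region is an assumed handle):

    byte [] r = region.getStartKey();
    if (r == null || r.length == 0) {
      r = new byte [] {0};   // minimal non-empty key; sorts first
    }
    Get g = new Get(r);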
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java (revision 1457026)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java (working copy)
@@ -20,10 +20,12 @@
package org.apache.hadoop.hbase.io;
import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
import java.nio.ByteBuffer;
import java.util.ArrayList;
+import java.util.Map;
import java.util.TreeMap;
-import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -44,14 +46,11 @@
import org.apache.hadoop.hbase.io.hfile.CachedBlock;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.MemStore;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
+import org.junit.BeforeClass;
import org.junit.experimental.categories.Category;
-import org.junit.BeforeClass;
-import java.lang.management.ManagementFactory;
-import java.lang.management.RuntimeMXBean;
/**
* Testing the sizing that HeapSize offers and compares to the size given by
@@ -252,10 +251,10 @@
cl = Put.class;
expected = ClassSize.estimateBase(cl, false);
//The actual TreeMap is not included in the above calculation
- expected += ClassSize.TREEMAP;
- Put put = new Put(Bytes.toBytes(""));
+ expected += ClassSize.align(ClassSize.TREEMAP + ClassSize.REFERENCE);
+ Put put = new Put(new byte [] {'0'});
actual = put.heapSize();
- if(expected != actual) {
+ if (expected != actual) {
ClassSize.estimateBase(cl, true);
assertEquals(expected, actual);
}
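[Editor's note] The corrected expectation above counts the referenced TreeMap plus the reference slot that points at it, rounded to the JVM's 8-byte object alignment by ClassSize.align. In sketch form:

    // Expected Put heap size = base class estimate + aligned(TreeMap + one reference).
    long expected = ClassSize.estimateBase(Put.class, false);
    expected += ClassSize.align(ClassSize.TREEMAP + ClassSize.REFERENCE);
    assertEquals(expected, new Put(new byte [] {'0'}).heapSize());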
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java (revision 1457026)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java (working copy)
@@ -477,7 +477,10 @@
HTable table = new HTable(conf, hri.getTableName());
try {
- Get get = new Get(hri.getStartKey());
+ byte [] row = hri.getStartKey();
+ // Check for null/empty row. If we find one, use a key that is likely to be in the first region.
+ if (row == null || row.length <= 0) row = new byte [] {'0'};
+ Get get = new Get(row);
while (System.currentTimeMillis() - start < timeout) {
try {
table.get(get);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java (revision 1457026)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java (working copy)
@@ -164,8 +164,9 @@
/**
* A simple test which verifies the 3 possible states when scanning across snapshot.
* @throws IOException
+ * @throws CloneNotSupportedException
*/
- public void testScanAcrossSnapshot2() throws IOException {
+ public void testScanAcrossSnapshot2() throws IOException, CloneNotSupportedException {
// we are going to the scanning across snapshot with two kvs
// kv1 should always be returned before kv2
final byte[] one = Bytes.toBytes(1);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java (revision 1457026)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java (working copy)
@@ -32,7 +32,6 @@
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import java.io.IOException;
@Category(MediumTests.class)
public class TestRegionServerMetrics {