diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index db7b074..47acfde 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -541,7 +541,7 @@ public final class ProtobufUtil {
     MutationType type = proto.getMutateType();
     assert type == MutationType.PUT: type.name();
     long timestamp = proto.hasTimestamp()? proto.getTimestamp(): HConstants.LATEST_TIMESTAMP;
-    Put put = null;
+    Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null;
     int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0;
     if (cellCount > 0) {
       // The proto has metadata only and the data is separate to be found in the cellScanner.
@@ -561,9 +561,7 @@ public final class ProtobufUtil {
         put.add(cell);
       }
     } else {
-      if (proto.hasRow()) {
-        put = new Put(proto.getRow().asReadOnlyByteBuffer(), timestamp);
-      } else {
+      if (put == null) {
         throw new IllegalArgumentException("row cannot be null");
       }
       // The proto has the metadata and the data itself
@@ -640,12 +638,8 @@ public final class ProtobufUtil {
       throws IOException {
     MutationType type = proto.getMutateType();
     assert type == MutationType.DELETE : type.name();
-    byte [] row = proto.hasRow()? proto.getRow().toByteArray(): null;
-    long timestamp = HConstants.LATEST_TIMESTAMP;
-    if (proto.hasTimestamp()) {
-      timestamp = proto.getTimestamp();
-    }
-    Delete delete = null;
+    long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP;
+    Delete delete = proto.hasRow() ? new Delete(proto.getRow().toByteArray(), timestamp) : null;
     int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0;
     if (cellCount > 0) {
       // The proto has metadata only and the data is separate to be found in the cellScanner.
@@ -668,7 +662,9 @@ public final class ProtobufUtil {
         delete.addDeleteMarker(cell);
       }
     } else {
-      delete = new Delete(row, timestamp);
+      if (delete == null) {
+        throw new IllegalArgumentException("row cannot be null");
+      }
       for (ColumnValue column: proto.getColumnValueList()) {
         byte[] family = column.getFamily().toByteArray();
         for (QualifierValue qv: column.getQualifierValueList()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 2af42fe..2f4104c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -74,7 +74,6 @@ import org.apache.hadoop.hbase.exceptions.OperationConflictException;
 import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.PriorityFunction;
@@ -152,11 +151,9 @@ import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
 import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
 import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
 import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
-import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.hbase.util.DNS;
@@ -1838,6 +1835,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       } catch (IOException e) {
         regionActionResultBuilder.setException(ResponseConverter.buildException(e));
         responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
+        if (cellScanner != null) {
+          skipCellsForMutations(regionAction.getActionList(), cellScanner);
+        }
         continue; // For this region it's a failure.
       }
 
@@ -1884,6 +1884,30 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     return responseBuilder.build();
   }
 
+  private void skipCellsForMutations(List<ClientProtos.Action> actions, CellScanner cellScanner) {
+    for (ClientProtos.Action action : actions) {
+      skipCellsForMutation(action, cellScanner);
+    }
+  }
+
+  private void skipCellsForMutation(ClientProtos.Action action, CellScanner cellScanner) {
+    try {
+      if (action.hasMutation()) {
+        MutationProto m = action.getMutation();
+        if (m.hasAssociatedCellCount()) {
+          for (int i = 0; i < m.getAssociatedCellCount(); i++) {
+            cellScanner.advance();
+          }
+        }
+      }
+    } catch (IOException e) {
+      // No need to handle this at the individual Mutation level. This entire RegionAction is
+      // already marked as failed because we could not find the Region here; on the client side
+      // the top-level RegionAction exception will be considered first.
+      LOG.error("Error while skipping Cells in CellScanner for invalid Region Mutations", e);
+    }
+  }
+
   /**
    * Mutate data in a table.
    *
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
index 61cb16a..8cbd7e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
@@ -35,11 +35,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.codec.KeyValueCodec;
 import org.apache.hadoop.hbase.exceptions.OperationConflictException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
@@ -73,6 +75,8 @@ public class TestMultiParallel {
     //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
     //((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
     //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
+    UTIL.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
+      KeyValueCodec.class.getCanonicalName());
     UTIL.startMiniCluster(slaves);
     HTable t = UTIL.createTable(TEST_TABLE, Bytes.toBytes(FAMILY));
     UTIL.createMultiRegions(t, Bytes.toBytes(FAMILY));