diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index b2c012d..7fbc3ce 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -617,6 +617,24 @@ public class HTable implements Table {
     }
   }
 
+  @Override
+  public Result update(final Update update) throws IOException {
+    NoncedRegionServerCallable<Result> callable =
+        new NoncedRegionServerCallable<Result>(this.connection, getName(), update.getRow(),
+            this.rpcControllerFactory.newController()) {
+      @Override
+      protected Result rpcCall() throws Exception {
+        MutateRequest request = RequestConverter.buildMutateRequest(
+          getLocation().getRegionInfo().getRegionName(), update, getNonceGroup(), getNonce());
+        MutateResponse response = doMutate(request);
+        if (!response.hasResult()) return null;
+        return ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner());
+      }
+    };
+    return rpcCallerFactory.<Result> newCaller(this.writeRpcTimeout).
+        callWithRetries(callable, this.operationTimeout);
+  }
+
   /**
    * {@inheritDoc}
    */
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
index 36627bd..d9bd133 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
@@ -277,6 +277,7 @@ public class MetricsConnection implements StatisticTrackable {
   @VisibleForTesting protected final CallTracker getTracker;
   @VisibleForTesting protected final CallTracker scanTracker;
   @VisibleForTesting protected final CallTracker appendTracker;
+  @VisibleForTesting protected final CallTracker updateTracker;
   @VisibleForTesting protected final CallTracker deleteTracker;
   @VisibleForTesting protected final CallTracker incrementTracker;
   @VisibleForTesting protected final CallTracker putTracker;
@@ -333,6 +334,7 @@ public class MetricsConnection implements StatisticTrackable {
     this.getTracker = new CallTracker(this.registry, "Get", scope);
     this.scanTracker = new CallTracker(this.registry, "Scan", scope);
     this.appendTracker = new CallTracker(this.registry, "Mutate", "Append", scope);
+    this.updateTracker = new CallTracker(this.registry, "Mutate", "Update", scope);
     this.deleteTracker = new CallTracker(this.registry, "Mutate", "Delete", scope);
     this.incrementTracker = new CallTracker(this.registry, "Mutate", "Increment", scope);
     this.putTracker = new CallTracker(this.registry, "Mutate", "Put", scope);
@@ -453,6 +455,9 @@ public class MetricsConnection implements StatisticTrackable {
       case PUT:
         putTracker.updateRpc(stats);
         return;
+      case UPDATE:
+        updateTracker.updateRpc(stats);
+        return;
       default:
         throw new RuntimeException("Unrecognized mutation type " + mutationType);
     }
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowUpdater.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowUpdater.java
new file mode 100644
index 0000000..f2060d7
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowUpdater.java
@@ -0,0 +1,62 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface RowUpdater {
+
+  /**
+   * Generates the mutation to apply, based on the current cells of a single row.
+   * Note: you MUST handle cell tags (for example, the TTL tag) yourself.
+   * @param currentValue the current cells read from the row
+   * @param carrier the mutation carrying the changes to apply to the row
+   * @return the mutation to be saved to the server
+   * @throws IOException e
+   */
+  Mutation apply(List<Cell> currentValue, Mutation carrier) throws IOException;
+
+  /**
+   * Concrete implementers can signal a failure condition in their code by throwing an
+   * {@link IOException}.
+   * @return The RowUpdater serialized using pb
+   * @throws IOException in case an I/O or a RowUpdater specific failure needs to be signaled.
+   */
+  byte[] toByteArray() throws IOException;
+
+  /**
+   * Concrete implementers can signal a failure condition in their code by throwing an
+   * {@link IOException}.
+   * @param pbBytes A pb serialized {@link RowUpdater} instance
+   * @return An instance of {@link RowUpdater} made from bytes
+   * @throws DeserializationException
+   * @see #toByteArray
+   */
+  public static RowUpdater parseFrom(final byte [] pbBytes) throws DeserializationException {
+    throw new DeserializationException(
+        "parseFrom called on base RowUpdater, but should be called on derived type");
+  }
+}
\ No newline at end of file
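For illustration, a minimal concrete RowUpdater could look like the sketch below. This is hypothetical and not part of the patch: the class name IdentityRowUpdater is invented, and whether apply() should reuse the carrier mutation or build a fresh one is an assumption the interface leaves open.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowUpdater;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

// Hypothetical example: a stateless RowUpdater that writes the current cell
// values back unchanged, i.e. a read-modify-write that rewrites the row as-is.
public class IdentityRowUpdater implements RowUpdater {

  @Override
  public Mutation apply(List<Cell> currentValue, Mutation carrier) throws IOException {
    // Build the result on the carrier's row; tags (e.g. the TTL tag) would
    // have to be copied here by hand, per the interface contract.
    Put put = new Put(carrier.getRow());
    for (Cell cell : currentValue) {
      put.addColumn(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
          CellUtil.cloneValue(cell));
    }
    return put;
  }

  @Override
  public byte[] toByteArray() {
    return new byte[0]; // stateless, nothing to serialize
  }

  // Looked up reflectively by ProtobufUtil.toRowUpdater(RowUpdaterProtos.RowUpdater).
  public static IdentityRowUpdater parseFrom(byte[] pbBytes) throws DeserializationException {
    return new IdentityRowUpdater();
  }
}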
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 016894b..74446dd 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -652,4 +652,19 @@ public interface Table extends Closeable {
    * @param writeRpcTimeout
    */
   void setWriteRpcTimeout(int writeRpcTimeout);
+
+  /**
+   * Updates values of one or more columns within a single row.
+   * <p>
+   * This operation does not appear atomic to readers. Updates are done
+   * under a single row lock, so write operations to a row are synchronized, but
+   * readers do not take row locks so get and scan operations can see this
+   * operation partially completed.
+   *
+   * @param update object that specifies the row, the columns to read, and the
+   * {@link RowUpdater} used to compute the new values
+   * @throws IOException e
+   * @return values of columns after the update operation (may be null)
+   */
+  Result update(final Update update) throws IOException;
 }
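A usage sketch for the new Table#update entry point (hypothetical; the table, family, and qualifier names are invented, and IdentityRowUpdater is the illustrative implementation sketched above):

// Hypothetical usage, not part of this patch.
try (Connection connection = ConnectionFactory.createConnection(conf);
     Table table = connection.getTable(TableName.valueOf("t1"))) {
  Update update = new Update(Bytes.toBytes("row1"), new IdentityRowUpdater());
  update.add(Bytes.toBytes("cf"), Bytes.toBytes("q1")); // column the server will read
  Result result = table.update(update);                 // may be null
}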
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Update.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Update.java
new file mode 100644
index 0000000..fc42ce0
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Update.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.UUID;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import static org.apache.hadoop.hbase.client.Mutation.checkRow;
+import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.visibility.CellVisibility;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class Update extends Mutation {
+  private static final long HEAP_OVERHEAD = ClassSize.REFERENCE * 2 + ClassSize.TIMERANGE;
+  private TimeRange tr = new TimeRange();
+  private final RowUpdater updater;
+  public Update(byte[] row, RowUpdater updater) throws IOException {
+    this(row, 0, row.length, updater);
+  }
+
+  public Update(final byte [] rowArray, final int rowOffset, final int rowLength, RowUpdater updater) throws IOException {
+    checkRow(rowArray, rowOffset, rowLength);
+    if (updater == null) {
+      throw new IllegalArgumentException("Row updater is null");
+    }
+    this.row = Bytes.copy(rowArray, rowOffset, rowLength);
+    this.updater = updater;
+  }
+
+  public RowUpdater getRowUpdater() {
+    return updater;
+  }
+
+  /**
+   * Add the specified column for retrieving data.
+   * The family and qualifier are used for creating the Get operation.
+   * @param family family name
+   * @param qualifier column qualifier
+   * @return this
+   * @throws java.io.IOException e
+   */
+  public Update add(byte [] family, byte [] qualifier) throws IOException {
+    List<Cell> list = getCellList(family);
+    KeyValue kv = createPutKeyValue(family, qualifier, ts, null);
+    list.add(kv);
+    familyMap.put(CellUtil.cloneFamily(kv), list);
+    return this;
+  }
+
+  /**
+   * Add the specified column for retrieving data. Operation assumes that
+   * the passed KeyValue is immutable and its backing array will not be modified
+   * for the duration of this Update.
+   * The family and qualifier are used for creating the Get operation;
+   * the value is ignored.
+   * @param kv individual KeyValue
+   * @return this
+   * @throws java.io.IOException e
+   */
+  public Update add(Cell kv) throws IOException {
+    byte [] family = CellUtil.cloneFamily(kv);
+    List<Cell> list = getCellList(family);
+    // Checking that the row of the kv is the same as the update
+    int res = Bytes.compareTo(this.row, 0, row.length,
+        kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
+    if (res != 0) {
+      throw new WrongRowIOException("The row in " + kv.toString() +
+        " doesn't match the original one " + Bytes.toStringBinary(this.row));
+    }
+    list.add(kv);
+    familyMap.put(family, list);
+    return this;
+  }
+
+  /**
+   * @param returnResults True (default) if the update operation should return the results. A
+   *          client that is not interested in the result can save network bandwidth by setting
+   *          this to false.
+   * @return this
+   */
+  @Override
+  public Update setReturnResults(boolean returnResults) {
+    super.setReturnResults(returnResults);
+    return this;
+  }
+
+  /**
+   * @return current setting for returnResults
+   */
+  // This method makes public the superclass's protected method.
+  @Override
+  public boolean isReturnResults() {
+    return super.isReturnResults();
+  }
+
+  /**
+   * Gets the TimeRange used for this update.
+   * @return TimeRange
+   */
+  public TimeRange getTimeRange() {
+    return this.tr;
+  }
+
+  /**
+   * Sets the TimeRange to be used on the Get for this update.
+   * <p>
+   * This is useful for when you have counters that only last for specific
+   * periods of time (i.e. counters that are partitioned by time). By setting
+   * the range of valid times for this update, you can potentially gain
+   * some performance with a more optimal Get operation.
+   * <p>
+   * This range is used as [minStamp, maxStamp).
+   * @param minStamp minimum timestamp value, inclusive
+   * @param maxStamp maximum timestamp value, exclusive
+   * @throws IOException if invalid time range
+   * @return this
+   */
+  public Update setTimeRange(long minStamp, long maxStamp)
+      throws IOException {
+    tr = new TimeRange(minStamp, maxStamp);
+    return this;
+  }
+
+  @Override
+  protected long extraHeapSize() {
+    return HEAP_OVERHEAD;
+  }
+
+  @Override
+  public Update setAttribute(String name, byte[] value) {
+    return (Update) super.setAttribute(name, value);
+  }
+
+  @Override
+  public Update setId(String id) {
+    return (Update) super.setId(id);
+  }
+
+  @Override
+  public Update setDurability(Durability d) {
+    return (Update) super.setDurability(d);
+  }
+
+  @Override
+  public Update setFamilyCellMap(NavigableMap<byte [], List<Cell>> map) {
+    return (Update) super.setFamilyCellMap(map);
+  }
+
+  @Override
+  public Update setClusterIds(List<UUID> clusterIds) {
+    return (Update) super.setClusterIds(clusterIds);
+  }
+
+  @Override
+  public Update setCellVisibility(CellVisibility expression) {
+    return (Update) super.setCellVisibility(expression);
+  }
+
+  @Override
+  public Update setACL(String user, Permission perms) {
+    return (Update) super.setACL(user, perms);
+  }
+
+  @Override
+  public Update setACL(Map<String, Permission> perms) {
+    return (Update) super.setACL(perms);
+  }
+
+  @Override
+  public Update setTTL(long ttl) {
+    return (Update) super.setTTL(ttl);
+  }
+}
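Because the setters on Update return this, calls chain; a short hypothetical sketch that bounds the server-side read with setTimeRange and skips shipping the post-update Result back:

// Hypothetical sketch, not part of this patch.
Update update = new Update(Bytes.toBytes("row1"), new IdentityRowUpdater())
    .setTimeRange(0L, System.currentTimeMillis()) // read window [minStamp, maxStamp)
    .setReturnResults(false);                     // save bandwidth on the response
update.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"));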
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 330348d..f4de2c4 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -96,6 +96,9 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 import com.google.protobuf.TextFormat;
+import org.apache.hadoop.hbase.client.RowUpdater;
+import org.apache.hadoop.hbase.client.Update;
+import org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos;
 
 /**
  * Protobufs utility.
@@ -618,6 +621,73 @@ public final class ProtobufUtil {
   }
 
   /**
+   * Convert a protocol buffer Mutate to an Update.
+   *
+   * @param proto the protocol buffer Mutate to convert
+   * @param cellScanner cell scanner carrying the mutation's data, if sent separately
+   * @return the converted client Update
+   * @throws IOException e
+   */
+  public static Update toUpdate(final ClientProtos.MutationProto proto, final CellScanner cellScanner)
+      throws IOException {
+    ClientProtos.MutationProto.MutationType type = proto.getMutateType();
+    assert type == ClientProtos.MutationProto.MutationType.UPDATE : type.name();
+    assert proto.hasRowUpdater();
+    RowUpdater updater = toRowUpdater(proto.getRowUpdater());
+    Update update = null;
+    int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0;
+    if (cellCount > 0) {
+      // The proto has metadata only and the data is separate to be found in the cellScanner.
+      if (cellScanner == null) {
+        throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " +
+            TextFormat.shortDebugString(proto));
+      }
+      for (int i = 0; i < cellCount; i++) {
+        if (!cellScanner.advance()) {
+          throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i +
+              " no cell returned: " + TextFormat.shortDebugString(proto));
+        }
+        Cell cell = cellScanner.current();
+        if (update == null) {
+          update = new Update(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), updater);
+        }
+        update.add(cell);
+      }
+    } else {
+      if (!proto.hasRow()) {
+        throw new IllegalArgumentException("row cannot be null");
+      }
+      byte[] row = proto.getRow().toByteArray();
+      update = new Update(row, updater);
+      for (ClientProtos.MutationProto.ColumnValue column: proto.getColumnValueList()) {
+        byte[] family = column.getFamily().toByteArray();
+        for (ClientProtos.MutationProto.ColumnValue.QualifierValue qv: column.getQualifierValueList()) {
+          byte[] qualifier = qv.getQualifier().toByteArray();
+          if (!qv.hasValue()) {
+            throw new DoNotRetryIOException("Missing required field: qualifier value");
+          }
+          byte[] value = qv.getValue().toByteArray();
+          byte[] tags = null;
+          if (qv.hasTags()) {
+            tags = qv.getTags().toByteArray();
+          }
+          update.add(CellUtil.createCell(row, family, qualifier, qv.getTimestamp(),
+              KeyValue.Type.Put, value, tags));
+        }
+      }
+    }
+    if (proto.hasTimeRange()) {
+      TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
+      update.setTimeRange(timeRange.getMin(), timeRange.getMax());
+    }
+    update.setDurability(toDurability(proto.getDurability()));
+    for (HBaseProtos.NameBytesPair attribute : proto.getAttributeList()) {
+      update.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
+    }
+    return update;
+  }
+
+  /**
    * Convert a protocol buffer Mutate to an Append
    * @param cellScanner
    * @param proto the protocol buffer Mutate to convert
@@ -1461,6 +1531,45 @@ public final class ProtobufUtil {
   }
 
   /**
+   * Convert a client RowUpdater to a protocol buffer RowUpdater.
+   * @param updater the RowUpdater to convert
+   * @return the converted protocol buffer RowUpdater
+   * @throws java.io.IOException e
+   */
+  public static RowUpdaterProtos.RowUpdater toRowUpdater(RowUpdater updater) throws IOException {
+    return RowUpdaterProtos.RowUpdater.newBuilder()
+        .setName(updater.getClass().getName())
+        .setSerializedUpdater(ByteStringer.wrap(updater.toByteArray()))
+        .build();
+  }
+
+  /**
+   * Convert a protocol buffer RowUpdater to a client RowUpdater.
+   *
+   * @param proto the protocol buffer RowUpdater to convert
+   * @return the converted RowUpdater
+   */
+  @SuppressWarnings("unchecked")
+  public static RowUpdater toRowUpdater(RowUpdaterProtos.RowUpdater proto) throws IOException {
+    String type = proto.getName();
+    final byte [] value = proto.getSerializedUpdater().toByteArray();
+    String funcName = "parseFrom";
+    try {
+      Class<? extends RowUpdater> c =
+          (Class<? extends RowUpdater>) Class.forName(type, true, CLASS_LOADER);
+      Method parseFrom = c.getMethod(funcName, byte[].class);
+      if (parseFrom == null) {
+        throw new IOException("Unable to locate function: " + funcName + " in type: " + type);
+      }
+      return (RowUpdater) parseFrom.invoke(c, value);
+    } catch (Exception e) {
+      // Either we couldn't instantiate the method object, or "parseFrom" failed.
+      // In either case, let's not retry.
+      throw new DoNotRetryIOException(e);
+    }
+  }
+
+  /**
    * Convert a delete KeyValue type to protocol buffer DeleteType.
    *
    * @param type
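The reflective parseFrom lookup above mirrors how ProtobufUtil already deserializes Filters. A hypothetical round trip through the two helpers (IdentityRowUpdater is the earlier sketch):

// Hypothetical round trip, not part of this patch.
RowUpdater original = new IdentityRowUpdater();
RowUpdaterProtos.RowUpdater proto = ProtobufUtil.toRowUpdater(original);
RowUpdater restored = ProtobufUtil.toRowUpdater(proto); // invokes parseFrom reflectively
assert restored instanceof IdentityRowUpdater;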
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 2758c26..fafceb1 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -61,9 +61,11 @@ import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.RowUpdater;
 import org.apache.hadoop.hbase.client.CompactionState;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Update;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
@@ -122,6 +124,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ServerInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.CellProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Column;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
@@ -727,6 +730,73 @@ public final class ProtobufUtil {
   }
 
   /**
+   * Convert a protocol buffer Mutate to an Update.
+   *
+   * @param proto the protocol buffer Mutate to convert
+   * @param cellScanner cell scanner carrying the mutation's data, if sent separately
+   * @return the converted client Update
+   * @throws IOException e
+   */
+  public static Update toUpdate(final MutationProto proto, final CellScanner cellScanner)
+      throws IOException {
+    ClientProtos.MutationProto.MutationType type = proto.getMutateType();
+    assert type == ClientProtos.MutationProto.MutationType.UPDATE : type.name();
+    assert proto.hasRowUpdater();
+    RowUpdater updater = toRowUpdater(proto.getRowUpdater());
+    Update update = null;
+    int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0;
+    if (cellCount > 0) {
+      // The proto has metadata only and the data is separate to be found in the cellScanner.
+      if (cellScanner == null) {
+        throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " +
+            TextFormat.shortDebugString(proto));
+      }
+      for (int i = 0; i < cellCount; i++) {
+        if (!cellScanner.advance()) {
+          throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i +
+              " no cell returned: " + TextFormat.shortDebugString(proto));
+        }
+        Cell cell = cellScanner.current();
+        if (update == null) {
+          update = new Update(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), updater);
+        }
+        update.add(cell);
+      }
+    } else {
+      if (!proto.hasRow()) {
+        throw new IllegalArgumentException("row cannot be null");
+      }
+      byte[] row = proto.getRow().toByteArray();
+      update = new Update(row, updater);
+      for (ClientProtos.MutationProto.ColumnValue column: proto.getColumnValueList()) {
+        byte[] family = column.getFamily().toByteArray();
+        for (ClientProtos.MutationProto.ColumnValue.QualifierValue qv: column.getQualifierValueList()) {
+          byte[] qualifier = qv.getQualifier().toByteArray();
+          if (!qv.hasValue()) {
+            throw new DoNotRetryIOException("Missing required field: qualifier value");
+          }
+          byte[] value = qv.getValue().toByteArray();
+          byte[] tags = null;
+          if (qv.hasTags()) {
+            tags = qv.getTags().toByteArray();
+          }
+          update.add(CellUtil.createCell(row, family, qualifier, qv.getTimestamp(),
+              KeyValue.Type.Put, value, tags));
+        }
+      }
+    }
+    if (proto.hasTimeRange()) {
+      TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
+      update.setTimeRange(timeRange.getMin(), timeRange.getMax());
+    }
+    update.setDurability(toDurability(proto.getDurability()));
+    for (HBaseProtos.NameBytesPair attribute : proto.getAttributeList()) {
+      update.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
+    }
+    return update;
+  }
+
+  /**
    * Convert a protocol buffer Mutate to an Append
    * @param cellScanner
    * @param proto the protocol buffer Mutate to convert
@@ -876,7 +946,8 @@ public final class ProtobufUtil {
   public static Get toGet(final MutationProto proto, final CellScanner cellScanner)
       throws IOException {
     MutationType type = proto.getMutateType();
-    assert type == MutationType.INCREMENT || type == MutationType.APPEND : type.name();
+    assert type == MutationType.INCREMENT || type == MutationType.APPEND
+        || type == MutationType.UPDATE : type.name();
     byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
     Get get = null;
     int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
@@ -1241,6 +1312,59 @@ public final class ProtobufUtil {
     return builder.build();
   }
 
+  /**
+   * Convert a client Update to a protobuf Mutate.
+   *
+   * @param update
+   * @param builder
+   * @param nonce
+   * @return the converted mutate
+   */
+  public static MutationProto toMutation(
+      final Update update, final MutationProto.Builder builder, long nonce) throws IOException {
+    builder.setRow(UnsafeByteOperations.unsafeWrap(update.getRow()));
+    builder.setMutateType(MutationType.UPDATE);
+    builder.setDurability(toDurability(update.getDurability()));
+    if (nonce != HConstants.NO_NONCE) {
+      builder.setNonce(nonce);
+    }
+    TimeRange timeRange = update.getTimeRange();
+    setTimeRange(builder, timeRange);
+    ColumnValue.Builder columnBuilder = ColumnValue.newBuilder();
+    QualifierValue.Builder valueBuilder = QualifierValue.newBuilder();
+    for (Map.Entry<byte[], List<Cell>> family: update.getFamilyCellMap().entrySet()) {
+      columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey()));
+      columnBuilder.clearQualifierValue();
+      List<Cell> values = family.getValue();
+      if (values != null && values.size() > 0) {
+        for (Cell cell: values) {
+          valueBuilder.clear();
+          valueBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(
+              cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()));
+          valueBuilder.setValue(UnsafeByteOperations.unsafeWrap(
+              cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
+          if (cell.getTagsLength() > 0) {
+            valueBuilder.setTags(UnsafeByteOperations.unsafeWrap(cell.getTagsArray(),
+                cell.getTagsOffset(), cell.getTagsLength()));
+          }
+          columnBuilder.addQualifierValue(valueBuilder.build());
+        }
+      }
+      builder.addColumnValue(columnBuilder.build());
+    }
+    Map<String, byte[]> attributes = update.getAttributesMap();
+    if (!attributes.isEmpty()) {
+      NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder();
+      for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
+        attributeBuilder.setName(attribute.getKey());
+        attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue()));
+        builder.addAttribute(attributeBuilder.build());
+      }
+    }
+    builder.setRowUpdater(toRowUpdater(update.getRowUpdater()));
+    return builder.build();
+  }
+
   public static MutationProto toMutation(final MutationType type, final Mutation mutation)
     throws IOException {
     return toMutation(type, mutation, HConstants.NO_NONCE);
@@ -1329,6 +1453,9 @@ public final class ProtobufUtil {
     if (mutation instanceof Increment) {
       setTimeRange(builder, ((Increment)mutation).getTimeRange());
     }
+    if (mutation instanceof Update) {
+      builder.setRowUpdater(ProtobufUtil.toRowUpdater(((Update) mutation).getRowUpdater()));
+    }
     if (nonce != HConstants.NO_NONCE) {
       builder.setNonce(nonce);
     }
@@ -1569,6 +1696,45 @@ public final class ProtobufUtil {
   }
 
   /**
+   * Convert a client RowUpdater to a protocol buffer RowUpdater.
+   * @param updater the RowUpdater to convert
+   * @return the converted protocol buffer RowUpdater
+   * @throws java.io.IOException e
+   */
+  public static RowUpdaterProtos.RowUpdater toRowUpdater(RowUpdater updater) throws IOException {
+    return RowUpdaterProtos.RowUpdater.newBuilder()
+        .setName(updater.getClass().getName())
+        .setSerializedUpdater(UnsafeByteOperations.unsafeWrap(updater.toByteArray()))
+        .build();
+  }
+
+  /**
+   * Convert a protocol buffer RowUpdater to a client RowUpdater.
+   *
+   * @param proto the protocol buffer RowUpdater to convert
+   * @return the converted RowUpdater
+   */
+  @SuppressWarnings("unchecked")
+  public static RowUpdater toRowUpdater(RowUpdaterProtos.RowUpdater proto) throws IOException {
+    String type = proto.getName();
+    final byte [] value = proto.getSerializedUpdater().toByteArray();
+    String funcName = "parseFrom";
+    try {
+      Class<? extends RowUpdater> c =
+          (Class<? extends RowUpdater>) Class.forName(type, true, CLASS_LOADER);
+      Method parseFrom = c.getMethod(funcName, byte[].class);
+      if (parseFrom == null) {
+        throw new IOException("Unable to locate function: " + funcName + " in type: " + type);
+      }
+      return (RowUpdater) parseFrom.invoke(c, value);
+    } catch (Exception e) {
+      // Either we couldn't instantiate the method object, or "parseFrom" failed.
+      // In either case, let's not retry.
+      throw new DoNotRetryIOException(e);
+    }
+  }
+
+  /**
    * Convert a delete KeyValue type to protocol buffer DeleteType.
    *
    * @param type
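On the shaded side, a hypothetical sanity check that an Update survives the MutationProto round trip; passing a null CellScanner exercises the inline-cells path of toUpdate:

// Hypothetical sketch, not part of this patch.
Update update = new Update(Bytes.toBytes("row1"), new IdentityRowUpdater());
update.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
MutationProto proto =
    ProtobufUtil.toMutation(update, MutationProto.newBuilder(), HConstants.NO_NONCE);
Update roundTripped = ProtobufUtil.toUpdate(proto, null); // cells travel inline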
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index abd1563..7c09ae9 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Action;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Update;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
@@ -344,6 +345,25 @@ public final class RequestConverter {
     return builder.build();
   }
 
+  /**
+   * Create a protocol buffer MutateRequest for a client Update.
+   *
+   * @return a mutate request
+   * @throws IOException e
+   */
+  public static MutateRequest buildMutateRequest(final byte[] regionName,
+      final Update update, final long nonceGroup, final long nonce) throws IOException {
+    MutateRequest.Builder builder = MutateRequest.newBuilder();
+    RegionSpecifier region = buildRegionSpecifier(
+      RegionSpecifierType.REGION_NAME, regionName);
+    builder.setRegion(region);
+    if (nonce != HConstants.NO_NONCE && nonceGroup != HConstants.NO_NONCE) {
+      builder.setNonceGroup(nonceGroup);
+    }
+    builder.setMutation(ProtobufUtil.toMutation(update, MutationProto.newBuilder(), nonce));
+    return builder.build();
+  }
+
   /**
    * Create a protocol buffer MutateRequest for a delete
    *
@@ -612,6 +626,9 @@ public final class RequestConverter {
       } else if (row instanceof Increment) {
         regionActionBuilder.addAction(actionBuilder.setMutation(
           ProtobufUtil.toMutation((Increment)row, mutationBuilder, action.getNonce())));
+      } else if (row instanceof Update) {
+        regionActionBuilder.addAction(actionBuilder.setMutation(
+          ProtobufUtil.toMutation((Update)row, mutationBuilder, action.getNonce())));
       } else if (row instanceof RegionCoprocessorServiceExec) {
         RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
         // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
@@ -695,6 +712,11 @@ public final class RequestConverter {
         cells.add(i);
         builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(
           MutationType.INCREMENT, i, mutationBuilder, action.getNonce())));
+      } else if (row instanceof Update) {
+        Update u = (Update) row;
+        cells.add(u);
+        builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(
+          MutationType.UPDATE, u, mutationBuilder, action.getNonce())));
       } else if (row instanceof RegionCoprocessorServiceExec) {
         RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
         // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 0e7dd8d..7993866 100644
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -82,6 +82,13 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
   void updateAppend(long t);
 
   /**
+   * Update the Update time histogram.
+   *
+   * @param t time it took
+   */
+  void updateUpdate(long t);
+
+  /**
    * Update the Replay time histogram.
    *
    * @param t time it took
@@ -126,6 +133,11 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
   void incrSlowAppend();
 
   /**
+   * Increment the number of slow Updates that have happened.
+   */
+  void incrSlowUpdate();
+
+  /**
    * Update the split transaction time histogram
    * @param t time it took, in milliseconds
    */
@@ -339,6 +351,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
   String INCREMENT_KEY = "increment";
   String MUTATE_KEY = "mutate";
   String APPEND_KEY = "append";
+  String UPDATE_KEY = "update";
   String REPLAY_KEY = "replay";
   String SCAN_KEY = "scan";
   String SCAN_SIZE_KEY = "scanSize";
@@ -349,6 +362,9 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
   String SLOW_DELETE_KEY = "slowDeleteCount";
   String SLOW_INCREMENT_KEY = "slowIncrementCount";
   String SLOW_APPEND_KEY = "slowAppendCount";
+  String SLOW_UPDATE_KEY = "slowUpdateCount";
+  String SLOW_UPDATE_DESC =
+      "The number of Updates that took over 1000ms to complete";
 
   String SLOW_MUTATE_DESC = "The number of Multis that took over 1000ms to complete";
   String SLOW_DELETE_DESC =
diff --git hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
index decf841..6c07f10 100644
--- hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
+++ hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java
@@ -85,5 +85,8 @@ public interface MetricsRegionSource extends Comparable<MetricsRegionSource> {
    */
   MetricsRegionAggregateSource getAggregateSource();
 
-
+  /**
+   * Increment the count of update operations on this region.
+   */
+  void updateUpdate();
 }
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index b412fd1..3d45cd7 100644
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -41,6 +41,7 @@ public class MetricsRegionServerSourceImpl
   private final MetricHistogram getHisto;
   private final MetricHistogram incrementHisto;
   private final MetricHistogram appendHisto;
+  private final MetricHistogram updateHisto;
   private final MetricHistogram replayHisto;
   private final MetricHistogram scanSizeHisto;
   private final MetricHistogram scanTimeHisto;
@@ -50,6 +51,7 @@ public class MetricsRegionServerSourceImpl
   private final MutableFastCounter slowGet;
   private final MutableFastCounter slowIncrement;
   private final MutableFastCounter slowAppend;
+  private final MutableFastCounter slowUpdate;
   private final MutableFastCounter splitRequest;
   private final MutableFastCounter splitSuccess;
 
@@ -112,6 +114,9 @@ public class MetricsRegionServerSourceImpl
     appendHisto = getMetricsRegistry().newTimeHistogram(APPEND_KEY);
     slowAppend = getMetricsRegistry().newCounter(SLOW_APPEND_KEY, SLOW_APPEND_DESC, 0L);
 
+    updateHisto = getMetricsRegistry().newTimeHistogram(UPDATE_KEY);
+    slowUpdate = getMetricsRegistry().newCounter(SLOW_UPDATE_KEY, SLOW_UPDATE_DESC, 0L);
+
     replayHisto = getMetricsRegistry().newTimeHistogram(REPLAY_KEY);
     scanSizeHisto = getMetricsRegistry().newSizeHistogram(SCAN_SIZE_KEY);
     scanTimeHisto = getMetricsRegistry().newTimeHistogram(SCAN_TIME_KEY);
@@ -531,4 +536,14 @@ public class MetricsRegionServerSourceImpl
   public void updatePauseTimeWithoutGc(long t) {
     pausesWithoutGc.add(t);
   }
+
+  @Override
+  public void updateUpdate(long t) {
+    updateHisto.add(t);
+  }
+
+  @Override
+  public void incrSlowUpdate() {
+    slowUpdate.incr();
+  }
 }
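The region-server code that would drive these metrics is not part of this diff; presumably a MetricsRegionServer#updateUpdate counterpart would follow the existing updateAppend pattern, along these lines (hypothetical):

// Hypothetical sketch, not part of this patch: mirrors how updateAppend feeds
// the histogram and bumps the slow counter when a call takes over a second.
public void updateUpdate(long t) {
  if (t > 1000) {
    serverSource.incrSlowUpdate();
  }
  serverSource.updateUpdate(t);
}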
diff --git hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index 17710e1..0a4977d 100644
--- hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++ hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -51,7 +51,7 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
   private final String regionIncrementKey;
   private final String regionAppendKey;
   private final String regionScanKey;
-
+  private final String regionUpdateKey;
   /*
    * Implementation note: Do not put histograms per region. With hundreds of regions in a server
    * histograms allocate too many counters. See HBASE-17016.
@@ -62,6 +62,7 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
   private final MutableFastCounter regionAppend;
   private final MutableFastCounter regionGet;
   private final MutableFastCounter regionScan;
+  private final MutableFastCounter regionUpdate;
 
   private final int hashCode;
 
@@ -101,6 +102,9 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
     regionScanKey = regionNamePrefix + MetricsRegionServerSource.SCAN_KEY + suffix;
     regionScan = registry.getCounter(regionScanKey, 0L);
 
+    regionUpdateKey = regionNamePrefix + MetricsRegionServerSource.UPDATE_KEY + suffix;
+    regionUpdate = registry.getCounter(regionUpdateKey, 0L);
+
     hashCode = regionWrapper.getRegionHashCode();
   }
 
@@ -286,4 +290,9 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
     return obj == this ||
         (obj instanceof MetricsRegionSourceImpl && compareTo((MetricsRegionSourceImpl) obj) == 0);
   }
+
+  @Override
+  public void updateUpdate() {
+    regionUpdate.incr();
+  }
 }
diff --git hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java
index bfd196e..b7a8752 100644
--- hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java
+++ hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java
@@ -8593,6 +8593,19 @@ public final class ClientProtos {
      * <code>optional uint64 nonce = 9;</code>
      */
    long getNonce();
+
+    /**
+     * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+     */
+    boolean hasRowUpdater();
+    /**
+     * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater getRowUpdater();
+    /**
+     * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder getRowUpdaterOrBuilder();
   }
   /**
    * <pre>
@@ -8726,6 +8739,19 @@ public final class ClientProtos {
               nonce_ = input.readUInt64();
               break;
             }
+            case 82: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000080) == 0x00000080)) {
+                subBuilder = rowUpdater_.toBuilder();
+              }
+              rowUpdater_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(rowUpdater_);
+                rowUpdater_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000080;
+              break;
+            }
           }
         }
       } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -8894,6 +8920,10 @@ public final class ClientProtos {
       * <code>DELETE = 3;</code>
        */
       DELETE(3),
+      /**
+       * <code>UPDATE = 4;</code>
+       */
+      UPDATE(4),
       ;
 
       /**
@@ -8912,6 +8942,10 @@ public final class ClientProtos {
       * <code>DELETE = 3;</code>
        */
       public static final int DELETE_VALUE = 3;
+      /**
+       * <code>UPDATE = 4;</code>
+       */
+      public static final int UPDATE_VALUE = 4;
 
 
       public final int getNumber() {
@@ -8932,6 +8966,7 @@ public final class ClientProtos {
           case 1: return INCREMENT;
           case 2: return PUT;
           case 3: return DELETE;
+          case 4: return UPDATE;
           default: return null;
         }
       }
@@ -10983,6 +11018,27 @@ public final class ClientProtos {
       return nonce_;
     }
 
+    public static final int ROW_UPDATER_FIELD_NUMBER = 10;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater rowUpdater_;
+    /**
+     * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+     */
+    public boolean hasRowUpdater() {
+      return ((bitField0_ & 0x00000080) == 0x00000080);
+    }
+    /**
+     * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater getRowUpdater() {
+      return rowUpdater_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance() : rowUpdater_;
+    }
+    /**
+     * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder getRowUpdaterOrBuilder() {
+      return rowUpdater_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance() : rowUpdater_;
+    }
+
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
@@ -11001,6 +11057,12 @@ public final class ClientProtos {
           return false;
         }
       }
+      if (hasRowUpdater()) {
+        if (!getRowUpdater().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -11034,6 +11096,9 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00000040) == 0x00000040)) {
         output.writeUInt64(9, nonce_);
       }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        output.writeMessage(10, getRowUpdater());
+      }
       unknownFields.writeTo(output);
     }
 
@@ -11078,6 +11143,10 @@ public final class ClientProtos {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
           .computeUInt64Size(9, nonce_);
       }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(10, getRowUpdater());
+      }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
       return size;
@@ -11132,6 +11201,11 @@ public final class ClientProtos {
         result = result && (getNonce()
             == other.getNonce());
       }
+      result = result && (hasRowUpdater() == other.hasRowUpdater());
+      if (hasRowUpdater()) {
+        result = result && getRowUpdater()
+            .equals(other.getRowUpdater());
+      }
       result = result && unknownFields.equals(other.unknownFields);
       return result;
     }
@@ -11181,6 +11255,10 @@ public final class ClientProtos {
         hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
             getNonce());
       }
+      if (hasRowUpdater()) {
+        hash = (37 * hash) + ROW_UPDATER_FIELD_NUMBER;
+        hash = (53 * hash) + getRowUpdater().hashCode();
+      }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -11307,6 +11385,7 @@ public final class ClientProtos {
           getColumnValueFieldBuilder();
           getAttributeFieldBuilder();
           getTimeRangeFieldBuilder();
+          getRowUpdaterFieldBuilder();
         }
       }
       public Builder clear() {
@@ -11341,6 +11420,12 @@ public final class ClientProtos {
         bitField0_ = (bitField0_ & ~0x00000080);
         nonce_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000100);
+        if (rowUpdaterBuilder_ == null) {
+          rowUpdater_ = null;
+        } else {
+          rowUpdaterBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000200);
         return this;
       }
 
@@ -11415,6 +11500,14 @@ public final class ClientProtos {
           to_bitField0_ |= 0x00000040;
         }
         result.nonce_ = nonce_;
+        if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
+          to_bitField0_ |= 0x00000080;
+        }
+        if (rowUpdaterBuilder_ == null) {
+          result.rowUpdater_ = rowUpdater_;
+        } else {
+          result.rowUpdater_ = rowUpdaterBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -11530,6 +11623,9 @@ public final class ClientProtos {
         if (other.hasNonce()) {
           setNonce(other.getNonce());
         }
+        if (other.hasRowUpdater()) {
+          mergeRowUpdater(other.getRowUpdater());
+        }
         this.mergeUnknownFields(other.unknownFields);
         onChanged();
         return this;
@@ -11546,6 +11642,11 @@ public final class ClientProtos {
             return false;
           }
         }
+        if (hasRowUpdater()) {
+          if (!getRowUpdater().isInitialized()) {
+            return false;
+          }
+        }
         return true;
       }
 
@@ -12449,6 +12550,124 @@ public final class ClientProtos {
         onChanged();
         return this;
       }
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater rowUpdater_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater, org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder> rowUpdaterBuilder_;
+      /**
+       * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+       */
+      public boolean hasRowUpdater() {
+        return ((bitField0_ & 0x00000200) == 0x00000200);
+      }
+      /**
+       * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater getRowUpdater() {
+        if (rowUpdaterBuilder_ == null) {
+          return rowUpdater_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance() : rowUpdater_;
+        } else {
+          return rowUpdaterBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+       */
+      public Builder setRowUpdater(org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater value) {
+        if (rowUpdaterBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          rowUpdater_ = value;
+          onChanged();
+        } else {
+          rowUpdaterBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000200;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+       */
+      public Builder setRowUpdater(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder builderForValue) {
+        if (rowUpdaterBuilder_ == null) {
+          rowUpdater_ = builderForValue.build();
+          onChanged();
+        } else {
+          rowUpdaterBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000200;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+       */
+      public Builder mergeRowUpdater(org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater value) {
+        if (rowUpdaterBuilder_ == null) {
+          if (((bitField0_ & 0x00000200) == 0x00000200) &&
+              rowUpdater_ != null &&
+              rowUpdater_ != org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance()) {
+            rowUpdater_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.newBuilder(rowUpdater_).mergeFrom(value).buildPartial();
+          } else {
+            rowUpdater_ = value;
+          }
+          onChanged();
+        } else {
+          rowUpdaterBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000200;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+       */
+      public Builder clearRowUpdater() {
+        if (rowUpdaterBuilder_ == null) {
+          rowUpdater_ = null;
+          onChanged();
+        } else {
+          rowUpdaterBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000200);
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder getRowUpdaterBuilder() {
+        bitField0_ |= 0x00000200;
+        onChanged();
+        return getRowUpdaterFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder getRowUpdaterOrBuilder() {
+        if (rowUpdaterBuilder_ != null) {
+          return rowUpdaterBuilder_.getMessageOrBuilder();
+        } else {
+          return rowUpdater_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance() : rowUpdater_;
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.RowUpdater row_updater = 10;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater, org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder> 
+          getRowUpdaterFieldBuilder() {
+        if (rowUpdaterBuilder_ == null) {
+          rowUpdaterBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater, org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder>(
+                  getRowUpdater(),
+                  getParentForChildren(),
+                  isClean());
+          rowUpdater_ = null;
+        }
+        return rowUpdaterBuilder_;
+      }
       public final Builder setUnknownFields(
           final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
         return super.setUnknownFields(unknownFields);
@@ -40381,163 +40600,165 @@ public final class ClientProtos {
     java.lang.String[] descriptorData = {
       "\n\014Client.proto\022\010hbase.pb\032\013HBase.proto\032\014F" +
       "ilter.proto\032\nCell.proto\032\020Comparator.prot" +
-      "o\032\017MapReduce.proto\"\037\n\016Authorizations\022\r\n\005" +
-      "label\030\001 \003(\t\"$\n\016CellVisibility\022\022\n\nexpress" +
-      "ion\030\001 \002(\t\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tq" +
-      "ualifier\030\002 \003(\014\"\276\003\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" +
-      "olumn\030\002 \003(\0132\020.hbase.pb.Column\022*\n\tattribu" +
-      "te\030\003 \003(\0132\027.hbase.pb.NameBytesPair\022 \n\006fil" +
-      "ter\030\004 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_rang" +
-      "e\030\005 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_vers",
-      "ions\030\006 \001(\r:\0011\022\032\n\014cache_blocks\030\007 \001(\010:\004tru" +
-      "e\022\023\n\013store_limit\030\010 \001(\r\022\024\n\014store_offset\030\t" +
-      " \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\0222\n\013c" +
-      "onsistency\030\014 \001(\0162\025.hbase.pb.Consistency:" +
-      "\006STRONG\0226\n\rcf_time_range\030\r \003(\0132\037.hbase.p" +
-      "b.ColumnFamilyTimeRange\022&\n\036load_column_f" +
-      "amilies_on_demand\030\016 \001(\010\"\203\001\n\006Result\022\034\n\004ce" +
-      "ll\030\001 \003(\0132\016.hbase.pb.Cell\022\035\n\025associated_c" +
-      "ell_count\030\002 \001(\005\022\016\n\006exists\030\003 \001(\010\022\024\n\005stale" +
-      "\030\004 \001(\010:\005false\022\026\n\007partial\030\005 \001(\010:\005false\"S\n",
-      "\nGetRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" +
-      "egionSpecifier\022\032\n\003get\030\002 \002(\0132\r.hbase.pb.G" +
-      "et\"/\n\013GetResponse\022 \n\006result\030\001 \001(\0132\020.hbas" +
-      "e.pb.Result\"\222\001\n\tCondition\022\013\n\003row\030\001 \002(\014\022\016" +
-      "\n\006family\030\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022+\n\014com" +
-      "pare_type\030\004 \002(\0162\025.hbase.pb.CompareType\022(" +
-      "\n\ncomparator\030\005 \002(\0132\024.hbase.pb.Comparator" +
-      "\"\364\006\n\rMutationProto\022\013\n\003row\030\001 \001(\014\0229\n\013mutat" +
-      "e_type\030\002 \001(\0162$.hbase.pb.MutationProto.Mu" +
-      "tationType\0229\n\014column_value\030\003 \003(\0132#.hbase",
-      ".pb.MutationProto.ColumnValue\022\021\n\ttimesta" +
-      "mp\030\004 \001(\004\022*\n\tattribute\030\005 \003(\0132\027.hbase.pb.N" +
-      "ameBytesPair\022C\n\ndurability\030\006 \001(\0162\".hbase" +
-      ".pb.MutationProto.Durability:\013USE_DEFAUL" +
-      "T\022\'\n\ntime_range\030\007 \001(\0132\023.hbase.pb.TimeRan" +
-      "ge\022\035\n\025associated_cell_count\030\010 \001(\005\022\r\n\005non" +
-      "ce\030\t \001(\004\032\371\001\n\013ColumnValue\022\016\n\006family\030\001 \002(\014" +
-      "\022K\n\017qualifier_value\030\002 \003(\01322.hbase.pb.Mut" +
-      "ationProto.ColumnValue.QualifierValue\032\214\001" +
-      "\n\016QualifierValue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005v",
-      "alue\030\002 \001(\014\022\021\n\ttimestamp\030\003 \001(\004\0227\n\013delete_" +
-      "type\030\004 \001(\0162\".hbase.pb.MutationProto.Dele" +
-      "teType\022\014\n\004tags\030\005 \001(\014\"W\n\nDurability\022\017\n\013US" +
-      "E_DEFAULT\020\000\022\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002" +
-      "\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSYNC_WAL\020\004\">\n\014Mutatio" +
-      "nType\022\n\n\006APPEND\020\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020" +
-      "\002\022\n\n\006DELETE\020\003\"p\n\nDeleteType\022\026\n\022DELETE_ON" +
-      "E_VERSION\020\000\022\034\n\030DELETE_MULTIPLE_VERSIONS\020" +
-      "\001\022\021\n\rDELETE_FAMILY\020\002\022\031\n\025DELETE_FAMILY_VE" +
-      "RSION\020\003\"\242\001\n\rMutateRequest\022)\n\006region\030\001 \002(",
-      "\0132\031.hbase.pb.RegionSpecifier\022)\n\010mutation" +
-      "\030\002 \002(\0132\027.hbase.pb.MutationProto\022&\n\tcondi" +
-      "tion\030\003 \001(\0132\023.hbase.pb.Condition\022\023\n\013nonce" +
-      "_group\030\004 \001(\004\"E\n\016MutateResponse\022 \n\006result" +
-      "\030\001 \001(\0132\020.hbase.pb.Result\022\021\n\tprocessed\030\002 " +
-      "\001(\010\"\275\004\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." +
-      "Column\022*\n\tattribute\030\002 \003(\0132\027.hbase.pb.Nam" +
-      "eBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_ro" +
-      "w\030\004 \001(\014\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filte" +
-      "r\022\'\n\ntime_range\030\006 \001(\0132\023.hbase.pb.TimeRan",
-      "ge\022\027\n\014max_versions\030\007 \001(\r:\0011\022\032\n\014cache_blo" +
-      "cks\030\010 \001(\010:\004true\022\022\n\nbatch_size\030\t \001(\r\022\027\n\017m" +
-      "ax_result_size\030\n \001(\004\022\023\n\013store_limit\030\013 \001(" +
-      "\r\022\024\n\014store_offset\030\014 \001(\r\022&\n\036load_column_f" +
-      "amilies_on_demand\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027" +
-      "\n\010reversed\030\017 \001(\010:\005false\0222\n\013consistency\030\020" +
-      " \001(\0162\025.hbase.pb.Consistency:\006STRONG\022\017\n\007c" +
-      "aching\030\021 \001(\r\022\035\n\025allow_partial_results\030\022 " +
-      "\001(\010\0226\n\rcf_time_range\030\023 \003(\0132\037.hbase.pb.Co" +
-      "lumnFamilyTimeRange\"\246\002\n\013ScanRequest\022)\n\006r",
-      "egion\030\001 \001(\0132\031.hbase.pb.RegionSpecifier\022\034" +
-      "\n\004scan\030\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_" +
-      "id\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclos" +
-      "e_scanner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037" +
-      "\n\027client_handles_partials\030\007 \001(\010\022!\n\031clien" +
-      "t_handles_heartbeats\030\010 \001(\010\022\032\n\022track_scan" +
-      "_metrics\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\"\232\002\n" +
-      "\014ScanResponse\022\030\n\020cells_per_result\030\001 \003(\r\022" +
-      "\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(" +
-      "\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.",
-      "pb.Result\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag" +
-      "_per_result\030\007 \003(\010\022\036\n\026more_results_in_reg" +
-      "ion\030\010 \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014" +
-      "scan_metrics\030\n \001(\0132\025.hbase.pb.ScanMetric" +
-      "s\"\240\002\n\024BulkLoadHFileRequest\022)\n\006region\030\001 \002" +
-      "(\0132\031.hbase.pb.RegionSpecifier\022>\n\013family_" +
-      "path\030\002 \003(\0132).hbase.pb.BulkLoadHFileReque" +
-      "st.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\022+\n" +
-      "\010fs_token\030\004 \001(\0132\031.hbase.pb.DelegationTok" +
-      "en\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 \001(",
-      "\010:\005false\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014" +
-      "\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022\016\n" +
-      "\006loaded\030\001 \002(\010\"V\n\017DelegationToken\022\022\n\niden" +
-      "tifier\030\001 \001(\014\022\020\n\010password\030\002 \001(\014\022\014\n\004kind\030\003" +
-      " \001(\t\022\017\n\007service\030\004 \001(\t\"l\n\026PrepareBulkLoad" +
-      "Request\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.T" +
-      "ableName\022)\n\006region\030\002 \001(\0132\031.hbase.pb.Regi" +
-      "onSpecifier\"-\n\027PrepareBulkLoadResponse\022\022" +
-      "\n\nbulk_token\030\001 \002(\t\"W\n\026CleanupBulkLoadReq" +
-      "uest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001(\013",
-      "2\031.hbase.pb.RegionSpecifier\"\031\n\027CleanupBu" +
-      "lkLoadResponse\"a\n\026CoprocessorServiceCall" +
-      "\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013m" +
-      "ethod_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030Cop" +
-      "rocessorServiceResult\022&\n\005value\030\001 \001(\0132\027.h" +
-      "base.pb.NameBytesPair\"v\n\031CoprocessorServ" +
-      "iceRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Re" +
-      "gionSpecifier\022.\n\004call\030\002 \002(\0132 .hbase.pb.C" +
-      "oprocessorServiceCall\"o\n\032CoprocessorServ" +
-      "iceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R",
-      "egionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase.pb" +
-      ".NameBytesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001(\r" +
-      "\022)\n\010mutation\030\002 \001(\0132\027.hbase.pb.MutationPr" +
-      "oto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014servi" +
-      "ce_call\030\004 \001(\0132 .hbase.pb.CoprocessorServ" +
-      "iceCall\"k\n\014RegionAction\022)\n\006region\030\001 \002(\0132" +
-      "\031.hbase.pb.RegionSpecifier\022\016\n\006atomic\030\002 \001" +
-      "(\010\022 \n\006action\030\003 \003(\0132\020.hbase.pb.Action\"c\n\017" +
-      "RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:\0010" +
-      "\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compaction",
-      "Pressure\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadStat" +
-      "s\022)\n\006region\030\001 \003(\0132\031.hbase.pb.RegionSpeci" +
-      "fier\022\'\n\004stat\030\002 \003(\0132\031.hbase.pb.RegionLoad" +
-      "Stats\"\336\001\n\021ResultOrException\022\r\n\005index\030\001 \001" +
-      "(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\t" +
-      "exception\030\003 \001(\0132\027.hbase.pb.NameBytesPair" +
-      "\022:\n\016service_result\030\004 \001(\0132\".hbase.pb.Copr" +
-      "ocessorServiceResult\0220\n\tloadStats\030\005 \001(\0132" +
-      "\031.hbase.pb.RegionLoadStatsB\002\030\001\"x\n\022Region" +
-      "ActionResult\0226\n\021resultOrException\030\001 \003(\0132",
-      "\033.hbase.pb.ResultOrException\022*\n\texceptio" +
-      "n\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mult" +
-      "iRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase.p" +
-      "b.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tco" +
-      "ndition\030\003 \001(\0132\023.hbase.pb.Condition\"\226\001\n\rM" +
-      "ultiResponse\0228\n\022regionActionResult\030\001 \003(\013" +
-      "2\034.hbase.pb.RegionActionResult\022\021\n\tproces" +
-      "sed\030\002 \001(\010\0228\n\020regionStatistics\030\003 \001(\0132\036.hb" +
-      "ase.pb.MultiRegionLoadStats*\'\n\013Consisten" +
-      "cy\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClientS",
-      "ervice\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025.hb" +
-      "ase.pb.GetResponse\022;\n\006Mutate\022\027.hbase.pb." +
-      "MutateRequest\032\030.hbase.pb.MutateResponse\022" +
-      "5\n\004Scan\022\025.hbase.pb.ScanRequest\032\026.hbase.p" +
-      "b.ScanResponse\022P\n\rBulkLoadHFile\022\036.hbase." +
-      "pb.BulkLoadHFileRequest\032\037.hbase.pb.BulkL" +
-      "oadHFileResponse\022V\n\017PrepareBulkLoad\022 .hb" +
-      "ase.pb.PrepareBulkLoadRequest\032!.hbase.pb" +
-      ".PrepareBulkLoadResponse\022V\n\017CleanupBulkL" +
-      "oad\022 .hbase.pb.CleanupBulkLoadRequest\032!.",
-      "hbase.pb.CleanupBulkLoadResponse\022X\n\013Exec" +
-      "Service\022#.hbase.pb.CoprocessorServiceReq" +
-      "uest\032$.hbase.pb.CoprocessorServiceRespon" +
-      "se\022d\n\027ExecRegionServerService\022#.hbase.pb" +
-      ".CoprocessorServiceRequest\032$.hbase.pb.Co" +
-      "processorServiceResponse\0228\n\005Multi\022\026.hbas" +
-      "e.pb.MultiRequest\032\027.hbase.pb.MultiRespon" +
-      "seBI\n1org.apache.hadoop.hbase.shaded.pro" +
-      "tobuf.generatedB\014ClientProtosH\001\210\001\001\240\001\001"
+      "o\032\017MapReduce.proto\032\020RowUpdater.proto\"\037\n\016" +
+      "Authorizations\022\r\n\005label\030\001 \003(\t\"$\n\016CellVis" +
+      "ibility\022\022\n\nexpression\030\001 \002(\t\"+\n\006Column\022\016\n" +
+      "\006family\030\001 \002(\014\022\021\n\tqualifier\030\002 \003(\014\"\276\003\n\003Get" +
+      "\022\013\n\003row\030\001 \002(\014\022 \n\006column\030\002 \003(\0132\020.hbase.pb" +
+      ".Column\022*\n\tattribute\030\003 \003(\0132\027.hbase.pb.Na" +
+      "meBytesPair\022 \n\006filter\030\004 \001(\0132\020.hbase.pb.F" +
+      "ilter\022\'\n\ntime_range\030\005 \001(\0132\023.hbase.pb.Tim",
+      "eRange\022\027\n\014max_versions\030\006 \001(\r:\0011\022\032\n\014cache" +
+      "_blocks\030\007 \001(\010:\004true\022\023\n\013store_limit\030\010 \001(\r" +
+      "\022\024\n\014store_offset\030\t \001(\r\022\035\n\016existence_only" +
+      "\030\n \001(\010:\005false\0222\n\013consistency\030\014 \001(\0162\025.hba" +
+      "se.pb.Consistency:\006STRONG\0226\n\rcf_time_ran" +
+      "ge\030\r \003(\0132\037.hbase.pb.ColumnFamilyTimeRang" +
+      "e\022&\n\036load_column_families_on_demand\030\016 \001(" +
+      "\010\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase.pb.Ce" +
+      "ll\022\035\n\025associated_cell_count\030\002 \001(\005\022\016\n\006exi" +
+      "sts\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n\007parti",
+      "al\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006region\030" +
+      "\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032\n\003get\030" +
+      "\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetResponse\022 \n\006r" +
+      "esult\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\tCondit" +
+      "ion\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqual" +
+      "ifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162\025.hbas" +
+      "e.pb.CompareType\022(\n\ncomparator\030\005 \002(\0132\024.h" +
+      "base.pb.Comparator\"\253\007\n\rMutationProto\022\013\n\003" +
+      "row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.hbase.p" +
+      "b.MutationProto.MutationType\0229\n\014column_v",
+      "alue\030\003 \003(\0132#.hbase.pb.MutationProto.Colu" +
+      "mnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattribute\030" +
+      "\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\ndurabi" +
+      "lity\030\006 \001(\0162\".hbase.pb.MutationProto.Dura" +
+      "bility:\013USE_DEFAULT\022\'\n\ntime_range\030\007 \001(\0132" +
+      "\023.hbase.pb.TimeRange\022\035\n\025associated_cell_" +
+      "count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\022)\n\013row_update" +
+      "r\030\n \001(\0132\024.hbase.pb.RowUpdater\032\371\001\n\013Column" +
+      "Value\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_value" +
+      "\030\002 \003(\01322.hbase.pb.MutationProto.ColumnVa",
+      "lue.QualifierValue\032\214\001\n\016QualifierValue\022\021\n" +
+      "\tqualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimes" +
+      "tamp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hbase." +
+      "pb.MutationProto.DeleteType\022\014\n\004tags\030\005 \001(" +
+      "\014\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010SKIP" +
+      "_WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tF" +
+      "SYNC_WAL\020\004\"J\n\014MutationType\022\n\n\006APPEND\020\000\022\r" +
+      "\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\022\n\n\006UP" +
+      "DATE\020\004\"p\n\nDeleteType\022\026\n\022DELETE_ONE_VERSI" +
+      "ON\020\000\022\034\n\030DELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDE",
+      "LETE_FAMILY\020\002\022\031\n\025DELETE_FAMILY_VERSION\020\003" +
+      "\"\242\001\n\rMutateRequest\022)\n\006region\030\001 \002(\0132\031.hba" +
+      "se.pb.RegionSpecifier\022)\n\010mutation\030\002 \002(\0132" +
+      "\027.hbase.pb.MutationProto\022&\n\tcondition\030\003 " +
+      "\001(\0132\023.hbase.pb.Condition\022\023\n\013nonce_group\030" +
+      "\004 \001(\004\"E\n\016MutateResponse\022 \n\006result\030\001 \001(\0132" +
+      "\020.hbase.pb.Result\022\021\n\tprocessed\030\002 \001(\010\"\275\004\n" +
+      "\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb.Column\022" +
+      "*\n\tattribute\030\002 \003(\0132\027.hbase.pb.NameBytesP" +
+      "air\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014",
+      "\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filter\022\'\n\nti" +
+      "me_range\030\006 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014m" +
+      "ax_versions\030\007 \001(\r:\0011\022\032\n\014cache_blocks\030\010 \001" +
+      "(\010:\004true\022\022\n\nbatch_size\030\t \001(\r\022\027\n\017max_resu" +
+      "lt_size\030\n \001(\004\022\023\n\013store_limit\030\013 \001(\r\022\024\n\014st" +
+      "ore_offset\030\014 \001(\r\022&\n\036load_column_families" +
+      "_on_demand\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010rever" +
+      "sed\030\017 \001(\010:\005false\0222\n\013consistency\030\020 \001(\0162\025." +
+      "hbase.pb.Consistency:\006STRONG\022\017\n\007caching\030" +
+      "\021 \001(\r\022\035\n\025allow_partial_results\030\022 \001(\010\0226\n\r",
+      "cf_time_range\030\023 \003(\0132\037.hbase.pb.ColumnFam" +
+      "ilyTimeRange\"\246\002\n\013ScanRequest\022)\n\006region\030\001" +
+      " \001(\0132\031.hbase.pb.RegionSpecifier\022\034\n\004scan\030" +
+      "\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_id\030\003 \001(" +
+      "\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclose_scann" +
+      "er\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027clien" +
+      "t_handles_partials\030\007 \001(\010\022!\n\031client_handl" +
+      "es_heartbeats\030\010 \001(\010\022\032\n\022track_scan_metric" +
+      "s\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\"\232\002\n\014ScanRe" +
+      "sponse\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscan",
+      "ner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003tt" +
+      "l\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Resu" +
+      "lt\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_re" +
+      "sult\030\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001" +
+      "(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_me" +
+      "trics\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\240\002\n\024B" +
+      "ulkLoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.hb" +
+      "ase.pb.RegionSpecifier\022>\n\013family_path\030\002 " +
+      "\003(\0132).hbase.pb.BulkLoadHFileRequest.Fami" +
+      "lyPath\022\026\n\016assign_seq_num\030\003 \001(\010\022+\n\010fs_tok",
+      "en\030\004 \001(\0132\031.hbase.pb.DelegationToken\022\022\n\nb" +
+      "ulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 \001(\010:\005fals" +
+      "e\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030" +
+      "\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022\016\n\006loaded" +
+      "\030\001 \002(\010\"V\n\017DelegationToken\022\022\n\nidentifier\030" +
+      "\001 \001(\014\022\020\n\010password\030\002 \001(\014\022\014\n\004kind\030\003 \001(\t\022\017\n" +
+      "\007service\030\004 \001(\t\"l\n\026PrepareBulkLoadRequest" +
+      "\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNam" +
+      "e\022)\n\006region\030\002 \001(\0132\031.hbase.pb.RegionSpeci" +
+      "fier\"-\n\027PrepareBulkLoadResponse\022\022\n\nbulk_",
+      "token\030\001 \002(\t\"W\n\026CleanupBulkLoadRequest\022\022\n" +
+      "\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001(\0132\031.hbas" +
+      "e.pb.RegionSpecifier\"\031\n\027CleanupBulkLoadR" +
+      "esponse\"a\n\026CoprocessorServiceCall\022\013\n\003row" +
+      "\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013method_n" +
+      "ame\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030Coprocesso" +
+      "rServiceResult\022&\n\005value\030\001 \001(\0132\027.hbase.pb" +
+      ".NameBytesPair\"v\n\031CoprocessorServiceRequ" +
+      "est\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpe" +
+      "cifier\022.\n\004call\030\002 \002(\0132 .hbase.pb.Coproces",
+      "sorServiceCall\"o\n\032CoprocessorServiceResp" +
+      "onse\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" +
+      "ecifier\022&\n\005value\030\002 \002(\0132\027.hbase.pb.NameBy" +
+      "tesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mut" +
+      "ation\030\002 \001(\0132\027.hbase.pb.MutationProto\022\032\n\003" +
+      "get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014service_call" +
+      "\030\004 \001(\0132 .hbase.pb.CoprocessorServiceCall" +
+      "\"k\n\014RegionAction\022)\n\006region\030\001 \002(\0132\031.hbase" +
+      ".pb.RegionSpecifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006a" +
+      "ction\030\003 \003(\0132\020.hbase.pb.Action\"c\n\017RegionL",
+      "oadStats\022\027\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rhea" +
+      "pOccupancy\030\002 \001(\005:\0010\022\035\n\022compactionPressur" +
+      "e\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadStats\022)\n\006re" +
+      "gion\030\001 \003(\0132\031.hbase.pb.RegionSpecifier\022\'\n" +
+      "\004stat\030\002 \003(\0132\031.hbase.pb.RegionLoadStats\"\336" +
+      "\001\n\021ResultOrException\022\r\n\005index\030\001 \001(\r\022 \n\006r" +
+      "esult\030\002 \001(\0132\020.hbase.pb.Result\022*\n\texcepti" +
+      "on\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022:\n\016ser" +
+      "vice_result\030\004 \001(\0132\".hbase.pb.Coprocessor" +
+      "ServiceResult\0220\n\tloadStats\030\005 \001(\0132\031.hbase",
+      ".pb.RegionLoadStatsB\002\030\001\"x\n\022RegionActionR" +
+      "esult\0226\n\021resultOrException\030\001 \003(\0132\033.hbase" +
+      ".pb.ResultOrException\022*\n\texception\030\002 \001(\013" +
+      "2\027.hbase.pb.NameBytesPair\"x\n\014MultiReques" +
+      "t\022,\n\014regionAction\030\001 \003(\0132\026.hbase.pb.Regio" +
+      "nAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tcondition" +
+      "\030\003 \001(\0132\023.hbase.pb.Condition\"\226\001\n\rMultiRes" +
+      "ponse\0228\n\022regionActionResult\030\001 \003(\0132\034.hbas" +
+      "e.pb.RegionActionResult\022\021\n\tprocessed\030\002 \001" +
+      "(\010\0228\n\020regionStatistics\030\003 \001(\0132\036.hbase.pb.",
+      "MultiRegionLoadStats*\'\n\013Consistency\022\n\n\006S" +
+      "TRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClientService\022" +
+      "2\n\003Get\022\024.hbase.pb.GetRequest\032\025.hbase.pb." +
+      "GetResponse\022;\n\006Mutate\022\027.hbase.pb.MutateR" +
+      "equest\032\030.hbase.pb.MutateResponse\0225\n\004Scan" +
+      "\022\025.hbase.pb.ScanRequest\032\026.hbase.pb.ScanR" +
+      "esponse\022P\n\rBulkLoadHFile\022\036.hbase.pb.Bulk" +
+      "LoadHFileRequest\032\037.hbase.pb.BulkLoadHFil" +
+      "eResponse\022V\n\017PrepareBulkLoad\022 .hbase.pb." +
+      "PrepareBulkLoadRequest\032!.hbase.pb.Prepar",
+      "eBulkLoadResponse\022V\n\017CleanupBulkLoad\022 .h" +
+      "base.pb.CleanupBulkLoadRequest\032!.hbase.p" +
+      "b.CleanupBulkLoadResponse\022X\n\013ExecService" +
+      "\022#.hbase.pb.CoprocessorServiceRequest\032$." +
+      "hbase.pb.CoprocessorServiceResponse\022d\n\027E" +
+      "xecRegionServerService\022#.hbase.pb.Coproc" +
+      "essorServiceRequest\032$.hbase.pb.Coprocess" +
+      "orServiceResponse\0228\n\005Multi\022\026.hbase.pb.Mu" +
+      "ltiRequest\032\027.hbase.pb.MultiResponseBI\n1o" +
+      "rg.apache.hadoop.hbase.shaded.protobuf.g",
+      "eneratedB\014ClientProtosH\001\210\001\001\240\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
        new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -40555,6 +40776,7 @@ public final class ClientProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.CellProtos.getDescriptor(),
           org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos.getDescriptor(),
           org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos.getDescriptor(),
+          org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.getDescriptor(),
         }, assigner);
     internal_static_hbase_pb_Authorizations_descriptor =
       getDescriptor().getMessageTypes().get(0);
@@ -40609,7 +40831,7 @@ public final class ClientProtos {
     internal_static_hbase_pb_MutationProto_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_MutationProto_descriptor,
-        new java.lang.String[] { "Row", "MutateType", "ColumnValue", "Timestamp", "Attribute", "Durability", "TimeRange", "AssociatedCellCount", "Nonce", });
+        new java.lang.String[] { "Row", "MutateType", "ColumnValue", "Timestamp", "Attribute", "Durability", "TimeRange", "AssociatedCellCount", "Nonce", "RowUpdater", });
     internal_static_hbase_pb_MutationProto_ColumnValue_descriptor =
       internal_static_hbase_pb_MutationProto_descriptor.getNestedTypes().get(0);
     internal_static_hbase_pb_MutationProto_ColumnValue_fieldAccessorTable = new
@@ -40777,6 +40999,7 @@ public final class ClientProtos {
     org.apache.hadoop.hbase.shaded.protobuf.generated.CellProtos.getDescriptor();
     org.apache.hadoop.hbase.shaded.protobuf.generated.ComparatorProtos.getDescriptor();
     org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos.getDescriptor();
+    org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.getDescriptor();
   }
 
   // @@protoc_insertion_point(outer_class_scope)
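
[Editor's note] With the shaded descriptors regenerated above, MutationProto now carries the UPDATE mutation type and a row_updater field (tag 10). A minimal client-side sketch of what that enables, assuming the standard protoc-generated setters (setRow, setMutateType, setRowUpdater) implied by the field accessor table rewired above — they are not quoted verbatim from this patch:

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater;

    public class UpdateMutationSketch {
      // Builds the proto an UPDATE-typed mutation would put on the wire:
      // the target row, the UPDATE type, and the updater envelope in field 10.
      public static MutationProto buildUpdate(byte[] row, RowUpdater updater) {
        return MutationProto.newBuilder()
            .setRow(ByteString.copyFrom(row))
            .setMutateType(MutationProto.MutationType.UPDATE)
            .setRowUpdater(updater)
            .build();
      }
    }
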
diff --git hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RowUpdaterProtos.java hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RowUpdaterProtos.java
new file mode 100644
index 0000000..f42bb25
--- /dev/null
+++ hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RowUpdaterProtos.java
@@ -0,0 +1,698 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: RowUpdater.proto
+
+package org.apache.hadoop.hbase.shaded.protobuf.generated;
+
+public final class RowUpdaterProtos {
+  private RowUpdaterProtos() {}
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite registry) {
+  }
+
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry registry) {
+    registerAllExtensions(
+        (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
+  }
+  public interface RowUpdaterOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.RowUpdater)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * required string name = 1;
+     */
+    boolean hasName();
+    /**
+     * required string name = 1;
+     */
+    java.lang.String getName();
+    /**
+     * required string name = 1;
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getNameBytes();
+
+    /**
+     * optional bytes serialized_updater = 2;
+     */
+    boolean hasSerializedUpdater();
+    /**
+     * optional bytes serialized_updater = 2;
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSerializedUpdater();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RowUpdater}
+   */
+  public  static final class RowUpdater extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.RowUpdater)
+      RowUpdaterOrBuilder {
+    // Use RowUpdater.newBuilder() to construct.
+    private RowUpdater(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private RowUpdater() {
+      name_ = "";
+      serializedUpdater_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RowUpdater(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000001;
+              name_ = bs;
+              break;
+            }
+            case 18: {
+              bitField0_ |= 0x00000002;
+              serializedUpdater_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.internal_static_hbase_pb_RowUpdater_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.internal_static_hbase_pb_RowUpdater_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int NAME_FIELD_NUMBER = 1;
+    private volatile java.lang.Object name_;
+    /**
+     * required string name = 1;
+     */
+    public boolean hasName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * required string name = 1;
+     */
+    public java.lang.String getName() {
+      java.lang.Object ref = name_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          name_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * required string name = 1;
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getNameBytes() {
+      java.lang.Object ref = name_;
+      if (ref instanceof java.lang.String) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        name_ = b;
+        return b;
+      } else {
+        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    public static final int SERIALIZED_UPDATER_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString serializedUpdater_;
+    /**
+     * optional bytes serialized_updater = 2;
+     */
+    public boolean hasSerializedUpdater() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * optional bytes serialized_updater = 2;
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSerializedUpdater() {
+      return serializedUpdater_;
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (!hasName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBytes(2, serializedUpdater_);
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeBytesSize(2, serializedUpdater_);
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater) obj;
+
+      boolean result = true;
+      result = result && (hasName() == other.hasName());
+      if (hasName()) {
+        result = result && getName()
+            .equals(other.getName());
+      }
+      result = result && (hasSerializedUpdater() == other.hasSerializedUpdater());
+      if (hasSerializedUpdater()) {
+        result = result && getSerializedUpdater()
+            .equals(other.getSerializedUpdater());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasName()) {
+        hash = (37 * hash) + NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getName().hashCode();
+      }
+      if (hasSerializedUpdater()) {
+        hash = (37 * hash) + SERIALIZED_UPDATER_FIELD_NUMBER;
+        hash = (53 * hash) + getSerializedUpdater().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RowUpdater}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.RowUpdater)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.internal_static_hbase_pb_RowUpdater_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.internal_static_hbase_pb_RowUpdater_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        name_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        serializedUpdater_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.internal_static_hbase_pb_RowUpdater_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.name_ = name_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.serializedUpdater_ = serializedUpdater_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance()) return this;
+        if (other.hasName()) {
+          bitField0_ |= 0x00000001;
+          name_ = other.name_;
+          onChanged();
+        }
+        if (other.hasSerializedUpdater()) {
+          setSerializedUpdater(other.getSerializedUpdater());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasName()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.lang.Object name_ = "";
+      /**
+       * required string name = 1;
+       */
+      public boolean hasName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * required string name = 1;
+       */
+      public java.lang.String getName() {
+        java.lang.Object ref = name_;
+        if (!(ref instanceof java.lang.String)) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            name_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * required string name = 1;
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+          getNameBytes() {
+        java.lang.Object ref = name_;
+        if (ref instanceof String) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          name_ = b;
+          return b;
+        } else {
+          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * required string name = 1;
+       */
+      public Builder setName(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        name_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * required string name = 1;
+       */
+      public Builder clearName() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        name_ = getDefaultInstance().getName();
+        onChanged();
+        return this;
+      }
+      /**
+       * required string name = 1;
+       */
+      public Builder setNameBytes(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        name_ = value;
+        onChanged();
+        return this;
+      }
+
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString serializedUpdater_ = org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
+      /**
+       * optional bytes serialized_updater = 2;
+       */
+      public boolean hasSerializedUpdater() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * optional bytes serialized_updater = 2;
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString getSerializedUpdater() {
+        return serializedUpdater_;
+      }
+      /**
+       * optional bytes serialized_updater = 2;
+       */
+      public Builder setSerializedUpdater(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000002;
+        serializedUpdater_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * optional bytes serialized_updater = 2;
+       */
+      public Builder clearSerializedUpdater() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        serializedUpdater_ = getDefaultInstance().getSerializedUpdater();
+        onChanged();
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RowUpdater)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RowUpdater)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RowUpdater>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<RowUpdater>() {
+      public RowUpdater parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new RowUpdater(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RowUpdater> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RowUpdater> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos.RowUpdater getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_RowUpdater_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_RowUpdater_fieldAccessorTable;
+
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static  org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\020RowUpdater.proto\022\010hbase.pb\"6\n\nRowUpdat" +
+      "er\022\014\n\004name\030\001 \002(\t\022\032\n\022serialized_updater\030\002" +
+      " \001(\014BJ\n1org.apache.hadoop.hbase.shaded.p" +
+      "rotobuf.generatedB\020RowUpdaterProtosH\001\240\001\001"
+    };
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+        new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+          public org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry assignDescriptors(
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor root) {
+            descriptor = root;
+            return null;
+          }
+        };
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] {
+        }, assigner);
+    internal_static_hbase_pb_RowUpdater_descriptor =
+      getDescriptor().getMessageTypes().get(0);
+    internal_static_hbase_pb_RowUpdater_fieldAccessorTable = new
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+        internal_static_hbase_pb_RowUpdater_descriptor,
+        new java.lang.String[] { "Name", "SerializedUpdater", });
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
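
[Editor's note] The generated RowUpdater message is a thin envelope: a required name plus an optional, opaque serialized_updater payload. A round-trip sketch using only the accessors generated above; the name and payload values are illustrative placeholders:

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos;

    public class RowUpdaterRoundTrip {
      public static void main(String[] args) throws Exception {
        RowUpdaterProtos.RowUpdater proto = RowUpdaterProtos.RowUpdater.newBuilder()
            .setName("com.example.CounterUpdater")              // hypothetical updater class
            .setSerializedUpdater(ByteString.copyFromUtf8("x")) // opaque, updater-defined bytes
            .build();                                           // build() enforces the required name

        RowUpdaterProtos.RowUpdater parsed =
            RowUpdaterProtos.RowUpdater.parseFrom(proto.toByteArray());
        assert parsed.getName().equals("com.example.CounterUpdater");
        assert parsed.hasSerializedUpdater();
      }
    }
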
diff --git hbase-protocol-shaded/src/main/protobuf/Client.proto hbase-protocol-shaded/src/main/protobuf/Client.proto
index 2feaa26..56b5d38 100644
--- hbase-protocol-shaded/src/main/protobuf/Client.proto
+++ hbase-protocol-shaded/src/main/protobuf/Client.proto
@@ -30,6 +30,7 @@ import "Filter.proto";
 import "Cell.proto";
 import "Comparator.proto";
 import "MapReduce.proto";
+import "RowUpdater.proto";
 
 /**
  * The protocol buffer version of Authorizations.
@@ -167,6 +168,8 @@ message MutationProto {
 
   optional uint64 nonce = 9;
 
+  optional RowUpdater row_updater = 10;
+
   enum Durability {
     USE_DEFAULT  = 0;
     SKIP_WAL     = 1;
@@ -180,6 +183,7 @@ message MutationProto {
     INCREMENT = 1;
     PUT = 2;
     DELETE = 3;
+    UPDATE = 4;
   }
 
   enum DeleteType {
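
[Editor's note] Both schema changes are backward compatible: row_updater is optional, and because this is proto2, a peer that predates UPDATE shunts the unknown enum number 4 into its unknown-field set rather than failing to parse. A quick check against the regenerated (non-shaded) enum shown further below:

    import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;

    public class MutationTypeWireCheck {
      public static void main(String[] args) {
        // UPDATE rides the wire as enum number 4; valueOf(4) is the generated
        // reverse mapping added in the ClientProtos hunk below.
        assert MutationType.valueOf(4) == MutationType.UPDATE;
        assert MutationType.UPDATE.getNumber() == 4;
      }
    }
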
diff --git hbase-protocol-shaded/src/main/protobuf/RowUpdater.proto hbase-protocol-shaded/src/main/protobuf/RowUpdater.proto
new file mode 100644
index 0000000..e180a5a
--- /dev/null
+++ hbase-protocol-shaded/src/main/protobuf/RowUpdater.proto
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are used for row updaters
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
+option java_outer_classname = "RowUpdaterProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+message RowUpdater {
+  required string name = 1;
+  optional bytes serialized_updater = 2;
+}
\ No newline at end of file
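
[Editor's note] The name/serialized_updater pair follows the same convention HBase uses for filters: name identifies the concrete implementation to load on the server, and serialized_updater carries its state. A sketch of how a server side might rehydrate an updater; the static parseFrom(byte[]) convention mirrors the client-side RowUpdater interface added by this patch, while the reflective lookup itself is an assumption, not code from the patch:

    import java.lang.reflect.Method;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.RowUpdaterProtos;

    public final class RowUpdaterRehydrator {
      // Instantiate the updater named in the envelope via its static
      // parseFrom(byte[]) factory, as HBase does when deserializing filters.
      public static Object instantiate(RowUpdaterProtos.RowUpdater proto) throws Exception {
        Class<?> clazz = Class.forName(proto.getName());
        Method parseFrom = clazz.getMethod("parseFrom", byte[].class);
        byte[] state = proto.getSerializedUpdater().toByteArray(); // empty if unset
        return parseFrom.invoke(null, (Object) state);
      }
    }
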
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index dc050e8..21be07d 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -8390,6 +8390,20 @@ public final class ClientProtos {
      * optional uint64 nonce = 9;
      */
     long getNonce();
+
+    // optional .hbase.pb.RowUpdater row_updater = 10;
+    /**
+     * optional .hbase.pb.RowUpdater row_updater = 10;
+     */
+    boolean hasRowUpdater();
+    /**
+     * optional .hbase.pb.RowUpdater row_updater = 10;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater getRowUpdater();
+    /**
+     * optional .hbase.pb.RowUpdater row_updater = 10;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder getRowUpdaterOrBuilder();
   }
   /**
    * Protobuf type {@code hbase.pb.MutationProto}
@@ -8522,6 +8536,19 @@ public final class ClientProtos {
               nonce_ = input.readUInt64();
               break;
             }
+            case 82: {
+              org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000080) == 0x00000080)) {
+                subBuilder = rowUpdater_.toBuilder();
+              }
+              rowUpdater_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(rowUpdater_);
+                rowUpdater_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000080;
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -8697,6 +8724,10 @@ public final class ClientProtos {
        * DELETE = 3;
        */
       DELETE(3, 3),
+      /**
+       * UPDATE = 4;
+       */
+      UPDATE(4, 4),
       ;
 
       /**
@@ -8715,6 +8746,10 @@ public final class ClientProtos {
        * DELETE = 3;
        */
       public static final int DELETE_VALUE = 3;
+      /**
+       * UPDATE = 4;
+       */
+      public static final int UPDATE_VALUE = 4;
 
 
       public final int getNumber() { return value; }
@@ -8725,6 +8760,7 @@ public final class ClientProtos {
           case 1: return INCREMENT;
           case 2: return PUT;
           case 3: return DELETE;
+          case 4: return UPDATE;
           default: return null;
         }
       }
@@ -10725,6 +10761,28 @@ public final class ClientProtos {
       return nonce_;
     }
 
+    // optional .hbase.pb.RowUpdater row_updater = 10;
+    public static final int ROW_UPDATER_FIELD_NUMBER = 10;
+    private org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater rowUpdater_;
+    /**
+     * optional .hbase.pb.RowUpdater row_updater = 10;
+     */
+    public boolean hasRowUpdater() {
+      return ((bitField0_ & 0x00000080) == 0x00000080);
+    }
+    /**
+     * optional .hbase.pb.RowUpdater row_updater = 10;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater getRowUpdater() {
+      return rowUpdater_;
+    }
+    /**
+     * optional .hbase.pb.RowUpdater row_updater = 10;
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder getRowUpdaterOrBuilder() {
+      return rowUpdater_;
+    }
+
     private void initFields() {
       row_ = com.google.protobuf.ByteString.EMPTY;
       mutateType_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType.APPEND;
@@ -10735,6 +10793,7 @@ public final class ClientProtos {
       timeRange_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance();
       associatedCellCount_ = 0;
       nonce_ = 0L;
+      rowUpdater_ = org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -10753,6 +10812,12 @@ public final class ClientProtos {
           return false;
         }
       }
+      if (hasRowUpdater()) {
+        if (!getRowUpdater().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -10787,6 +10852,9 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00000040) == 0x00000040)) {
         output.writeUInt64(9, nonce_);
       }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        output.writeMessage(10, rowUpdater_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -10832,6 +10900,10 @@ public final class ClientProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeUInt64Size(9, nonce_);
       }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(10, rowUpdater_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -10894,6 +10966,11 @@ public final class ClientProtos {
         result = result && (getNonce()
             == other.getNonce());
       }
+      result = result && (hasRowUpdater() == other.hasRowUpdater());
+      if (hasRowUpdater()) {
+        result = result && getRowUpdater()
+            .equals(other.getRowUpdater());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -10943,6 +11020,10 @@ public final class ClientProtos {
         hash = (37 * hash) + NONCE_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getNonce());
       }
+      if (hasRowUpdater()) {
+        hash = (37 * hash) + ROW_UPDATER_FIELD_NUMBER;
+        hash = (53 * hash) + getRowUpdater().hashCode();
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -11056,6 +11137,7 @@ public final class ClientProtos {
           getColumnValueFieldBuilder();
           getAttributeFieldBuilder();
           getTimeRangeFieldBuilder();
+          getRowUpdaterFieldBuilder();
         }
       }
       private static Builder create() {
@@ -11094,6 +11176,12 @@ public final class ClientProtos {
         bitField0_ = (bitField0_ & ~0x00000080);
         nonce_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000100);
+        if (rowUpdaterBuilder_ == null) {
+          rowUpdater_ = org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance();
+        } else {
+          rowUpdaterBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000200);
         return this;
       }
 
@@ -11172,6 +11260,14 @@ public final class ClientProtos {
           to_bitField0_ |= 0x00000040;
         }
         result.nonce_ = nonce_;
+        if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
+          to_bitField0_ |= 0x00000080;
+        }
+        if (rowUpdaterBuilder_ == null) {
+          result.rowUpdater_ = rowUpdater_;
+        } else {
+          result.rowUpdater_ = rowUpdaterBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -11261,6 +11357,9 @@ public final class ClientProtos {
         if (other.hasNonce()) {
           setNonce(other.getNonce());
         }
+        if (other.hasRowUpdater()) {
+          mergeRowUpdater(other.getRowUpdater());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -11278,6 +11377,12 @@ public final class ClientProtos {
             return false;
           }
         }
+        if (hasRowUpdater()) {
+          if (!getRowUpdater().isInitialized()) {
+            
+            return false;
+          }
+        }
         return true;
       }
 
@@ -12185,6 +12290,123 @@ public final class ClientProtos {
         return this;
       }
 
+      // optional .hbase.pb.RowUpdater row_updater = 10;
+      private org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater rowUpdater_ = org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater, org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder, org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder> rowUpdaterBuilder_;
+      /**
+       * optional .hbase.pb.RowUpdater row_updater = 10;
+       */
+      public boolean hasRowUpdater() {
+        return ((bitField0_ & 0x00000200) == 0x00000200);
+      }
+      /**
+       * optional .hbase.pb.RowUpdater row_updater = 10;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater getRowUpdater() {
+        if (rowUpdaterBuilder_ == null) {
+          return rowUpdater_;
+        } else {
+          return rowUpdaterBuilder_.getMessage();
+        }
+      }
+      /**
+       * optional .hbase.pb.RowUpdater row_updater = 10;
+       */
+      public Builder setRowUpdater(org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater value) {
+        if (rowUpdaterBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          rowUpdater_ = value;
+          onChanged();
+        } else {
+          rowUpdaterBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000200;
+        return this;
+      }
+      /**
+       * optional .hbase.pb.RowUpdater row_updater = 10;
+       */
+      public Builder setRowUpdater(
+          org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder builderForValue) {
+        if (rowUpdaterBuilder_ == null) {
+          rowUpdater_ = builderForValue.build();
+          onChanged();
+        } else {
+          rowUpdaterBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000200;
+        return this;
+      }
+      /**
+       * optional .hbase.pb.RowUpdater row_updater = 10;
+       */
+      public Builder mergeRowUpdater(org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater value) {
+        if (rowUpdaterBuilder_ == null) {
+          if (((bitField0_ & 0x00000200) == 0x00000200) &&
+              rowUpdater_ != org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance()) {
+            rowUpdater_ =
+              org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.newBuilder(rowUpdater_).mergeFrom(value).buildPartial();
+          } else {
+            rowUpdater_ = value;
+          }
+          onChanged();
+        } else {
+          rowUpdaterBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000200;
+        return this;
+      }
+      /**
+       * optional .hbase.pb.RowUpdater row_updater = 10;
+       */
+      public Builder clearRowUpdater() {
+        if (rowUpdaterBuilder_ == null) {
+          rowUpdater_ = org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance();
+          onChanged();
+        } else {
+          rowUpdaterBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000200);
+        return this;
+      }
+      /**
+       * optional .hbase.pb.RowUpdater row_updater = 10;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder getRowUpdaterBuilder() {
+        bitField0_ |= 0x00000200;
+        onChanged();
+        return getRowUpdaterFieldBuilder().getBuilder();
+      }
+      /**
+       * optional .hbase.pb.RowUpdater row_updater = 10;
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder getRowUpdaterOrBuilder() {
+        if (rowUpdaterBuilder_ != null) {
+          return rowUpdaterBuilder_.getMessageOrBuilder();
+        } else {
+          return rowUpdater_;
+        }
+      }
+      /**
+       * optional .hbase.pb.RowUpdater row_updater = 10;
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater, org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder, org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder> 
+          getRowUpdaterFieldBuilder() {
+        if (rowUpdaterBuilder_ == null) {
+          rowUpdaterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater, org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder, org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder>(
+                  rowUpdater_,
+                  getParentForChildren(),
+                  isClean());
+          rowUpdater_ = null;
+        }
+        return rowUpdaterBuilder_;
+      }
+
       // @@protoc_insertion_point(builder_scope:hbase.pb.MutationProto)
     }
 
@@ -39451,163 +39673,165 @@ public final class ClientProtos {
     java.lang.String[] descriptorData = {
       "\n\014Client.proto\022\010hbase.pb\032\013HBase.proto\032\014F" +
       "ilter.proto\032\nCell.proto\032\020Comparator.prot" +
-      "o\032\017MapReduce.proto\"\037\n\016Authorizations\022\r\n\005" +
-      "label\030\001 \003(\t\"$\n\016CellVisibility\022\022\n\nexpress" +
-      "ion\030\001 \002(\t\"+\n\006Column\022\016\n\006family\030\001 \002(\014\022\021\n\tq" +
-      "ualifier\030\002 \003(\014\"\276\003\n\003Get\022\013\n\003row\030\001 \002(\014\022 \n\006c" +
-      "olumn\030\002 \003(\0132\020.hbase.pb.Column\022*\n\tattribu" +
-      "te\030\003 \003(\0132\027.hbase.pb.NameBytesPair\022 \n\006fil" +
-      "ter\030\004 \001(\0132\020.hbase.pb.Filter\022\'\n\ntime_rang" +
-      "e\030\005 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014max_vers",
-      "ions\030\006 \001(\r:\0011\022\032\n\014cache_blocks\030\007 \001(\010:\004tru" +
-      "e\022\023\n\013store_limit\030\010 \001(\r\022\024\n\014store_offset\030\t" +
-      " \001(\r\022\035\n\016existence_only\030\n \001(\010:\005false\0222\n\013c" +
-      "onsistency\030\014 \001(\0162\025.hbase.pb.Consistency:" +
-      "\006STRONG\0226\n\rcf_time_range\030\r \003(\0132\037.hbase.p" +
-      "b.ColumnFamilyTimeRange\022&\n\036load_column_f" +
-      "amilies_on_demand\030\016 \001(\010\"\203\001\n\006Result\022\034\n\004ce" +
-      "ll\030\001 \003(\0132\016.hbase.pb.Cell\022\035\n\025associated_c" +
-      "ell_count\030\002 \001(\005\022\016\n\006exists\030\003 \001(\010\022\024\n\005stale" +
-      "\030\004 \001(\010:\005false\022\026\n\007partial\030\005 \001(\010:\005false\"S\n",
-      "\nGetRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" +
-      "egionSpecifier\022\032\n\003get\030\002 \002(\0132\r.hbase.pb.G" +
-      "et\"/\n\013GetResponse\022 \n\006result\030\001 \001(\0132\020.hbas" +
-      "e.pb.Result\"\222\001\n\tCondition\022\013\n\003row\030\001 \002(\014\022\016" +
-      "\n\006family\030\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022+\n\014com" +
-      "pare_type\030\004 \002(\0162\025.hbase.pb.CompareType\022(" +
-      "\n\ncomparator\030\005 \002(\0132\024.hbase.pb.Comparator" +
-      "\"\364\006\n\rMutationProto\022\013\n\003row\030\001 \001(\014\0229\n\013mutat" +
-      "e_type\030\002 \001(\0162$.hbase.pb.MutationProto.Mu" +
-      "tationType\0229\n\014column_value\030\003 \003(\0132#.hbase",
-      ".pb.MutationProto.ColumnValue\022\021\n\ttimesta" +
-      "mp\030\004 \001(\004\022*\n\tattribute\030\005 \003(\0132\027.hbase.pb.N" +
-      "ameBytesPair\022C\n\ndurability\030\006 \001(\0162\".hbase" +
-      ".pb.MutationProto.Durability:\013USE_DEFAUL" +
-      "T\022\'\n\ntime_range\030\007 \001(\0132\023.hbase.pb.TimeRan" +
-      "ge\022\035\n\025associated_cell_count\030\010 \001(\005\022\r\n\005non" +
-      "ce\030\t \001(\004\032\371\001\n\013ColumnValue\022\016\n\006family\030\001 \002(\014" +
-      "\022K\n\017qualifier_value\030\002 \003(\01322.hbase.pb.Mut" +
-      "ationProto.ColumnValue.QualifierValue\032\214\001" +
-      "\n\016QualifierValue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005v",
-      "alue\030\002 \001(\014\022\021\n\ttimestamp\030\003 \001(\004\0227\n\013delete_" +
-      "type\030\004 \001(\0162\".hbase.pb.MutationProto.Dele" +
-      "teType\022\014\n\004tags\030\005 \001(\014\"W\n\nDurability\022\017\n\013US" +
-      "E_DEFAULT\020\000\022\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002" +
-      "\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSYNC_WAL\020\004\">\n\014Mutatio" +
-      "nType\022\n\n\006APPEND\020\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020" +
-      "\002\022\n\n\006DELETE\020\003\"p\n\nDeleteType\022\026\n\022DELETE_ON" +
-      "E_VERSION\020\000\022\034\n\030DELETE_MULTIPLE_VERSIONS\020" +
-      "\001\022\021\n\rDELETE_FAMILY\020\002\022\031\n\025DELETE_FAMILY_VE" +
-      "RSION\020\003\"\242\001\n\rMutateRequest\022)\n\006region\030\001 \002(",
-      "\0132\031.hbase.pb.RegionSpecifier\022)\n\010mutation" +
-      "\030\002 \002(\0132\027.hbase.pb.MutationProto\022&\n\tcondi" +
-      "tion\030\003 \001(\0132\023.hbase.pb.Condition\022\023\n\013nonce" +
-      "_group\030\004 \001(\004\"E\n\016MutateResponse\022 \n\006result" +
-      "\030\001 \001(\0132\020.hbase.pb.Result\022\021\n\tprocessed\030\002 " +
-      "\001(\010\"\275\004\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." +
-      "Column\022*\n\tattribute\030\002 \003(\0132\027.hbase.pb.Nam" +
-      "eBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_ro" +
-      "w\030\004 \001(\014\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filte" +
-      "r\022\'\n\ntime_range\030\006 \001(\0132\023.hbase.pb.TimeRan",
-      "ge\022\027\n\014max_versions\030\007 \001(\r:\0011\022\032\n\014cache_blo" +
-      "cks\030\010 \001(\010:\004true\022\022\n\nbatch_size\030\t \001(\r\022\027\n\017m" +
-      "ax_result_size\030\n \001(\004\022\023\n\013store_limit\030\013 \001(" +
-      "\r\022\024\n\014store_offset\030\014 \001(\r\022&\n\036load_column_f" +
-      "amilies_on_demand\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027" +
-      "\n\010reversed\030\017 \001(\010:\005false\0222\n\013consistency\030\020" +
-      " \001(\0162\025.hbase.pb.Consistency:\006STRONG\022\017\n\007c" +
-      "aching\030\021 \001(\r\022\035\n\025allow_partial_results\030\022 " +
-      "\001(\010\0226\n\rcf_time_range\030\023 \003(\0132\037.hbase.pb.Co" +
-      "lumnFamilyTimeRange\"\246\002\n\013ScanRequest\022)\n\006r",
-      "egion\030\001 \001(\0132\031.hbase.pb.RegionSpecifier\022\034" +
-      "\n\004scan\030\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_" +
-      "id\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclos" +
-      "e_scanner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037" +
-      "\n\027client_handles_partials\030\007 \001(\010\022!\n\031clien" +
-      "t_handles_heartbeats\030\010 \001(\010\022\032\n\022track_scan" +
-      "_metrics\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\"\232\002\n" +
-      "\014ScanResponse\022\030\n\020cells_per_result\030\001 \003(\r\022" +
-      "\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(" +
-      "\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.",
-      "pb.Result\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag" +
-      "_per_result\030\007 \003(\010\022\036\n\026more_results_in_reg" +
-      "ion\030\010 \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014" +
-      "scan_metrics\030\n \001(\0132\025.hbase.pb.ScanMetric" +
-      "s\"\240\002\n\024BulkLoadHFileRequest\022)\n\006region\030\001 \002" +
-      "(\0132\031.hbase.pb.RegionSpecifier\022>\n\013family_" +
-      "path\030\002 \003(\0132).hbase.pb.BulkLoadHFileReque" +
-      "st.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\022+\n" +
-      "\010fs_token\030\004 \001(\0132\031.hbase.pb.DelegationTok" +
-      "en\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 \001(",
-      "\010:\005false\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014" +
-      "\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022\016\n" +
-      "\006loaded\030\001 \002(\010\"V\n\017DelegationToken\022\022\n\niden" +
-      "tifier\030\001 \001(\014\022\020\n\010password\030\002 \001(\014\022\014\n\004kind\030\003" +
-      " \001(\t\022\017\n\007service\030\004 \001(\t\"l\n\026PrepareBulkLoad" +
-      "Request\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.T" +
-      "ableName\022)\n\006region\030\002 \001(\0132\031.hbase.pb.Regi" +
-      "onSpecifier\"-\n\027PrepareBulkLoadResponse\022\022" +
-      "\n\nbulk_token\030\001 \002(\t\"W\n\026CleanupBulkLoadReq" +
-      "uest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001(\013",
-      "2\031.hbase.pb.RegionSpecifier\"\031\n\027CleanupBu" +
-      "lkLoadResponse\"a\n\026CoprocessorServiceCall" +
-      "\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013m" +
-      "ethod_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030Cop" +
-      "rocessorServiceResult\022&\n\005value\030\001 \001(\0132\027.h" +
-      "base.pb.NameBytesPair\"v\n\031CoprocessorServ" +
-      "iceRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Re" +
-      "gionSpecifier\022.\n\004call\030\002 \002(\0132 .hbase.pb.C" +
-      "oprocessorServiceCall\"o\n\032CoprocessorServ" +
-      "iceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R",
-      "egionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase.pb" +
-      ".NameBytesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001(\r" +
-      "\022)\n\010mutation\030\002 \001(\0132\027.hbase.pb.MutationPr" +
-      "oto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014servi" +
-      "ce_call\030\004 \001(\0132 .hbase.pb.CoprocessorServ" +
-      "iceCall\"k\n\014RegionAction\022)\n\006region\030\001 \002(\0132" +
-      "\031.hbase.pb.RegionSpecifier\022\016\n\006atomic\030\002 \001" +
-      "(\010\022 \n\006action\030\003 \003(\0132\020.hbase.pb.Action\"c\n\017" +
-      "RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:\0010" +
-      "\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compaction",
-      "Pressure\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadStat" +
-      "s\022)\n\006region\030\001 \003(\0132\031.hbase.pb.RegionSpeci" +
-      "fier\022\'\n\004stat\030\002 \003(\0132\031.hbase.pb.RegionLoad" +
-      "Stats\"\336\001\n\021ResultOrException\022\r\n\005index\030\001 \001" +
-      "(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\t" +
-      "exception\030\003 \001(\0132\027.hbase.pb.NameBytesPair" +
-      "\022:\n\016service_result\030\004 \001(\0132\".hbase.pb.Copr" +
-      "ocessorServiceResult\0220\n\tloadStats\030\005 \001(\0132" +
-      "\031.hbase.pb.RegionLoadStatsB\002\030\001\"x\n\022Region" +
-      "ActionResult\0226\n\021resultOrException\030\001 \003(\0132",
-      "\033.hbase.pb.ResultOrException\022*\n\texceptio" +
-      "n\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mult" +
-      "iRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase.p" +
-      "b.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tco" +
-      "ndition\030\003 \001(\0132\023.hbase.pb.Condition\"\226\001\n\rM" +
-      "ultiResponse\0228\n\022regionActionResult\030\001 \003(\013" +
-      "2\034.hbase.pb.RegionActionResult\022\021\n\tproces" +
-      "sed\030\002 \001(\010\0228\n\020regionStatistics\030\003 \001(\0132\036.hb" +
-      "ase.pb.MultiRegionLoadStats*\'\n\013Consisten" +
-      "cy\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClientS",
-      "ervice\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025.hb" +
-      "ase.pb.GetResponse\022;\n\006Mutate\022\027.hbase.pb." +
-      "MutateRequest\032\030.hbase.pb.MutateResponse\022" +
-      "5\n\004Scan\022\025.hbase.pb.ScanRequest\032\026.hbase.p" +
-      "b.ScanResponse\022P\n\rBulkLoadHFile\022\036.hbase." +
-      "pb.BulkLoadHFileRequest\032\037.hbase.pb.BulkL" +
-      "oadHFileResponse\022V\n\017PrepareBulkLoad\022 .hb" +
-      "ase.pb.PrepareBulkLoadRequest\032!.hbase.pb" +
-      ".PrepareBulkLoadResponse\022V\n\017CleanupBulkL" +
-      "oad\022 .hbase.pb.CleanupBulkLoadRequest\032!.",
-      "hbase.pb.CleanupBulkLoadResponse\022X\n\013Exec" +
-      "Service\022#.hbase.pb.CoprocessorServiceReq" +
-      "uest\032$.hbase.pb.CoprocessorServiceRespon" +
-      "se\022d\n\027ExecRegionServerService\022#.hbase.pb" +
-      ".CoprocessorServiceRequest\032$.hbase.pb.Co" +
-      "processorServiceResponse\0228\n\005Multi\022\026.hbas" +
-      "e.pb.MultiRequest\032\027.hbase.pb.MultiRespon" +
-      "seBB\n*org.apache.hadoop.hbase.protobuf.g" +
-      "eneratedB\014ClientProtosH\001\210\001\001\240\001\001"
+      "o\032\017MapReduce.proto\032\020RowUpdater.proto\"\037\n\016" +
+      "Authorizations\022\r\n\005label\030\001 \003(\t\"$\n\016CellVis" +
+      "ibility\022\022\n\nexpression\030\001 \002(\t\"+\n\006Column\022\016\n" +
+      "\006family\030\001 \002(\014\022\021\n\tqualifier\030\002 \003(\014\"\276\003\n\003Get" +
+      "\022\013\n\003row\030\001 \002(\014\022 \n\006column\030\002 \003(\0132\020.hbase.pb" +
+      ".Column\022*\n\tattribute\030\003 \003(\0132\027.hbase.pb.Na" +
+      "meBytesPair\022 \n\006filter\030\004 \001(\0132\020.hbase.pb.F" +
+      "ilter\022\'\n\ntime_range\030\005 \001(\0132\023.hbase.pb.Tim",
+      "eRange\022\027\n\014max_versions\030\006 \001(\r:\0011\022\032\n\014cache" +
+      "_blocks\030\007 \001(\010:\004true\022\023\n\013store_limit\030\010 \001(\r" +
+      "\022\024\n\014store_offset\030\t \001(\r\022\035\n\016existence_only" +
+      "\030\n \001(\010:\005false\0222\n\013consistency\030\014 \001(\0162\025.hba" +
+      "se.pb.Consistency:\006STRONG\0226\n\rcf_time_ran" +
+      "ge\030\r \003(\0132\037.hbase.pb.ColumnFamilyTimeRang" +
+      "e\022&\n\036load_column_families_on_demand\030\016 \001(" +
+      "\010\"\203\001\n\006Result\022\034\n\004cell\030\001 \003(\0132\016.hbase.pb.Ce" +
+      "ll\022\035\n\025associated_cell_count\030\002 \001(\005\022\016\n\006exi" +
+      "sts\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n\007parti",
+      "al\030\005 \001(\010:\005false\"S\n\nGetRequest\022)\n\006region\030" +
+      "\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\032\n\003get\030" +
+      "\002 \002(\0132\r.hbase.pb.Get\"/\n\013GetResponse\022 \n\006r" +
+      "esult\030\001 \001(\0132\020.hbase.pb.Result\"\222\001\n\tCondit" +
+      "ion\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqual" +
+      "ifier\030\003 \002(\014\022+\n\014compare_type\030\004 \002(\0162\025.hbas" +
+      "e.pb.CompareType\022(\n\ncomparator\030\005 \002(\0132\024.h" +
+      "base.pb.Comparator\"\253\007\n\rMutationProto\022\013\n\003" +
+      "row\030\001 \001(\014\0229\n\013mutate_type\030\002 \001(\0162$.hbase.p" +
+      "b.MutationProto.MutationType\0229\n\014column_v",
+      "alue\030\003 \003(\0132#.hbase.pb.MutationProto.Colu" +
+      "mnValue\022\021\n\ttimestamp\030\004 \001(\004\022*\n\tattribute\030" +
+      "\005 \003(\0132\027.hbase.pb.NameBytesPair\022C\n\ndurabi" +
+      "lity\030\006 \001(\0162\".hbase.pb.MutationProto.Dura" +
+      "bility:\013USE_DEFAULT\022\'\n\ntime_range\030\007 \001(\0132" +
+      "\023.hbase.pb.TimeRange\022\035\n\025associated_cell_" +
+      "count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\022)\n\013row_update" +
+      "r\030\n \001(\0132\024.hbase.pb.RowUpdater\032\371\001\n\013Column" +
+      "Value\022\016\n\006family\030\001 \002(\014\022K\n\017qualifier_value" +
+      "\030\002 \003(\01322.hbase.pb.MutationProto.ColumnVa",
+      "lue.QualifierValue\032\214\001\n\016QualifierValue\022\021\n" +
+      "\tqualifier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimes" +
+      "tamp\030\003 \001(\004\0227\n\013delete_type\030\004 \001(\0162\".hbase." +
+      "pb.MutationProto.DeleteType\022\014\n\004tags\030\005 \001(" +
+      "\014\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000\022\014\n\010SKIP" +
+      "_WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tF" +
+      "SYNC_WAL\020\004\"J\n\014MutationType\022\n\n\006APPEND\020\000\022\r" +
+      "\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE\020\003\022\n\n\006UP" +
+      "DATE\020\004\"p\n\nDeleteType\022\026\n\022DELETE_ONE_VERSI" +
+      "ON\020\000\022\034\n\030DELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDE",
+      "LETE_FAMILY\020\002\022\031\n\025DELETE_FAMILY_VERSION\020\003" +
+      "\"\242\001\n\rMutateRequest\022)\n\006region\030\001 \002(\0132\031.hba" +
+      "se.pb.RegionSpecifier\022)\n\010mutation\030\002 \002(\0132" +
+      "\027.hbase.pb.MutationProto\022&\n\tcondition\030\003 " +
+      "\001(\0132\023.hbase.pb.Condition\022\023\n\013nonce_group\030" +
+      "\004 \001(\004\"E\n\016MutateResponse\022 \n\006result\030\001 \001(\0132" +
+      "\020.hbase.pb.Result\022\021\n\tprocessed\030\002 \001(\010\"\275\004\n" +
+      "\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb.Column\022" +
+      "*\n\tattribute\030\002 \003(\0132\027.hbase.pb.NameBytesP" +
+      "air\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014",
+      "\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filter\022\'\n\nti" +
+      "me_range\030\006 \001(\0132\023.hbase.pb.TimeRange\022\027\n\014m" +
+      "ax_versions\030\007 \001(\r:\0011\022\032\n\014cache_blocks\030\010 \001" +
+      "(\010:\004true\022\022\n\nbatch_size\030\t \001(\r\022\027\n\017max_resu" +
+      "lt_size\030\n \001(\004\022\023\n\013store_limit\030\013 \001(\r\022\024\n\014st" +
+      "ore_offset\030\014 \001(\r\022&\n\036load_column_families" +
+      "_on_demand\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010rever" +
+      "sed\030\017 \001(\010:\005false\0222\n\013consistency\030\020 \001(\0162\025." +
+      "hbase.pb.Consistency:\006STRONG\022\017\n\007caching\030" +
+      "\021 \001(\r\022\035\n\025allow_partial_results\030\022 \001(\010\0226\n\r",
+      "cf_time_range\030\023 \003(\0132\037.hbase.pb.ColumnFam" +
+      "ilyTimeRange\"\246\002\n\013ScanRequest\022)\n\006region\030\001" +
+      " \001(\0132\031.hbase.pb.RegionSpecifier\022\034\n\004scan\030" +
+      "\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_id\030\003 \001(" +
+      "\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclose_scann" +
+      "er\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027clien" +
+      "t_handles_partials\030\007 \001(\010\022!\n\031client_handl" +
+      "es_heartbeats\030\010 \001(\010\022\032\n\022track_scan_metric" +
+      "s\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\"\232\002\n\014ScanRe" +
+      "sponse\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscan",
+      "ner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003tt" +
+      "l\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Resu" +
+      "lt\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_re" +
+      "sult\030\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001" +
+      "(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_me" +
+      "trics\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\240\002\n\024B" +
+      "ulkLoadHFileRequest\022)\n\006region\030\001 \002(\0132\031.hb" +
+      "ase.pb.RegionSpecifier\022>\n\013family_path\030\002 " +
+      "\003(\0132).hbase.pb.BulkLoadHFileRequest.Fami" +
+      "lyPath\022\026\n\016assign_seq_num\030\003 \001(\010\022+\n\010fs_tok",
+      "en\030\004 \001(\0132\031.hbase.pb.DelegationToken\022\022\n\nb" +
+      "ulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 \001(\010:\005fals" +
+      "e\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030" +
+      "\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022\016\n\006loaded" +
+      "\030\001 \002(\010\"V\n\017DelegationToken\022\022\n\nidentifier\030" +
+      "\001 \001(\014\022\020\n\010password\030\002 \001(\014\022\014\n\004kind\030\003 \001(\t\022\017\n" +
+      "\007service\030\004 \001(\t\"l\n\026PrepareBulkLoadRequest" +
+      "\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNam" +
+      "e\022)\n\006region\030\002 \001(\0132\031.hbase.pb.RegionSpeci" +
+      "fier\"-\n\027PrepareBulkLoadResponse\022\022\n\nbulk_",
+      "token\030\001 \002(\t\"W\n\026CleanupBulkLoadRequest\022\022\n" +
+      "\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001(\0132\031.hbas" +
+      "e.pb.RegionSpecifier\"\031\n\027CleanupBulkLoadR" +
+      "esponse\"a\n\026CoprocessorServiceCall\022\013\n\003row" +
+      "\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013method_n" +
+      "ame\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030Coprocesso" +
+      "rServiceResult\022&\n\005value\030\001 \001(\0132\027.hbase.pb" +
+      ".NameBytesPair\"v\n\031CoprocessorServiceRequ" +
+      "est\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpe" +
+      "cifier\022.\n\004call\030\002 \002(\0132 .hbase.pb.Coproces",
+      "sorServiceCall\"o\n\032CoprocessorServiceResp" +
+      "onse\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" +
+      "ecifier\022&\n\005value\030\002 \002(\0132\027.hbase.pb.NameBy" +
+      "tesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mut" +
+      "ation\030\002 \001(\0132\027.hbase.pb.MutationProto\022\032\n\003" +
+      "get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014service_call" +
+      "\030\004 \001(\0132 .hbase.pb.CoprocessorServiceCall" +
+      "\"k\n\014RegionAction\022)\n\006region\030\001 \002(\0132\031.hbase" +
+      ".pb.RegionSpecifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006a" +
+      "ction\030\003 \003(\0132\020.hbase.pb.Action\"c\n\017RegionL",
+      "oadStats\022\027\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rhea" +
+      "pOccupancy\030\002 \001(\005:\0010\022\035\n\022compactionPressur" +
+      "e\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadStats\022)\n\006re" +
+      "gion\030\001 \003(\0132\031.hbase.pb.RegionSpecifier\022\'\n" +
+      "\004stat\030\002 \003(\0132\031.hbase.pb.RegionLoadStats\"\336" +
+      "\001\n\021ResultOrException\022\r\n\005index\030\001 \001(\r\022 \n\006r" +
+      "esult\030\002 \001(\0132\020.hbase.pb.Result\022*\n\texcepti" +
+      "on\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022:\n\016ser" +
+      "vice_result\030\004 \001(\0132\".hbase.pb.Coprocessor" +
+      "ServiceResult\0220\n\tloadStats\030\005 \001(\0132\031.hbase",
+      ".pb.RegionLoadStatsB\002\030\001\"x\n\022RegionActionR" +
+      "esult\0226\n\021resultOrException\030\001 \003(\0132\033.hbase" +
+      ".pb.ResultOrException\022*\n\texception\030\002 \001(\013" +
+      "2\027.hbase.pb.NameBytesPair\"x\n\014MultiReques" +
+      "t\022,\n\014regionAction\030\001 \003(\0132\026.hbase.pb.Regio" +
+      "nAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tcondition" +
+      "\030\003 \001(\0132\023.hbase.pb.Condition\"\226\001\n\rMultiRes" +
+      "ponse\0228\n\022regionActionResult\030\001 \003(\0132\034.hbas" +
+      "e.pb.RegionActionResult\022\021\n\tprocessed\030\002 \001" +
+      "(\010\0228\n\020regionStatistics\030\003 \001(\0132\036.hbase.pb.",
+      "MultiRegionLoadStats*\'\n\013Consistency\022\n\n\006S" +
+      "TRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClientService\022" +
+      "2\n\003Get\022\024.hbase.pb.GetRequest\032\025.hbase.pb." +
+      "GetResponse\022;\n\006Mutate\022\027.hbase.pb.MutateR" +
+      "equest\032\030.hbase.pb.MutateResponse\0225\n\004Scan" +
+      "\022\025.hbase.pb.ScanRequest\032\026.hbase.pb.ScanR" +
+      "esponse\022P\n\rBulkLoadHFile\022\036.hbase.pb.Bulk" +
+      "LoadHFileRequest\032\037.hbase.pb.BulkLoadHFil" +
+      "eResponse\022V\n\017PrepareBulkLoad\022 .hbase.pb." +
+      "PrepareBulkLoadRequest\032!.hbase.pb.Prepar",
+      "eBulkLoadResponse\022V\n\017CleanupBulkLoad\022 .h" +
+      "base.pb.CleanupBulkLoadRequest\032!.hbase.p" +
+      "b.CleanupBulkLoadResponse\022X\n\013ExecService" +
+      "\022#.hbase.pb.CoprocessorServiceRequest\032$." +
+      "hbase.pb.CoprocessorServiceResponse\022d\n\027E" +
+      "xecRegionServerService\022#.hbase.pb.Coproc" +
+      "essorServiceRequest\032$.hbase.pb.Coprocess" +
+      "orServiceResponse\0228\n\005Multi\022\026.hbase.pb.Mu" +
+      "ltiRequest\032\027.hbase.pb.MultiResponseBB\n*o" +
+      "rg.apache.hadoop.hbase.protobuf.generate",
+      "dB\014ClientProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -39667,7 +39891,7 @@ public final class ClientProtos {
           internal_static_hbase_pb_MutationProto_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_MutationProto_descriptor,
-              new java.lang.String[] { "Row", "MutateType", "ColumnValue", "Timestamp", "Attribute", "Durability", "TimeRange", "AssociatedCellCount", "Nonce", });
+              new java.lang.String[] { "Row", "MutateType", "ColumnValue", "Timestamp", "Attribute", "Durability", "TimeRange", "AssociatedCellCount", "Nonce", "RowUpdater", });
           internal_static_hbase_pb_MutationProto_ColumnValue_descriptor =
             internal_static_hbase_pb_MutationProto_descriptor.getNestedTypes().get(0);
           internal_static_hbase_pb_MutationProto_ColumnValue_fieldAccessorTable = new
@@ -39841,6 +40065,7 @@ public final class ClientProtos {
           org.apache.hadoop.hbase.protobuf.generated.CellProtos.getDescriptor(),
           org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos.getDescriptor(),
           org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.getDescriptor(),
+          org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.getDescriptor(),
         }, assigner);
   }
 
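Illustration (not part of the patch): a minimal sketch of how a client could populate the new field through the generated builders in this patch, assuming a RowUpdater implementation known to the server under the hypothetical name "com.example.IncrementUpdater"; the row key is likewise an illustrative value.

    // Build an UPDATE mutation that references a server-side RowUpdater.
    // "row-1" and "com.example.IncrementUpdater" are placeholder values.
    ClientProtos.MutationProto proto = ClientProtos.MutationProto.newBuilder()
        .setRow(com.google.protobuf.ByteString.copyFromUtf8("row-1"))
        .setMutateType(ClientProtos.MutationProto.MutationType.UPDATE)
        .setRowUpdater(RowUpdaterProtos.RowUpdater.newBuilder()
            .setName("com.example.IncrementUpdater")                 // required field
            .setSerializedUpdater(com.google.protobuf.ByteString.EMPTY) // optional updater state
            .build())
        .build();
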
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RowUpdaterProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RowUpdaterProtos.java
new file mode 100644
index 0000000..92d87e7
--- /dev/null
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RowUpdaterProtos.java
@@ -0,0 +1,661 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: RowUpdater.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class RowUpdaterProtos {
+  private RowUpdaterProtos() {}
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface RowUpdaterOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string name = 1;
+    /**
+     * required string name = 1;
+     */
+    boolean hasName();
+    /**
+     * required string name = 1;
+     */
+    java.lang.String getName();
+    /**
+     * required string name = 1;
+     */
+    com.google.protobuf.ByteString
+        getNameBytes();
+
+    // optional bytes serialized_updater = 2;
+    /**
+     * optional bytes serialized_updater = 2;
+     */
+    boolean hasSerializedUpdater();
+    /**
+     * optional bytes serialized_updater = 2;
+     */
+    com.google.protobuf.ByteString getSerializedUpdater();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RowUpdater}
+   */
+  public static final class RowUpdater extends
+      com.google.protobuf.GeneratedMessage
+      implements RowUpdaterOrBuilder {
+    // Use RowUpdater.newBuilder() to construct.
+    private RowUpdater(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private RowUpdater(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final RowUpdater defaultInstance;
+    public static RowUpdater getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public RowUpdater getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RowUpdater(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              name_ = input.readBytes();
+              break;
+            }
+            case 18: {
+              bitField0_ |= 0x00000002;
+              serializedUpdater_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.internal_static_hbase_pb_RowUpdater_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.internal_static_hbase_pb_RowUpdater_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.class, org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<RowUpdater> PARSER =
+        new com.google.protobuf.AbstractParser<RowUpdater>() {
+      public RowUpdater parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new RowUpdater(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<RowUpdater> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string name = 1;
+    public static final int NAME_FIELD_NUMBER = 1;
+    private java.lang.Object name_;
+    /**
+     * required string name = 1;
+     */
+    public boolean hasName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * required string name = 1;
+     */
+    public java.lang.String getName() {
+      java.lang.Object ref = name_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          name_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * required string name = 1;
+     */
+    public com.google.protobuf.ByteString
+        getNameBytes() {
+      java.lang.Object ref = name_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        name_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // optional bytes serialized_updater = 2;
+    public static final int SERIALIZED_UPDATER_FIELD_NUMBER = 2;
+    private com.google.protobuf.ByteString serializedUpdater_;
+    /**
+     * optional bytes serialized_updater = 2;
+     */
+    public boolean hasSerializedUpdater() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * optional bytes serialized_updater = 2;
+     */
+    public com.google.protobuf.ByteString getSerializedUpdater() {
+      return serializedUpdater_;
+    }
+
+    private void initFields() {
+      name_ = "";
+      serializedUpdater_ = com.google.protobuf.ByteString.EMPTY;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getNameBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBytes(2, serializedUpdater_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getNameBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(2, serializedUpdater_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater other = (org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater) obj;
+
+      boolean result = true;
+      result = result && (hasName() == other.hasName());
+      if (hasName()) {
+        result = result && getName()
+            .equals(other.getName());
+      }
+      result = result && (hasSerializedUpdater() == other.hasSerializedUpdater());
+      if (hasSerializedUpdater()) {
+        result = result && getSerializedUpdater()
+            .equals(other.getSerializedUpdater());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasName()) {
+        hash = (37 * hash) + NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getName().hashCode();
+      }
+      if (hasSerializedUpdater()) {
+        hash = (37 * hash) + SERIALIZED_UPDATER_FIELD_NUMBER;
+        hash = (53 * hash) + getSerializedUpdater().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RowUpdater}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdaterOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.internal_static_hbase_pb_RowUpdater_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.internal_static_hbase_pb_RowUpdater_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.class, org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        name_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        serializedUpdater_ = com.google.protobuf.ByteString.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.internal_static_hbase_pb_RowUpdater_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater build() {
+        org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater result = new org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.name_ = name_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.serializedUpdater_ = serializedUpdater_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater.getDefaultInstance()) return this;
+        if (other.hasName()) {
+          bitField0_ |= 0x00000001;
+          name_ = other.name_;
+          onChanged();
+        }
+        if (other.hasSerializedUpdater()) {
+          setSerializedUpdater(other.getSerializedUpdater());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasName()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RowUpdaterProtos.RowUpdater) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string name = 1;
+      private java.lang.Object name_ = "";
+      /**
+       * required string name = 1;
+       */
+      public boolean hasName() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * required string name = 1;
+       */
+      public java.lang.String getName() {
+        java.lang.Object ref = name_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          name_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * required string name = 1;
+       */
+      public com.google.protobuf.ByteString
+          getNameBytes() {
+        java.lang.Object ref = name_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          name_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * required string name = 1;
+       */
+      public Builder setName(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        name_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * required string name = 1;
+       */
+      public Builder clearName() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        name_ = getDefaultInstance().getName();
+        onChanged();
+        return this;
+      }
+      /**
+       * required string name = 1;
+       */
+      public Builder setNameBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        name_ = value;
+        onChanged();
+        return this;
+      }
+
+      // optional bytes serialized_updater = 2;
+      private com.google.protobuf.ByteString serializedUpdater_ = com.google.protobuf.ByteString.EMPTY;
+      /**
+       * optional bytes serialized_updater = 2;
+       */
+      public boolean hasSerializedUpdater() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * optional bytes serialized_updater = 2;
+       */
+      public com.google.protobuf.ByteString getSerializedUpdater() {
+        return serializedUpdater_;
+      }
+      /**
+       * optional bytes serialized_updater = 2;
+       */
+      public Builder setSerializedUpdater(com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000002;
+        serializedUpdater_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * optional bytes serialized_updater = 2;
+       */
+      public Builder clearSerializedUpdater() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        serializedUpdater_ = getDefaultInstance().getSerializedUpdater();
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RowUpdater)
+    }
+
+    static {
+      defaultInstance = new RowUpdater(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RowUpdater)
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_RowUpdater_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_RowUpdater_fieldAccessorTable;
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\020RowUpdater.proto\022\010hbase.pb\"6\n\nRowUpdat" +
+      "er\022\014\n\004name\030\001 \002(\t\022\032\n\022serialized_updater\030\002" +
+      " \001(\014BC\n*org.apache.hadoop.hbase.protobuf" +
+      ".generatedB\020RowUpdaterProtosH\001\240\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_hbase_pb_RowUpdater_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_hbase_pb_RowUpdater_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_RowUpdater_descriptor,
+              new java.lang.String[] { "Name", "SerializedUpdater", });
+          return null;
+        }
+      };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
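
Note how the two fields of this message divide the work: serialized_updater carries the updater's opaque state, while name tells the server which class to revive it with. Presumably the server resurrects the updater the same way Filters are deserialized, by reflecting on a static parseFrom(byte[]); ProtobufUtil.toUpdate is not shown in this section, so treat this sketch as an assumption:

    // Hypothetical server-side deserialization sketch:
    static RowUpdater toRowUpdater(RowUpdaterProtos.RowUpdater proto) throws Exception {
      Class<?> clazz = Class.forName(proto.getName());
      java.lang.reflect.Method parseFrom = clazz.getMethod("parseFrom", byte[].class);
      return (RowUpdater) parseFrom.invoke(null,
          (Object) proto.getSerializedUpdater().toByteArray());
    }
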
diff --git hbase-protocol/src/main/protobuf/Client.proto hbase-protocol/src/main/protobuf/Client.proto
index 237b932..b4ee3e4 100644
--- hbase-protocol/src/main/protobuf/Client.proto
+++ hbase-protocol/src/main/protobuf/Client.proto
@@ -30,6 +30,7 @@ import "Filter.proto";
 import "Cell.proto";
 import "Comparator.proto";
 import "MapReduce.proto";
+import "RowUpdater.proto";
 
 /**
  * The protocol buffer version of Authorizations.
@@ -167,6 +168,8 @@ message MutationProto {
 
   optional uint64 nonce = 9;
 
+  optional RowUpdater row_updater = 10;
+
   enum Durability {
     USE_DEFAULT  = 0;
     SKIP_WAL     = 1;
@@ -180,6 +183,7 @@ message MutationProto {
     INCREMENT = 1;
     PUT = 2;
     DELETE = 3;
+    UPDATE = 4;
   }
 
   enum DeleteType {
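
On the wire an update is an ordinary MutationProto, distinguished by the new UPDATE enum value and the embedded RowUpdater message. The client-side converter must assemble something roughly like the following (builder method names follow the usual protobuf codegen; the actual RequestConverter change is not part of this hunk, so this is a sketch):

    ClientProtos.MutationProto m = ClientProtos.MutationProto.newBuilder()
        .setRow(ByteString.copyFrom(row))  // standard MutationProto row field
        .setMutateType(ClientProtos.MutationProto.MutationType.UPDATE)
        .setRowUpdater(RowUpdaterProtos.RowUpdater.newBuilder()
            .setName(updater.getClass().getName())
            .setSerializedUpdater(ByteString.copyFrom(updater.toByteArray())))
        .build();
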
diff --git hbase-protocol/src/main/protobuf/RowUpdater.proto hbase-protocol/src/main/protobuf/RowUpdater.proto
new file mode 100644
index 0000000..6722dcb
--- /dev/null
+++ hbase-protocol/src/main/protobuf/RowUpdater.proto
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are used for row updaters
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "RowUpdaterProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+message RowUpdater {
+  required string name = 1;
+  optional bytes serialized_updater = 2;
+}
\ No newline at end of file
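
For orientation, this is how the pieces fit together on the client side. A minimal sketch based on the new TestUpdateFromClientSide further down (GetAndPutMultiColumn is one of that test's RowUpdater implementations, standing in for any updater):

    // Read-modify-write a row in a single RPC:
    RowUpdater updater =
        new GetAndPutMultiColumn(family, qual0, qual1, qual2, 5L);
    Update update = new Update(row, updater);
    update.add(family, qual0);  // columns the server reads and passes to apply()
    update.add(family, qual1);
    update.add(family, qual2);
    Result r = table.update(update);  // cells produced by the updater's Mutation
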
diff --git hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index 5debf39..0288576 100644
--- hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -73,6 +73,7 @@ import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.client.Update;
 
 /**
  * HTable interface to remote tables accessed via REST gateway
@@ -510,6 +511,11 @@ public class RemoteHTable implements Table {
     // no-op
   }
 
+  @Override
+  public Result update(Update update) throws IOException {
+    throw new IOException("Update not supported");
+  }
+
   class Scanner implements ResultScanner {
 
     String uri;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
index 6a73261..9e16648 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
@@ -103,10 +103,12 @@ public final class HTableWrapper implements Table {
     }
   }
 
+  @Override
   public Configuration getConfiguration() {
     return table.getConfiguration();
   }
 
+  @Override
   public void close() throws IOException {
     try {
       internalClose();
@@ -115,14 +117,17 @@ public final class HTableWrapper implements Table {
     }
   }
 
+  @Override
   public Result get(Get get) throws IOException {
     return table.get(get);
   }
 
+  @Override
   public boolean exists(Get get) throws IOException {
     return table.exists(get);
   }
 
+  @Override
   public boolean[] existsAll(List<Get> gets) throws IOException {
     return table.existsAll(gets);
   }
@@ -144,47 +149,57 @@ public final class HTableWrapper implements Table {
     return results;
   }
 
+  @Override
   public void put(Put put) throws IOException {
     table.put(put);
   }
 
+  @Override
   public void put(List<Put> puts) throws IOException {
     table.put(puts);
   }
 
+  @Override
   public void delete(Delete delete) throws IOException {
     table.delete(delete);
   }
 
+  @Override
   public void delete(List<Delete> deletes) throws IOException {
     table.delete(deletes);
   }
 
+  @Override
   public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
       byte[] value, Put put) throws IOException {
     return table.checkAndPut(row, family, qualifier, value, put);
   }
 
+  @Override
   public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
       CompareOp compareOp, byte[] value, Put put) throws IOException {
     return table.checkAndPut(row, family, qualifier, compareOp, value, put);
   }
 
+  @Override
   public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
       byte[] value, Delete delete) throws IOException {
     return table.checkAndDelete(row, family, qualifier, value, delete);
   }
 
+  @Override
   public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
       CompareOp compareOp, byte[] value, Delete delete) throws IOException {
     return table.checkAndDelete(row, family, qualifier, compareOp, value, delete);
   }
 
+  @Override
   public long incrementColumnValue(byte[] row, byte[] family,
       byte[] qualifier, long amount) throws IOException {
     return table.incrementColumnValue(row, family, qualifier, amount);
   }
 
+  @Override
   public long incrementColumnValue(byte[] row, byte[] family,
       byte[] qualifier, long amount, Durability durability)
       throws IOException {
@@ -202,19 +217,23 @@ public final class HTableWrapper implements Table {
     return table.increment(increment);
   }
 
+  @Override
   public ResultScanner getScanner(Scan scan) throws IOException {
     return table.getScanner(scan);
   }
 
+  @Override
   public ResultScanner getScanner(byte[] family) throws IOException {
     return table.getScanner(family);
   }
 
+  @Override
   public ResultScanner getScanner(byte[] family, byte[] qualifier)
       throws IOException {
     return table.getScanner(family, qualifier);
   }
 
+  @Override
   public HTableDescriptor getTableDescriptor() throws IOException {
     return table.getTableDescriptor();
   }
@@ -330,4 +349,10 @@ public final class HTableWrapper implements Table {
 
   @Override
   public int getReadRpcTimeout() { return table.getReadRpcTimeout(); }
+
+  @Override
+  public Result update(Update update) throws IOException {
+    return table.update(update);
+  }
+
 }
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
index 9f033c0..ace559d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WALKey;
 
 import com.google.common.collect.ImmutableList;
+import org.apache.hadoop.hbase.client.Update;
 
 /**
  * An abstract class that implements RegionObserver.
@@ -402,6 +403,24 @@ public class BaseRegionObserver implements RegionObserver {
   }
 
   @Override
+  public Result preUpdate(final ObserverContext<RegionCoprocessorEnvironment> e,
+      final Update update) throws IOException {
+    return null;
+  }
+
+  @Override
+  public Result preUpdateAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> e,
+      final Update update) throws IOException {
+    return null;
+  }
+
+  @Override
+  public Result postUpdate(final ObserverContext<RegionCoprocessorEnvironment> e,
+      final Update update, final Result result) throws IOException {
+    return result;
+  }
+
+  @Override
   public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> e,
       final Scan scan, final RegionScanner s) throws IOException {
     return s;
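
Because RSRpcServices (further down) only runs the default update path when preUpdate returns null, an observer can veto an update, or answer it itself. A minimal sketch with a hypothetical guard observer (class name and policy are illustrative only):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Update;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    public class RejectUpdatesObserver extends BaseRegionObserver {
      @Override
      public Result preUpdate(ObserverContext<RegionCoprocessorEnvironment> c,
          Update update) throws IOException {
        // Throwing refuses the write; returning a non-null Result would
        // instead be sent back to the client without running the update.
        throw new IOException("updates are disabled on this table");
      }
    }
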
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index ccdce03..fb1d96c 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.common.collect.ImmutableList;
+import org.apache.hadoop.hbase.client.Update;
 
 /**
  * Coprocessors implement this interface to observe and mediate client actions
@@ -1059,6 +1060,65 @@ public interface RegionObserver extends Coprocessor {
     throws IOException;
 
   /**
+   * Called before Update.
+   * <p>
+   * Call CoprocessorEnvironment#bypass to skip default actions
+   * <p>
+   * Call CoprocessorEnvironment#complete to skip any subsequent chained
+   * coprocessors
+   * <p>
+   * Note: Do not retain references to any Cells in 'update' beyond the life of this invocation.
+   * If a Cell reference is needed for later use, copy the cell and use that.
+   * @param c the environment provided by the region server
+   * @param update update object
+   * @return result to return to the client if bypassing default processing
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  Result preUpdate(final ObserverContext<RegionCoprocessorEnvironment> c,
+      final Update update)
+    throws IOException;
+
+  /**
+   * Called before Update but after acquiring rowlock.
+   * <p>
+   * Note: Avoid long-running operations in this hook, since the row stays locked
+   * while it runs. Trying to acquire a lock on another row from within this hook
+   * can lead to potential deadlock.
+   * <p>
+   * Call CoprocessorEnvironment#bypass to skip default actions
+   * <p>
+   * Call CoprocessorEnvironment#complete to skip any subsequent chained coprocessors
+   * <p>
+   * Note: Do not retain references to any Cells in 'update' beyond the life of this invocation.
+   * If a Cell reference is needed for later use, copy the cell and use that.
+   *
+   * @param c the environment provided by the region server
+   * @param update update object
+   * @return result to return to the client if bypassing default processing
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  Result preUpdateAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
+      final Update update) throws IOException;
+
+  /**
+   * Called after Update.
+   * <p>
+   * Call CoprocessorEnvironment#complete to skip any subsequent chained
+   * coprocessors
+   * <p>
+   * Note: Do not retain references to any Cells in 'update' beyond the life of this invocation.
+   * If a Cell reference is needed for later use, copy the cell and use that.
+   * @param c the environment provided by the region server
+   * @param update update object
+   * @param result the result returned by the update
+   * @return the result to return to the client
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  Result postUpdate(final ObserverContext<RegionCoprocessorEnvironment> c,
+      final Update update, final Result result)
+    throws IOException;
+
+  /**
    * Called before the client opens a new scanner.
    * <p>
    * Call CoprocessorEnvironment#bypass to skip default actions
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 831627b..37f2c48 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -109,6 +109,7 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Update;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
 import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
@@ -7238,6 +7239,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return doDelta(Operation.INCREMENT, mutation, nonceGroup, nonce, mutation.isReturnResults());
   }
 
+  @Override
+  public Result update(Update update, long nonceGroup, long nonce) throws IOException {
+    return doDelta(Operation.UPDATE, update, nonceGroup, nonce, update.isReturnResults());
+  }
+
   /**
    * Add "deltas" to Cells. Deltas are increments or appends. Switch on op.
    *
@@ -7270,9 +7276,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       Durability effectiveDurability = getEffectiveDurability(mutation.getDurability());
       Map<Store, List<Cell>> forMemStore =
         new HashMap<Store, List<Cell>>(mutation.getFamilyCellMap().size());
-      // Reckon Cells to apply to WAL -- in returned walEdit -- and what to add to memstore and
-      // what to return back to the client (in 'forMemStore' and 'results' respectively).
-      WALEdit walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results);
+      WALEdit walEdit;
+      if (op == Operation.UPDATE) {
+        walEdit = reckonUpdate((Update) mutation, effectiveDurability, forMemStore, results);
+      } else { // append/increment
+        // Reckon Cells to apply to WAL -- in returned walEdit -- and what to add to memstore and
+        // what to return back to the client (in 'forMemStore' and 'results' respectively).
+        walEdit = reckonDeltas(op, mutation, effectiveDurability, forMemStore, results);
+      }
       // Actually write to WAL now if a walEdit to apply.
       if (walEdit != null && !walEdit.isEmpty()) {
         writeEntry = doWALAppend(walEdit, effectiveDurability, nonceGroup, nonce);
@@ -7322,6 +7333,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         case APPEND:
           this.metricsRegion.updateAppend();
           break;
+        case UPDATE:
+          this.metricsRegion.updateUpdate();
+          break;
         default:
           break;
       }
@@ -7378,6 +7392,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       case APPEND:
         result = this.coprocessorHost.preAppendAfterRowLock((Append)mutation);
         break;
+      case UPDATE:
+        result = this.coprocessorHost.preUpdateAfterRowLock((Update)mutation);
+        break;
       default: throw new UnsupportedOperationException(op.toString());
     }
   }
@@ -7408,12 +7425,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       Store store = this.stores.get(columnFamilyName);
       // Reckon for the Store what to apply to WAL and MemStore.
       List<Cell> toApply =
-        reckonDeltasByStore(store, op, mutation, effectiveDurability, now, deltas, results);
+        reckonDeltasByStore(store, op, mutation, now, deltas, results);
       if (!toApply.isEmpty()) {
         forMemStore.put(store, toApply);
         if (writeToWAL) {
           if (walEdit == null) {
-            walEdit = new WALEdit();
+            walEdit = new WALEdit(toApply.size());
           }
           walEdit.getCells().addAll(toApply);
         }
@@ -7422,6 +7439,47 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return walEdit;
   }
 
+  WALEdit reckonUpdate(final Update update, final Durability effectiveDurability,
+      final Map<Store, List<Cell>> forMemStore, final List<Cell> results) throws IOException {
+    List<Cell> currentValues = get(update,
+        null/*Default IsolationLevel*/,
+        update.getTimeRange());
+    final boolean writeToWAL = effectiveDurability != Durability.SKIP_WAL;
+    Mutation result = update.getRowUpdater().apply(currentValues, update);
+    if (result == null || result.isEmpty()) {
+      return null;
+    }
+    NavigableMap<byte[], List<Cell>> familyMap = result.getFamilyCellMap();
+    long now = EnvironmentEdgeManager.currentTime();
+    if (result instanceof Put) {
+      checkFamilies(familyMap.keySet());
+      checkTimestamps(result.getFamilyCellMap(), now);
+    } else if (result instanceof Delete) {
+      prepareDelete((Delete)result);
+    } else {
+      throw new DoNotRetryIOException("RowUpdater must return Put or Delete");
+    }
+    checkRow(result.getRow(), "reckonUpdate");
+    WALEdit walEdit = null;
+    for (Map.Entry<byte[], List<Cell>> entry : familyMap.entrySet()) {
+      Store store = this.stores.get(entry.getKey());
+      if (entry.getValue().isEmpty()) {
+        continue;
+      }
+      forMemStore.put(store, entry.getValue());
+      if (results != null) {
+        results.addAll(entry.getValue());
+      }
+      if (writeToWAL) {
+        if (walEdit == null) {
+          walEdit = new WALEdit(entry.getValue().size());
+        }
+        walEdit.add(entry.getValue());
+      }
+    }
+    return walEdit;
+  }
+
   /**
    * Reckon the Cells to apply to WAL, memstore, and to return to the Client in passed
    * column family/Store.
@@ -7438,8 +7496,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * values. Side effect is our filling out of the results List.
    */
   private List<Cell> reckonDeltasByStore(final Store store, final Operation op,
-      final Mutation mutation, final Durability effectiveDurability, final long now,
-      final List<Cell> deltas, final List<Cell> results)
+      final Mutation mutation, final long now, final List<Cell> deltas, final List<Cell> results)
     throws IOException {
     byte [] columnFamily = store.getFamily().getName();
     List<Cell> toApply = new ArrayList<Cell>(deltas.size());
@@ -7618,6 +7675,31 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
   /**
+   * Do a specific Get on passed columnFamily and column qualifiers.
+   * @param mutation Mutation we are doing this Get for. (TODO: Go all Gets in one go)
+   * @param isolation Isolation level to use for the Get, or null for the default.
+   * @param tr Time range to set on the Get, or null for none.
+   * @return Return list of Cells found.
+   */
+  private List<Cell> get(final Mutation mutation, final IsolationLevel isolation, final TimeRange tr)
+      throws IOException {
+    Get get = new Get(mutation.getRow());
+    if (isolation != null) {
+      get.setIsolationLevel(isolation);
+    }
+    for (Map.Entry<byte[], List<Cell>> entry : mutation.getFamilyCellMap().entrySet()) {
+      for (Cell cell: entry.getValue()) {
+        get.addColumn(entry.getKey(), CellUtil.cloneQualifier(cell));
+      }
+    }
+    // Updates carry a time range. If set, put it on the Get.
+    if (tr != null) {
+      get.setTimeRange(tr.getMin(), tr.getMax());
+    }
+    return get(get, false);
+  }
+
+  /**
    * @return Sorted list of cells using comparator
    */
   private static List<Cell> sort(List<Cell> cells, final Comparator<Cell> comparator) {
@@ -7861,6 +7943,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       checkReadsEnabled();
     case INCREMENT: // write operations
     case APPEND:
+    case UPDATE:
     case SPLIT_REGION:
     case MERGE_REGION:
     case PUT:
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java
index 0364e91..1b1ff1b 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegion.java
@@ -61,6 +61,10 @@ public class MetricsRegion {
     source.updateAppend();
   }
 
+  public void updateUpdate() {
+    source.updateUpdate();
+  }
+
   public void updateIncrement() {
     source.updateIncrement();
   }
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
index 8bca6c5..d11aebb 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java
@@ -86,6 +86,13 @@ public class MetricsRegionServer {
     serverSource.updateIncrement(t);
   }
 
+  public void updateUpdate(long t) {
+    if (t > 1000) {
+      serverSource.incrSlowUpdate();
+    }
+    serverSource.updateUpdate(t);
+  }
+
   public void updateAppend(long t) {
     if (t > 1000) {
       serverSource.incrSlowAppend();
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index b0165f0..b03c72c 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Update;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
@@ -647,6 +648,54 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   }
 
   /**
+   * Execute an update mutation.
+   *
+   * @param region the region to run the update on
+   * @param mutation the update mutation
+   * @return the Result
+   * @throws IOException
+   */
+  private Result update(final Region region, final OperationQuota quota,
+      final MutationProto mutation, final CellScanner cells, long nonceGroup)
+      throws IOException {
+    long before = EnvironmentEdgeManager.currentTime();
+    Update update = ProtobufUtil.toUpdate(mutation, cells);
+    quota.addMutation(update);
+    Result r = null;
+    if (region.getCoprocessorHost() != null) {
+      r = region.getCoprocessorHost().preUpdate(update);
+    }
+    if (r == null) {
+      boolean canProceed = startNonceOperation(mutation, nonceGroup);
+      boolean success = false;
+      try {
+        long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
+        if (canProceed) {
+          r = region.update(update, nonceGroup, nonce);
+        } else {
+          // convert duplicate update to get
+          List<Cell> results = region.get(ProtobufUtil.toGet(mutation, cells), false, nonceGroup,
+            nonce);
+          r = Result.create(results);
+        }
+        success = true;
+      } finally {
+        if (canProceed) {
+          endNonceOperation(mutation, nonceGroup, success);
+        }
+      }
+      if (region.getCoprocessorHost() != null) {
+        r = region.getCoprocessorHost().postUpdate(update, r);
+      }
+    }
+    if (regionServer.metricsRegionServer != null) {
+      regionServer.metricsRegionServer.updateUpdate(
+        EnvironmentEdgeManager.currentTime() - before);
+    }
+    return r;
+  }
+
+  /**
    * Execute an increment mutation.
    *
    * @param region
@@ -805,6 +854,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         case INCREMENT:
           r = increment(region, quota, action.getMutation(), cellScanner, nonceGroup);
           break;
+        case UPDATE:
+          r = update(region, quota, action.getMutation(), cellScanner, nonceGroup);
+          break;
         case PUT:
         case DELETE:
           // Collect the individual mutations and apply in a batch
@@ -2508,7 +2560,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       MutationType type = mutation.getMutateType();
       quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE);
-
       switch (type) {
       case APPEND:
         // TODO: this doesn't actually check anything.
@@ -2518,6 +2569,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         // TODO: this doesn't actually check anything.
         r = increment(region, quota, mutation, cellScanner, nonceGroup);
         break;
+      case UPDATE:
+        r = update(region, quota, mutation, cellScanner, nonceGroup);
+        break;
       case PUT:
         Put put = ProtobufUtil.toPut(mutation, cellScanner);
         quota.addMutation(put);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 1b106b2..4538e11 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.client.Update;
 
 /**
  * Regions store data for a certain region of a table. It stores all columns
@@ -236,7 +237,7 @@ public interface Region extends ConfigurationObserver {
    * context for various checks.
    */
   enum Operation {
-    ANY, GET, PUT, DELETE, SCAN, APPEND, INCREMENT, SPLIT_REGION, MERGE_REGION, BATCH_MUTATE,
+    ANY, GET, PUT, DELETE, UPDATE, SCAN, APPEND, INCREMENT, SPLIT_REGION, MERGE_REGION, BATCH_MUTATE,
     REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT
   }
@@ -390,6 +391,16 @@ public interface Region extends ConfigurationObserver {
   void delete(Delete delete) throws IOException;
 
   /**
+   * Perform an update operation on a single row.
+   * @param update the update to apply
+   * @param nonceGroup the nonce group of this operation
+   * @param nonce the nonce of this operation
+   * @return result of the operation
+   * @throws IOException
+   */
+  Result update(Update update, long nonceGroup, long nonce) throws IOException;
+
+  /**
    * Do a get based on the get parameter.
    * @param get query parameters
    * @return result of the operation
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index c7e3598..4d84d91 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Update;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
@@ -1285,6 +1286,56 @@ public class RegionCoprocessorHost
   }
 
   /**
+   * @param update update object
+   * @param result the result returned by the update
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  public Result postUpdate(final Update update, Result result) throws IOException {
+    return execOperationWithResult(result,
+      coprocessors.isEmpty() ? null : new RegionOperationWithResult<Result>() {
+        @Override
+        public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
+            throws IOException {
+          setResult(oserver.postUpdate(ctx, update, getResult()));
+        }
+      });
+  }
+
+  /**
+   * @param update update object
+   * @return result to return to client if default operation should be
+   * bypassed, null otherwise
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  public Result preUpdate(final Update update) throws IOException {
+    return execOperationWithResult(true, null,
+      coprocessors.isEmpty() ? null : new RegionOperationWithResult<Result>() {
+        @Override
+        public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
+            throws IOException {
+          setResult(oserver.preUpdate(ctx, update));
+        }
+      });
+  }
+
+  /**
+   * @param update update object
+   * @return result to return to client if default operation should be
+   * bypassed, null otherwise
+   * @throws IOException if an error occurred on the coprocessor
+   */
+  public Result preUpdateAfterRowLock(final Update update) throws IOException {
+    return execOperationWithResult(true, null,
+      coprocessors.isEmpty() ? null : new RegionOperationWithResult<Result>() {
+        @Override
+        public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
+            throws IOException {
+          setResult(oserver.preUpdateAfterRowLock(ctx, update));
+        }
+      });
+  }
+
+  /**
    * @param scan the Scan specification
    * @return scanner id to return to client if default operation should be
    * bypassed, false otherwise
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
index 75c1c3e..8902b66 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.io.Writable;
 
 import com.google.common.annotations.VisibleForTesting;
+import java.util.Collection;
 
 /**
@@ -171,6 +172,11 @@ public class WALEdit implements Writable, HeapSize {
     return this;
   }
 
+  public WALEdit add(Collection<Cell> cells) {
+    this.cells.addAll(cells);
+    return this;
+  }
+
   public boolean isEmpty() {
     return cells.isEmpty();
   }
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateFromClientSide.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateFromClientSide.java
new file mode 100644
index 0000000..580bbb1
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateFromClientSide.java
@@ -0,0 +1,582 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.junit.After;
+import org.junit.AfterClass;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.experimental.categories.Category;
+import org.junit.Test;
+import org.junit.rules.TestName;
+
+@Category({LargeTests.class, ClientTests.class})
+public class TestUpdateFromClientSide {
+  private static final Log LOG = LogFactory.getLog(TestUpdateFromClientSide.class);
+  @Rule
+  public TestName name = new TestName();
+  // name of test method
+  private TableName tableName;
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final int SLAVES = 3;
+  private static final byte[] ROW = Bytes.toBytes("testRow");
+  private static final byte[] QUALITY_QUAL_0 = Bytes.toBytes("quality_qual_0");
+  private static final byte[] QUALITY_QUAL_1 = Bytes.toBytes("quality_qual_1");
+  private static final byte[] QUALITY_QUAL_2 = Bytes.toBytes("quality_qual_2");
+  private static final byte[] FAMILY = Bytes.toBytes("f1");
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(SLAVES);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+    tableName = TableName.valueOf(name.getMethodName());
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @After
+  public void tearDown() throws Exception {
+    for (HTableDescriptor htd: TEST_UTIL.getHBaseAdmin().listTables()) {
+      LOG.info("Tear down, remove table=" + htd.getTableName());
+      TEST_UTIL.deleteTable(htd.getTableName());
+    }
+  }
+
+  @Test
+  public void testUpdateWithColumn() throws IOException {
+    doUpdateAndColumn(tableName, true);
+  }
+
+  @Test
+  public void testUpdateWithoutColumn() throws IOException {
+    doUpdateAndColumn(tableName, false);
+  }
+
+  private void doUpdateAndColumn(final TableName tableName, boolean addColumn) throws IOException {
+    Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY });
+    long initValue = 5;
+    Update update = new Update(ROW,
+      new GetAndPutMultiColumn(FAMILY, QUALITY_QUAL_0, QUALITY_QUAL_1, QUALITY_QUAL_2, initValue));
+    if (addColumn) {
+      update.add(FAMILY, QUALITY_QUAL_0);
+      update.add(FAMILY, QUALITY_QUAL_1);
+      update.add(FAMILY, QUALITY_QUAL_2);
+    }
+    // result=
+    //   QUALITY_QUAL_0 : null
+    //   QUALITY_QUAL_1 : null
+    //   QUALITY_QUAL_2 : null
+    Result r = table.update(update);
+    assertEquals(3, r.size());
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_0));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_1));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_2));
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_0), initValue);
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_1), initValue);
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_2), initValue);
+    // result=
+    //   QUALITY_QUAL_0 : 5
+    //   QUALITY_QUAL_1 : 5
+    //   QUALITY_QUAL_2 : 5
+    r = table.update(update);
+    assertEquals(1, r.size());
+    assertFalse(r.containsColumn(FAMILY, QUALITY_QUAL_0));
+    assertFalse(r.containsColumn(FAMILY, QUALITY_QUAL_1));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_2));
+    // result=
+    //   QUALITY_QUAL_0 : 5
+    //   QUALITY_QUAL_1 : 5
+    //   QUALITY_QUAL_2 : 15
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_2), initValue * 3);
+    Put put = new Put(ROW);
+    long addValue = 1000;
+    put.addColumn(FAMILY, QUALITY_QUAL_0, Bytes.toBytes(addValue));
+    table.put(put);
+    // result=
+    //   QUALITY_QUAL_0 : 1000
+    //   QUALITY_QUAL_1 : 5
+    //   QUALITY_QUAL_2 : 15
+    r = table.update(update);
+    assertEquals(1, r.size());
+    assertFalse(r.containsColumn(FAMILY, QUALITY_QUAL_0));
+    assertFalse(r.containsColumn(FAMILY, QUALITY_QUAL_1));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_2));
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_2), addValue + initValue * 4);
+    // result=
+    //   QUALITY_QUAL_0 : 1000
+    //   QUALITY_QUAL_1 : 5
+    //   QUALITY_QUAL_2 : 1020
+    r = table.get(new Get(ROW));
+    assertEquals(3, r.size());
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_0));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_1));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_2));
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_0), addValue);
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_1), initValue);
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_2), addValue + initValue * 4);
+  }
+
+  @Test
+  public void testUpdateWithObserver() throws IOException, InterruptedException {
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    desc.addCoprocessor(UpdateObserver.class.getName());
+    desc.addFamily(new HColumnDescriptor(FAMILY));
+    TEST_UTIL.getAdmin().createTable(desc);
+    Table table = TEST_UTIL.getConnection().getTable(tableName);
+    long initValue = 5;
+    Update update = new Update(ROW,
+      new GetAndPutMultiColumn(FAMILY, QUALITY_QUAL_0, QUALITY_QUAL_1, QUALITY_QUAL_2, initValue));
+    update.add(FAMILY, QUALITY_QUAL_0);
+    update.add(FAMILY, QUALITY_QUAL_1);
+    update.add(FAMILY, QUALITY_QUAL_2);
+    Result r = table.update(update);
+    assertEquals(3, r.size());
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_0));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_1));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_2));
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_0), initValue);
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_1), initValue);
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_2), initValue);
+
+    UpdateObserver observer = find(tableName, UpdateObserver.class);
+    assertEquals(1, observer.preUpdateCount.get());
+    assertEquals(1, observer.preUpdateAfterRowLockCount.get());
+    assertEquals(1, observer.postUpdateCount.get());
+  }
+
+  @Test
+  public void testUpdateAndDelete() throws IOException, InterruptedException {
+    Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY });
+    long initValue = 999;
+    Update update = new Update(ROW,
+      new GetAndPutMultiColumn(FAMILY, QUALITY_QUAL_0, QUALITY_QUAL_1, QUALITY_QUAL_2, initValue));
+    Result r = table.update(update);
+    assertEquals(3, r.size());
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_0));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_1));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_2));
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_0), initValue);
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_1), initValue);
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_2), initValue);
+
+    update = new Update(ROW,
+      new GetAndDeleteMultiColumn(FAMILY, QUALITY_QUAL_0, QUALITY_QUAL_1, QUALITY_QUAL_2, initValue));
+    r = table.update(update);
+    assertEquals(1, r.size());
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_2));
+    assertEquals(KeyValue.Type.Delete.getCode(),
+      r.getColumnLatestCell(FAMILY, QUALITY_QUAL_2).getTypeByte());
+  }
+
+  @Test
+  public void testUpdateNoData() throws IOException, InterruptedException {
+    Table table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY });
+    long initValue = 999;
+    Update update = new Update(ROW,
+      new GetAndPutMultiColumn(FAMILY, QUALITY_QUAL_0, QUALITY_QUAL_1, QUALITY_QUAL_2, initValue));
+    Result r = table.update(update);
+    assertEquals(3, r.size());
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_0));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_1));
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_2));
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_0), initValue);
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_1), initValue);
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_2), initValue);
+
+    Object[] rr = new Object[1];
+    table.batch(Collections.singletonList(update), rr);
+    r = (Result) rr[0];
+    assertEquals(1, r.size());
+    assertTrue(r.containsColumn(FAMILY, QUALITY_QUAL_2));
+    verify(r.getColumnLatestCell(FAMILY, QUALITY_QUAL_2), initValue * 3);
+  }
+
+  @Test
+  public void testUpdateResults() throws Exception {
+    List<Result> resultsWithWal = doUpdateAndWAL(TableName.valueOf(name.getMethodName() + "withWal"), true);
+    List<Result> resultsWithoutWal = doUpdateAndWAL(TableName.valueOf(name.getMethodName() + "withoutWal"), false);
+    assertEquals(resultsWithWal.size(), resultsWithoutWal.size());
+    for (int i = 0; i != resultsWithWal.size(); ++i) {
+      Result resultWithWal = resultsWithWal.get(i);
+      Result resultWithoutWal = resultsWithoutWal.get(i);
+      assertEquals(resultWithWal.rawCells().length, resultWithoutWal.rawCells().length);
+      for (int j = 0; j != resultWithWal.rawCells().length; ++j) {
+        Cell cellWithWal = resultWithWal.rawCells()[j];
+        Cell cellWithoutWal = resultWithoutWal.rawCells()[j];
+        assertTrue(Bytes.equals(CellUtil.cloneRow(cellWithWal), CellUtil.cloneRow(cellWithoutWal)));
+        assertTrue(Bytes.equals(CellUtil.cloneFamily(cellWithWal), CellUtil.cloneFamily(cellWithoutWal)));
+        assertTrue(Bytes.equals(CellUtil.cloneQualifier(cellWithWal), CellUtil.cloneQualifier(cellWithoutWal)));
+        assertTrue(Bytes.equals(CellUtil.cloneValue(cellWithWal), CellUtil.cloneValue(cellWithoutWal)));
+      }
+    }
+  }
+
+  @Test
+  public void doBatchUpdateWithResults() throws Exception {
+    doBatchUpdate(tableName, true);
+  }
+
+  @Test
+  public void doBatchUpdateWithoutResults() throws Exception {
+    doBatchUpdate(tableName, false);
+  }
+
+  private void doBatchUpdate(TableName tableName, boolean needReturnResults) throws Exception {
+    Table table = TEST_UTIL.createTable(tableName, FAMILY);
+    long initValue = 5;
+    Update update1 = new Update(ROW,
+      new GetAndPutMultiColumn(FAMILY, QUALITY_QUAL_0, QUALITY_QUAL_1, QUALITY_QUAL_2, initValue));
+    update1.setReturnResults(needReturnResults);
+    Update update2 = new Update(ROW,
+      new GetAndPutMultiColumn(FAMILY, QUALITY_QUAL_0, QUALITY_QUAL_1, QUALITY_QUAL_2, initValue));
+    update2.setReturnResults(needReturnResults);
+    Object[] results = new Object[2];
+    table.batch(Arrays.asList(update1, update2), results);
+    Result r0 = (Result) results[0];
+    Result r1 = (Result) results[1];
+    if (needReturnResults) {
+      assertEquals(3, r0.size());
+      assertKey(r0.getColumnLatestCell(FAMILY, QUALITY_QUAL_0),
+        ROW, FAMILY, QUALITY_QUAL_0, initValue);
+      assertKey(r0.getColumnLatestCell(FAMILY, QUALITY_QUAL_1),
+        ROW, FAMILY, QUALITY_QUAL_1, initValue);
+      assertKey(r0.getColumnLatestCell(FAMILY, QUALITY_QUAL_2),
+        ROW, FAMILY, QUALITY_QUAL_2, initValue);
+      assertEquals(1, r1.size());
+      assertKey(r1.getColumnLatestCell(FAMILY, QUALITY_QUAL_2),
+        ROW, FAMILY, QUALITY_QUAL_2, initValue * 3);
+    } else {
+      assertTrue(r0.isEmpty());
+      assertTrue(r1.isEmpty());
+    }
+  }
+
+  private List<Result> doUpdateAndWAL(final TableName tableName, final boolean walUsed) throws IOException {
+    Table table = TEST_UTIL.createTable(tableName, FAMILY);
+    long initValue = 5;
+    Update update = new Update(ROW,
+      new GetAndPutMultiColumn(FAMILY, QUALITY_QUAL_0, QUALITY_QUAL_1, QUALITY_QUAL_2, initValue));
+    if (walUsed) {
+      update.setDurability(Durability.SYNC_WAL);
+    } else {
+      update.setDurability(Durability.SKIP_WAL);
+    }
+    update.add(FAMILY, QUALITY_QUAL_0);
+    update.add(FAMILY, QUALITY_QUAL_1);
+    update.add(FAMILY, QUALITY_QUAL_2);
+    table.update(update);
+    List<Result> results = new ArrayList<>();
+    try (ResultScanner scanner = table.getScanner(new Scan())) {
+      for (Result r : scanner) {
+        results.add(r);
+      }
+    }
+    return results;
+  }
+
+  @Test
+  public void testDuplicateUpdate() throws Exception {
+    TableName name = TableName.valueOf("testDuplicateUpdate");
+    HTableDescriptor hdt = new HTableDescriptor(name);
+    Map<String, String> kvs = new HashMap<>();
+    kvs.put(HConnectionTestingUtility.SleepAtFirstRpcCall.SLEEP_TIME_CONF_KEY, "2000");
+    hdt.addCoprocessor(HConnectionTestingUtility.SleepAtFirstRpcCall.class.getName(), null, 1, kvs);
+    hdt.addFamily(new HColumnDescriptor(FAMILY));
+    TEST_UTIL.createTable(hdt, new byte[][] { ROW }).close();
+
+    Configuration c = new Configuration(TEST_UTIL.getConfiguration());
+    c.setInt(HConstants.HBASE_CLIENT_PAUSE, 50);
+    // Client will retry because the rpc timeout is smaller than the sleep time of the first rpc call
+    c.setInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, 1500);
+    c.setInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, 1500);
+
+    Connection connection = ConnectionFactory.createConnection(c);
+    Table table = connection.getTable(name);
+    table.setOperationTimeout(3 * 1000);
+    long initValue = 100;
+    Update update = new Update(ROW, new GetAndPutMultiColumn(FAMILY,
+      QUALITY_QUAL_0, QUALITY_QUAL_1, QUALITY_QUAL_2, initValue));
+    update.add(FAMILY, QUALITY_QUAL_0);
+    update.add(FAMILY, QUALITY_QUAL_1);
+    update.add(FAMILY, QUALITY_QUAL_2);
+    Result result = table.update(update);
+
+    // Verify expected result
+    Cell[] cells = result.rawCells();
+    assertEquals(3, cells.length);
+    assertKey(cells[0], ROW, FAMILY, QUALITY_QUAL_0, initValue);
+    assertKey(cells[1], ROW, FAMILY, QUALITY_QUAL_1, initValue);
+    assertKey(cells[2], ROW, FAMILY, QUALITY_QUAL_2, initValue);
+
+    // Verify expected result again
+    Result readResult = table.get(new Get(ROW));
+    cells = readResult.rawCells();
+    assertEquals(3, cells.length);
+    assertKey(cells[0], ROW, FAMILY, QUALITY_QUAL_0, initValue);
+    assertKey(cells[1], ROW, FAMILY, QUALITY_QUAL_1, initValue);
+    assertKey(cells[2], ROW, FAMILY, QUALITY_QUAL_2, initValue);
+  }
+
+  private void assertKey(Cell key, byte [] row, byte [] family,
+      byte [] qualifier, long value)
+      throws Exception {
+    assertTrue("Expected row [" + Bytes.toString(row) + "] " +
+      "Got row [" + Bytes.toString(CellUtil.cloneRow(key)) +"]",
+      Bytes.equals(row, CellUtil.cloneRow(key)));
+    assertTrue("Expected family [" + Bytes.toString(family) + "] " +
+      "Got family [" + Bytes.toString(CellUtil.cloneFamily(key)) + "]",
+      Bytes.equals(family, CellUtil.cloneFamily(key)));
+    assertTrue("Expected qualifier [" + Bytes.toString(qualifier) + "] " +
+      "Got qualifier [" + Bytes.toString(CellUtil.cloneQualifier(key)) + "]",
+      Bytes.equals(qualifier, CellUtil.cloneQualifier(key)));
+    assertTrue("Expected value [" + value + "] " +
+      "Got value [" + Bytes.toLong(CellUtil.cloneValue(key)) + "]",
+      Bytes.equals(Bytes.toBytes(value), CellUtil.cloneValue(key)));
+  }
+
+  private static void verify(Cell cell, long expected) {
+    long actual = Bytes.toLong(CellUtil.cloneValue(cell));
+    assertEquals(expected, actual);
+  }
+
+  private static <T> T find(final TableName tableName,
+      Class<T> clz) throws IOException, InterruptedException {
+    HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(tableName);
+    List<Region> regions = rs.getOnlineRegions(tableName);
+    assertEquals(1, regions.size());
+    Region region = regions.get(0);
+    Coprocessor cp = region.getCoprocessorHost().findCoprocessor(clz.getName());
+    assertTrue("The cp instance should be " + clz.getName() +
+      ", current instance is " + cp.getClass().getName(), clz.isInstance(cp));
+    return clz.cast(cp);
+  }
+
+  public static class UpdateObserver extends BaseRegionObserver {
+    private final AtomicInteger preUpdateCount = new AtomicInteger(0);
+    private final AtomicInteger preUpdateAfterRowLockCount = new AtomicInteger(0);
+    private final AtomicInteger postUpdateCount = new AtomicInteger(0);
+
+    @Override
+    public Result preUpdate(final ObserverContext<RegionCoprocessorEnvironment> e,
+        final Update update) throws IOException {
+      preUpdateCount.incrementAndGet();
+      return null;
+    }
+
+    @Override
+    public Result preUpdateAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> e,
+        final Update update) throws IOException {
+      preUpdateAfterRowLockCount.incrementAndGet();
+      return null;
+    }
+
+    @Override
+    public Result postUpdate(final ObserverContext<RegionCoprocessorEnvironment> e,
+        final Update update, final Result result) throws IOException {
+      postUpdateCount.incrementAndGet();
+      return result;
+    }
+  }
+
+  public static class GetAndDeleteMultiColumn extends GetAndPutMultiColumn {
+
+    public GetAndDeleteMultiColumn(final byte[] pbBytes) {
+      super(pbBytes);
+    }
+
+    public GetAndDeleteMultiColumn(byte[] family, byte[] addv0, byte[] addv1, byte[] addv2, long initValue) {
+      super(family, addv0, addv1, addv2, initValue);
+    }
+
+    @Override
+    public Mutation apply(List<Cell> currentValue, Mutation carrier) throws IOException {
+      long now = EnvironmentEdgeManager.currentTime();
+      Result r = Result.create(currentValue);
+      Cell cell_0 = r.getColumnLatestCell(family, addv0);
+      Cell cell_1 = r.getColumnLatestCell(family, addv1);
+      Cell cell_2 = r.getColumnLatestCell(family, addv2);
+      boolean needDelete = false;
+      if (cell_0 != null && cell_1 != null) {
+        if (Bytes.equals(CellUtil.cloneValue(cell_0), CellUtil.cloneValue(cell_1))) {
+          needDelete = true;
+        }
+      }
+      if (needDelete && cell_2 != null) {
+        Delete d = new Delete(CellUtil.cloneRow(cell_2));
+        d.addColumn(CellUtil.cloneFamily(cell_2), CellUtil.cloneQualifier(cell_2));
+        return d;
+      }
+      return null;
+    }
+
+    public static RowUpdater parseFrom(final byte[] pbBytes) throws DeserializationException {
+      return new GetAndDeleteMultiColumn(pbBytes);
+    }
+  }
+
+  public static class GetAndPutMultiColumn implements RowUpdater {
+    protected final byte[] family;
+    protected final byte[] addv0;
+    protected final byte[] addv1;
+    protected final byte[] addv2;
+    protected final long initValue;
+
+    GetAndPutMultiColumn(final byte[] pbBytes) {
+      int offset = 0;
+      int length = Bytes.toInt(pbBytes);
+      offset += Integer.BYTES;
+      family = Bytes.copy(pbBytes, offset, length);
+      offset += length;
+
+      length = Bytes.toInt(pbBytes, offset);
+      offset += Integer.BYTES;
+      addv0 = Bytes.copy(pbBytes, offset, length);
+      offset += length;
+
+      length = Bytes.toInt(pbBytes, offset);
+      offset += Integer.BYTES;
+      addv1 = Bytes.copy(pbBytes, offset, length);
+      offset += length;
+
+      length = Bytes.toInt(pbBytes, offset);
+      offset += Integer.BYTES;
+      addv2 = Bytes.copy(pbBytes, offset, length);
+      offset += length;
+      initValue = Bytes.toLong(pbBytes, offset);
+    }
+
+    GetAndPutMultiColumn(final byte[] family, final byte[] addv0,
+        final byte[] addv1, final byte[] addv2, long initValue) {
+      this.family = family;
+      this.addv0 = addv0;
+      this.addv1 = addv1;
+      this.addv2 = addv2;
+      this.initValue = initValue;
+    }
+
+    @Override
+    public Mutation apply(List<Cell> currentValue, Mutation carrier) throws IOException {
+      long now = EnvironmentEdgeManager.currentTime();
+      Result r = Result.create(currentValue);
+      Cell cell_0 = r.getColumnLatestCell(family, addv0);
+      Cell cell_1 = r.getColumnLatestCell(family, addv1);
+      Cell cell_2 = r.getColumnLatestCell(family, addv2);
+      long inc = 0;
+      Put put;
+      if (r.isEmpty()) {
+        put = new Put(carrier.getRow());
+      } else {
+        put = new Put(r.getRow());
+      }
+      byte[] init = Bytes.toBytes(initValue);
+      if (cell_0 == null) {
+        put.addColumn(family, addv0, now, init);
+      } else {
+        inc += Bytes.toLong(cell_0.getValueArray(), cell_0.getValueOffset(), cell_0.getValueLength());
+      }
+      if (cell_1 == null) {
+        put.addColumn(family, addv1, now, init);
+      } else {
+        inc += Bytes.toLong(cell_1.getValueArray(), cell_1.getValueOffset(), cell_1.getValueLength());
+      }
+      if (cell_2 != null) {
+        inc += Bytes.toLong(cell_2.getValueArray(), cell_2.getValueOffset(), cell_2.getValueLength());
+      }
+      if (inc == 0) {
+        put.addColumn(family, addv2, now, init);
+      } else {
+        put.addColumn(family, addv2, now, Bytes.toBytes(inc));
+      }
+      return put;
+    }
+
+    @Override
+    public byte[] toByteArray() throws IOException {
+      int length = family.length + Integer.BYTES
+        + addv0.length + Integer.BYTES
+        + addv1.length + Integer.BYTES
+        + addv2.length + Integer.BYTES
+        + Long.BYTES;
+      byte[] buf = new byte[length];
+      int offset = 0;
+      offset = Bytes.putInt(buf, offset, family.length);
+      offset = Bytes.putBytes(buf, offset, family, 0, family.length);
+      offset = Bytes.putInt(buf, offset, addv0.length);
+      offset = Bytes.putBytes(buf, offset, addv0, 0, addv0.length);
+      offset = Bytes.putInt(buf, offset, addv1.length);
+      offset = Bytes.putBytes(buf, offset, addv1, 0, addv1.length);
+      offset = Bytes.putInt(buf, offset, addv2.length);
+      offset = Bytes.putBytes(buf, offset, addv2, 0, addv2.length);
+      offset = Bytes.putLong(buf, offset, initValue);
+      return buf;
+    }
+
+    public static RowUpdater parseFrom(final byte[] pbBytes) throws DeserializationException {
+      return new GetAndPutMultiColumn(pbBytes);
+    }
+  }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java
index d2e78b7..4511d37 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java
@@ -49,6 +49,7 @@ import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.client.Update;
 
 /**
  * An implementation of {@link Table} that sits directly on a Region; it decorates the passed in
@@ -129,6 +130,11 @@ public class RegionAsTable implements Table {
     return results;
   }
 
+  @Override
+  public Result update(Update update) throws IOException {
+    throw new UnsupportedOperationException("Not supported yet.");
+  }
+
   static class RegionScannerToResultScannerAdaptor implements ResultScanner {
     private static final Result [] EMPTY_RESULT_ARRAY = new Result[0];
     private final RegionScanner regionScanner;
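
Taken together, the pieces above let a client express a read-modify-write in one round trip. As a closing illustration, a minimal single-column counter in the spirit of the test updaters; the class name and the length-prefixed serialization format are illustrative only, not part of the patch:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowUpdater;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.exceptions.DeserializationException;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CounterUpdater implements RowUpdater {
      private final byte[] family;
      private final byte[] qualifier;

      public CounterUpdater(byte[] family, byte[] qualifier) {
        this.family = family;
        this.qualifier = qualifier;
      }

      @Override
      public Mutation apply(List<Cell> currentValue, Mutation carrier) throws IOException {
        // Read the current counter value (0 if the cell does not exist yet).
        long old = 0;
        Result r = Result.create(currentValue);
        Cell c = r.getColumnLatestCell(family, qualifier);
        if (c != null) {
          old = Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength());
        }
        // Write back old + 1; the carrier supplies the row key.
        Put put = new Put(carrier.getRow());
        put.addColumn(family, qualifier, Bytes.toBytes(old + 1));
        return put;
      }

      @Override
      public byte[] toByteArray() throws IOException {
        // Illustrative: length-prefixed family + qualifier, as in the test updaters.
        byte[] buf = new byte[2 * Integer.BYTES + family.length + qualifier.length];
        int off = Bytes.putInt(buf, 0, family.length);
        off = Bytes.putBytes(buf, off, family, 0, family.length);
        off = Bytes.putInt(buf, off, qualifier.length);
        Bytes.putBytes(buf, off, qualifier, 0, qualifier.length);
        return buf;
      }

      public static RowUpdater parseFrom(byte[] b) throws DeserializationException {
        int famLen = Bytes.toInt(b);
        byte[] f = Bytes.copy(b, Integer.BYTES, famLen);
        int qualLen = Bytes.toInt(b, Integer.BYTES + famLen);
        byte[] q = Bytes.copy(b, 2 * Integer.BYTES + famLen, qualLen);
        return new CounterUpdater(f, q);
      }
    }
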