diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index d7b52d5..ecfdd03 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -22,6 +22,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.UUID; import java.util.concurrent.Future; import java.util.regex.Pattern; @@ -903,6 +904,16 @@ public interface Admin extends Abortable, Closeable { throws IOException; /** + * Split a table. Asynchronous operation. + * + * @param tableName table to split + * @param splitPoint the explicit position to split on + * @param id identifies this split request + * @throws IOException if a remote or network exception occurs + */ + void split(final TableName tableName, final byte[] splitPoint, final UUID id) throws IOException; + + /** * Split an individual region. Asynchronous operation. * * @param regionName region to split @@ -913,6 +924,17 @@ public interface Admin extends Abortable, Closeable { throws IOException; /** + * Split an individual region. Asynchronous operation. + * + * @param regionName region to split + * @param splitPoint the explicit position to split on + * @param id identifies this split request + * @throws IOException if a remote or network exception occurs + */ + void splitRegion(final byte[] regionName, final byte[] splitPoint, final UUID id) + throws IOException; + + /** * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that * it may be a while before your schema change is updated across all of the table. * You can use Future.get(long, TimeUnit) to wait on the operation to complete. 
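[Editorial note, not part of the patch: the Admin interface above gains UUID-tagged split overloads. A minimal usage sketch follows; the table name and split key are illustrative, and passing null for the id falls back to the untagged behavior, as the delegating overloads in HBaseAdmin below show.]

import java.util.UUID;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class TaggedSplitExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Tag the split so the region state transitions it triggers can be
      // correlated with this request when they are reported to the master.
      UUID id = UUID.randomUUID();
      admin.split(TableName.valueOf("TestTable"), Bytes.toBytes("row-0500"), id);
    }
  }
}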
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index db94ff4..f862da1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -27,6 +27,7 @@ import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.UUID; import java.util.Map.Entry; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -1506,7 +1507,17 @@ public class HBaseAdmin implements Admin { * {@inheritDoc} */ @Override - public void split(final TableName tableName, final byte [] splitPoint) throws IOException { + public void split(final TableName tableName, final byte [] splitPoint) + throws IOException { + split(tableName, splitPoint, null); + } + + /** + * {@inheritDoc} + */ + @Override + public void split(final TableName tableName, final byte [] splitPoint, final UUID id) + throws IOException { ZooKeeperWatcher zookeeper = null; try { checkTableExists(tableName); @@ -1528,7 +1539,7 @@ public class HBaseAdmin implements Admin { if (r.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID || (splitPoint != null && !r.containsRow(splitPoint))) continue; // call out to region server to do split now - split(pair.getSecond(), pair.getFirst(), splitPoint); + split(pair.getSecond(), pair.getFirst(), splitPoint, id); } } finally { if (zookeeper != null) { @@ -1539,6 +1550,12 @@ public class HBaseAdmin implements Admin { @Override public void splitRegion(final byte[] regionName, final byte [] splitPoint) throws IOException { + splitRegion(regionName, splitPoint, null); + } + + @Override + public void splitRegion(final byte[] regionName, final byte [] splitPoint, final UUID id) + throws IOException { Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName); if (regionServerPair == null) { throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); } @@ -1551,19 +1568,19 @@ if (regionServerPair.getSecond() == null) { throw new NoServerForRegionException(Bytes.toStringBinary(regionName)); } - split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint); + split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint, id); } @VisibleForTesting public void split(final ServerName sn, final HRegionInfo hri, - byte[] splitPoint) throws IOException { + byte[] splitPoint, final UUID id) throws IOException { if (hri.getStartKey() != null && splitPoint != null && Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) { throw new IOException("should not give a splitkey which equals to startkey!"); } // TODO: This is not executed via retries AdminService.BlockingInterface admin = this.connection.getAdmin(sn); - ProtobufUtil.split(admin, hri, splitPoint); + ProtobufUtil.split(admin, hri, splitPoint, id); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index f5e4305..2176c8f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -36,6 +36,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.NavigableSet; +import java.util.UUID; import
java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; @@ -1825,12 +1826,13 @@ public final class ProtobufUtil { * @param admin * @param hri * @param splitPoint + * @param id UUID that identifies the request * @throws IOException */ public static void split(final AdminService.BlockingInterface admin, - final HRegionInfo hri, byte[] splitPoint) throws IOException { + final HRegionInfo hri, byte[] splitPoint, UUID id) throws IOException { SplitRegionRequest request = - RequestConverter.buildSplitRegionRequest(hri.getRegionName(), splitPoint); + RequestConverter.buildSplitRegionRequest(hri.getRegionName(), splitPoint, id); try { admin.splitRegion(null, request); } catch (ServiceException se) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index bd4c427..68ace95 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.protobuf; import java.io.IOException; import java.util.List; +import java.util.UUID; import java.util.regex.Pattern; import org.apache.hadoop.hbase.CellScannable; @@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; @@ -869,14 +871,21 @@ public final class RequestConverter { * * @param regionName the name of the region to split * @param splitPoint the split point + * @param id UUID that identifies the request * @return a SplitRegionRequest */ public static SplitRegionRequest buildSplitRegionRequest( - final byte[] regionName, final byte[] splitPoint) { + final byte[] regionName, final byte[] splitPoint, final UUID id) { SplitRegionRequest.Builder builder = SplitRegionRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier( RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); + if (id != null) { + HBaseProtos.UUID.Builder uuidBuilder = HBaseProtos.UUID.newBuilder(); + uuidBuilder.setLeastSigBits(id.getLeastSignificantBits()); + uuidBuilder.setMostSigBits(id.getMostSignificantBits()); + builder.setId(uuidBuilder.build()); + } if (splitPoint != null) { builder.setSplitPoint(ByteStringer.wrap(splitPoint)); } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java index 1c59ea6..d59fcbf 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java @@ -10426,6 +10426,20 @@ public final class AdminProtos { * optional bytes split_point = 2; */ com.google.protobuf.ByteString getSplitPoint(); + + // optional .hbase.pb.UUID id = 3; + /** + * optional .hbase.pb.UUID id = 3; + */ + boolean hasId(); + /** + * optional .hbase.pb.UUID id = 3; + */ + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getId(); + /** + * optional .hbase.pb.UUID id = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder getIdOrBuilder(); } /** * Protobuf type {@code hbase.pb.SplitRegionRequest} @@ -10505,6 +10519,19 @@ public final class AdminProtos { splitPoint_ = input.readBytes(); break; } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = id_.toBuilder(); + } + id_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(id_); + id_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -10583,9 +10610,32 @@ public final class AdminProtos { return splitPoint_; } + // optional .hbase.pb.UUID id = 3; + public static final int ID_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID id_; + /** + * optional .hbase.pb.UUID id = 3; + */ + public boolean hasId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getId() { + return id_; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder getIdOrBuilder() { + return id_; + } + private void initFields() { region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); splitPoint_ = com.google.protobuf.ByteString.EMPTY; + id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -10600,6 +10650,12 @@ public final class AdminProtos { memoizedIsInitialized = 0; return false; } + if (hasId()) { + if (!getId().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -10613,6 +10669,9 @@ public final class AdminProtos { if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, splitPoint_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, id_); + } getUnknownFields().writeTo(output); } @@ -10630,6 +10689,10 @@ public final class AdminProtos { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, splitPoint_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, id_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -10663,6 +10726,11 @@ public final class AdminProtos { result = result && getSplitPoint() .equals(other.getSplitPoint()); } + result = result && (hasId() == other.hasId()); + if (hasId()) { + result = result && getId() + .equals(other.getId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -10684,6 +10752,10 @@ public final class AdminProtos { hash = (37 * hash) + SPLIT_POINT_FIELD_NUMBER; hash = (53 * hash) + getSplitPoint().hashCode(); } + if (hasId()) { + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -10795,6 +10867,7 @@ public final class AdminProtos { private void 
maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getRegionFieldBuilder(); + getIdFieldBuilder(); } } private static Builder create() { @@ -10811,6 +10884,12 @@ public final class AdminProtos { bitField0_ = (bitField0_ & ~0x00000001); splitPoint_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); + if (idBuilder_ == null) { + id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); + } else { + idBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -10851,6 +10930,14 @@ public final class AdminProtos { to_bitField0_ |= 0x00000002; } result.splitPoint_ = splitPoint_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (idBuilder_ == null) { + result.id_ = id_; + } else { + result.id_ = idBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -10873,6 +10960,9 @@ public final class AdminProtos { if (other.hasSplitPoint()) { setSplitPoint(other.getSplitPoint()); } + if (other.hasId()) { + mergeId(other.getId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -10886,6 +10976,12 @@ public final class AdminProtos { return false; } + if (hasId()) { + if (!getId().isInitialized()) { + + return false; + } + } return true; } @@ -11061,6 +11157,123 @@ public final class AdminProtos { return this; } + // optional .hbase.pb.UUID id = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder> idBuilder_; + /** + * optional .hbase.pb.UUID id = 3; + */ + public boolean hasId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getId() { + if (idBuilder_ == null) { + return id_; + } else { + return idBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public Builder setId(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID value) { + if (idBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + id_ = value; + onChanged(); + } else { + idBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public Builder setId( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder builderForValue) { + if (idBuilder_ == null) { + id_ = builderForValue.build(); + onChanged(); + } else { + idBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public Builder mergeId(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID value) { + if (idBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + id_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance()) { + id_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.newBuilder(id_).mergeFrom(value).buildPartial(); + } else { + id_ = value; + } + onChanged(); + } else { + idBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional 
.hbase.pb.UUID id = 3; + */ + public Builder clearId() { + if (idBuilder_ == null) { + id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); + onChanged(); + } else { + idBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder getIdBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getIdFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder getIdOrBuilder() { + if (idBuilder_ != null) { + return idBuilder_.getMessageOrBuilder(); + } else { + return id_; + } + } + /** + * optional .hbase.pb.UUID id = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder> + getIdFieldBuilder() { + if (idBuilder_ == null) { + idBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder>( + id_, + getParentForChildren(), + isClean()); + id_ = null; + } + return idBuilder_; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.SplitRegionRequest) } @@ -24018,77 +24231,78 @@ public final class AdminProtos { "\n\026write_flush_wal_marker\030\003 \001(\010\"_\n\023FlushR" + "egionResponse\022\027\n\017last_flush_time\030\001 \002(\004\022\017", "\n\007flushed\030\002 \001(\010\022\036\n\026wrote_flush_wal_marke" + - "r\030\003 \001(\010\"T\n\022SplitRegionRequest\022)\n\006region\030" + + "r\030\003 \001(\010\"p\n\022SplitRegionRequest\022)\n\006region\030" + "\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\023\n\013spli" + - "t_point\030\002 \001(\014\"\025\n\023SplitRegionResponse\"`\n\024" + - "CompactRegionRequest\022)\n\006region\030\001 \002(\0132\031.h" + - "base.pb.RegionSpecifier\022\r\n\005major\030\002 \001(\010\022\016" + - "\n\006family\030\003 \001(\014\"\027\n\025CompactRegionResponse\"" + - "\315\001\n\031UpdateFavoredNodesRequest\022I\n\013update_" + - "info\030\001 \003(\01324.hbase.pb.UpdateFavoredNodes" + - "Request.RegionUpdateInfo\032e\n\020RegionUpdate", - "Info\022$\n\006region\030\001 \002(\0132\024.hbase.pb.RegionIn" + - "fo\022+\n\rfavored_nodes\030\002 \003(\0132\024.hbase.pb.Ser" + - "verName\".\n\032UpdateFavoredNodesResponse\022\020\n" + - "\010response\030\001 \001(\r\"\244\001\n\023MergeRegionsRequest\022" + - "+\n\010region_a\030\001 \002(\0132\031.hbase.pb.RegionSpeci" + - "fier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb.Region" + - "Specifier\022\027\n\010forcible\030\003 \001(\010:\005false\022\032\n\022ma" + - "ster_system_time\030\004 \001(\004\"\026\n\024MergeRegionsRe" + - "sponse\"a\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.hbase." 
+ - "pb.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025as", - "sociated_cell_count\030\003 \001(\005\"\242\001\n\030ReplicateW" + - "ALEntryRequest\022!\n\005entry\030\001 \003(\0132\022.hbase.pb" + - ".WALEntry\022\034\n\024replicationClusterId\030\002 \001(\t\022" + - "\"\n\032sourceBaseNamespaceDirPath\030\003 \001(\t\022!\n\031s" + - "ourceHFileArchiveDirPath\030\004 \001(\t\"\033\n\031Replic" + - "ateWALEntryResponse\"\026\n\024RollWALWriterRequ" + - "est\"0\n\025RollWALWriterResponse\022\027\n\017region_t" + - "o_flush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006re" + - "ason\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetS" + - "erverInfoRequest\"K\n\nServerInfo\022)\n\013server", - "_name\030\001 \002(\0132\024.hbase.pb.ServerName\022\022\n\nweb" + - "ui_port\030\002 \001(\r\"B\n\025GetServerInfoResponse\022)" + - "\n\013server_info\030\001 \002(\0132\024.hbase.pb.ServerInf" + - "o\"\034\n\032UpdateConfigurationRequest\"\035\n\033Updat" + - "eConfigurationResponse2\207\013\n\014AdminService\022" + - "P\n\rGetRegionInfo\022\036.hbase.pb.GetRegionInf" + - "oRequest\032\037.hbase.pb.GetRegionInfoRespons" + - "e\022M\n\014GetStoreFile\022\035.hbase.pb.GetStoreFil" + - "eRequest\032\036.hbase.pb.GetStoreFileResponse" + - "\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOnline", - "RegionRequest\032!.hbase.pb.GetOnlineRegion" + - "Response\022G\n\nOpenRegion\022\033.hbase.pb.OpenRe" + - "gionRequest\032\034.hbase.pb.OpenRegionRespons" + - "e\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupRegio" + - "nRequest\032\036.hbase.pb.WarmupRegionResponse" + - "\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegionRe" + - "quest\032\035.hbase.pb.CloseRegionResponse\022J\n\013" + - "FlushRegion\022\034.hbase.pb.FlushRegionReques" + - "t\032\035.hbase.pb.FlushRegionResponse\022J\n\013Spli" + - "tRegion\022\034.hbase.pb.SplitRegionRequest\032\035.", - "hbase.pb.SplitRegionResponse\022P\n\rCompactR" + - "egion\022\036.hbase.pb.CompactRegionRequest\032\037." + - "hbase.pb.CompactRegionResponse\022M\n\014MergeR" + - "egions\022\035.hbase.pb.MergeRegionsRequest\032\036." 
+ - "hbase.pb.MergeRegionsResponse\022\\\n\021Replica" + - "teWALEntry\022\".hbase.pb.ReplicateWALEntryR" + - "equest\032#.hbase.pb.ReplicateWALEntryRespo" + - "nse\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEnt" + - "ryRequest\032#.hbase.pb.ReplicateWALEntryRe" + - "sponse\022P\n\rRollWALWriter\022\036.hbase.pb.RollW", - "ALWriterRequest\032\037.hbase.pb.RollWALWriter" + - "Response\022P\n\rGetServerInfo\022\036.hbase.pb.Get" + - "ServerInfoRequest\032\037.hbase.pb.GetServerIn" + - "foResponse\022G\n\nStopServer\022\033.hbase.pb.Stop" + - "ServerRequest\032\034.hbase.pb.StopServerRespo" + - "nse\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Upd" + - "ateFavoredNodesRequest\032$.hbase.pb.Update" + - "FavoredNodesResponse\022b\n\023UpdateConfigurat" + - "ion\022$.hbase.pb.UpdateConfigurationReques" + - "t\032%.hbase.pb.UpdateConfigurationResponse", - "BA\n*org.apache.hadoop.hbase.protobuf.gen" + - "eratedB\013AdminProtosH\001\210\001\001\240\001\001" + "t_point\030\002 \001(\014\022\032\n\002id\030\003 \001(\0132\016.hbase.pb.UUI" + + "D\"\025\n\023SplitRegionResponse\"`\n\024CompactRegio" + + "nRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regi" + + "onSpecifier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001" + + "(\014\"\027\n\025CompactRegionResponse\"\315\001\n\031UpdateFa" + + "voredNodesRequest\022I\n\013update_info\030\001 \003(\01324" + + ".hbase.pb.UpdateFavoredNodesRequest.Regi", + "onUpdateInfo\032e\n\020RegionUpdateInfo\022$\n\006regi" + + "on\030\001 \002(\0132\024.hbase.pb.RegionInfo\022+\n\rfavore" + + "d_nodes\030\002 \003(\0132\024.hbase.pb.ServerName\".\n\032U" + + "pdateFavoredNodesResponse\022\020\n\010response\030\001 " + + "\001(\r\"\244\001\n\023MergeRegionsRequest\022+\n\010region_a\030" + + "\001 \002(\0132\031.hbase.pb.RegionSpecifier\022+\n\010regi" + + "on_b\030\002 \002(\0132\031.hbase.pb.RegionSpecifier\022\027\n" + + "\010forcible\030\003 \001(\010:\005false\022\032\n\022master_system_" + + "time\030\004 \001(\004\"\026\n\024MergeRegionsResponse\"a\n\010WA" + + "LEntry\022\035\n\003key\030\001 \002(\0132\020.hbase.pb.WALKey\022\027\n", + "\017key_value_bytes\030\002 \003(\014\022\035\n\025associated_cel" + + "l_count\030\003 \001(\005\"\242\001\n\030ReplicateWALEntryReque" + + "st\022!\n\005entry\030\001 \003(\0132\022.hbase.pb.WALEntry\022\034\n" + + "\024replicationClusterId\030\002 \001(\t\022\"\n\032sourceBas" + + "eNamespaceDirPath\030\003 \001(\t\022!\n\031sourceHFileAr" + + "chiveDirPath\030\004 \001(\t\"\033\n\031ReplicateWALEntryR" + + "esponse\"\026\n\024RollWALWriterRequest\"0\n\025RollW" + + "ALWriterResponse\022\027\n\017region_to_flush\030\001 \003(" + + "\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001 \002(\t\"\024" + + "\n\022StopServerResponse\"\026\n\024GetServerInfoReq", + "uest\"K\n\nServerInfo\022)\n\013server_name\030\001 \002(\0132" + + "\024.hbase.pb.ServerName\022\022\n\nwebui_port\030\002 \001(" + + "\r\"B\n\025GetServerInfoResponse\022)\n\013server_inf" + + "o\030\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n\032UpdateC" + + "onfigurationRequest\"\035\n\033UpdateConfigurati" + + "onResponse2\207\013\n\014AdminService\022P\n\rGetRegion" + + "Info\022\036.hbase.pb.GetRegionInfoRequest\032\037.h" + + "base.pb.GetRegionInfoResponse\022M\n\014GetStor" + + "eFile\022\035.hbase.pb.GetStoreFileRequest\032\036.h" + + "base.pb.GetStoreFileResponse\022V\n\017GetOnlin", + "eRegion\022 .hbase.pb.GetOnlineRegionReques" + + "t\032!.hbase.pb.GetOnlineRegionResponse\022G\n\n" + + 
"OpenRegion\022\033.hbase.pb.OpenRegionRequest\032" + + "\034.hbase.pb.OpenRegionResponse\022M\n\014WarmupR" + + "egion\022\035.hbase.pb.WarmupRegionRequest\032\036.h" + + "base.pb.WarmupRegionResponse\022J\n\013CloseReg" + + "ion\022\034.hbase.pb.CloseRegionRequest\032\035.hbas" + + "e.pb.CloseRegionResponse\022J\n\013FlushRegion\022" + + "\034.hbase.pb.FlushRegionRequest\032\035.hbase.pb" + + ".FlushRegionResponse\022J\n\013SplitRegion\022\034.hb", + "ase.pb.SplitRegionRequest\032\035.hbase.pb.Spl" + + "itRegionResponse\022P\n\rCompactRegion\022\036.hbas" + + "e.pb.CompactRegionRequest\032\037.hbase.pb.Com" + + "pactRegionResponse\022M\n\014MergeRegions\022\035.hba" + + "se.pb.MergeRegionsRequest\032\036.hbase.pb.Mer" + + "geRegionsResponse\022\\\n\021ReplicateWALEntry\022\"" + + ".hbase.pb.ReplicateWALEntryRequest\032#.hba" + + "se.pb.ReplicateWALEntryResponse\022Q\n\006Repla" + + "y\022\".hbase.pb.ReplicateWALEntryRequest\032#." + + "hbase.pb.ReplicateWALEntryResponse\022P\n\rRo", + "llWALWriter\022\036.hbase.pb.RollWALWriterRequ" + + "est\032\037.hbase.pb.RollWALWriterResponse\022P\n\r" + + "GetServerInfo\022\036.hbase.pb.GetServerInfoRe" + + "quest\032\037.hbase.pb.GetServerInfoResponse\022G" + + "\n\nStopServer\022\033.hbase.pb.StopServerReques" + + "t\032\034.hbase.pb.StopServerResponse\022_\n\022Updat" + + "eFavoredNodes\022#.hbase.pb.UpdateFavoredNo" + + "desRequest\032$.hbase.pb.UpdateFavoredNodes" + + "Response\022b\n\023UpdateConfiguration\022$.hbase." + + "pb.UpdateConfigurationRequest\032%.hbase.pb", + ".UpdateConfigurationResponseBA\n*org.apac" + + "he.hadoop.hbase.protobuf.generatedB\013Admi" + + "nProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -24190,7 +24404,7 @@ public final class AdminProtos { internal_static_hbase_pb_SplitRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SplitRegionRequest_descriptor, - new java.lang.String[] { "Region", "SplitPoint", }); + new java.lang.String[] { "Region", "SplitPoint", "Id", }); internal_static_hbase_pb_SplitRegionResponse_descriptor = getDescriptor().getMessageTypes().get(15); internal_static_hbase_pb_SplitRegionResponse_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java index 6754bd1..f2b4043 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java @@ -6964,6 +6964,20 @@ public final class RegionServerStatusProtos { */ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransitionOrBuilder getTransitionOrBuilder( int index); + + // optional .hbase.pb.UUID id = 3; + /** + * optional .hbase.pb.UUID id = 3; + */ + boolean hasId(); + /** + * optional .hbase.pb.UUID id = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getId(); + /** + * optional .hbase.pb.UUID id = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder getIdOrBuilder(); } /** * Protobuf type {@code hbase.pb.ReportRegionStateTransitionRequest} @@ -7037,6 +7051,19 @@ public final class RegionServerStatusProtos { 
transition_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.PARSER, extensionRegistry)); break; } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = id_.toBuilder(); + } + id_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(id_); + id_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -7150,9 +7177,32 @@ public final class RegionServerStatusProtos { return transition_.get(index); } + // optional .hbase.pb.UUID id = 3; + public static final int ID_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID id_; + /** + * optional .hbase.pb.UUID id = 3; + */ + public boolean hasId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getId() { + return id_; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder getIdOrBuilder() { + return id_; + } + private void initFields() { server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); transition_ = java.util.Collections.emptyList(); + id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -7173,6 +7223,12 @@ public final class RegionServerStatusProtos { return false; } } + if (hasId()) { + if (!getId().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -7186,6 +7242,9 @@ public final class RegionServerStatusProtos { for (int i = 0; i < transition_.size(); i++) { output.writeMessage(2, transition_.get(i)); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(3, id_); + } getUnknownFields().writeTo(output); } @@ -7203,6 +7262,10 @@ public final class RegionServerStatusProtos { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, transition_.get(i)); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, id_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -7233,6 +7296,11 @@ public final class RegionServerStatusProtos { } result = result && getTransitionList() .equals(other.getTransitionList()); + result = result && (hasId() == other.hasId()); + if (hasId()) { + result = result && getId() + .equals(other.getId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -7254,6 +7322,10 @@ public final class RegionServerStatusProtos { hash = (37 * hash) + TRANSITION_FIELD_NUMBER; hash = (53 * hash) + getTransitionList().hashCode(); } + if (hasId()) { + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -7357,6 +7429,7 @@ public final class RegionServerStatusProtos { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getServerFieldBuilder(); getTransitionFieldBuilder(); + 
getIdFieldBuilder(); } } private static Builder create() { @@ -7377,6 +7450,12 @@ public final class RegionServerStatusProtos { } else { transitionBuilder_.clear(); } + if (idBuilder_ == null) { + id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); + } else { + idBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -7422,6 +7501,14 @@ public final class RegionServerStatusProtos { } else { result.transition_ = transitionBuilder_.build(); } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + if (idBuilder_ == null) { + result.id_ = id_; + } else { + result.id_ = idBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -7467,6 +7554,9 @@ public final class RegionServerStatusProtos { } } } + if (other.hasId()) { + mergeId(other.getId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -7486,6 +7576,12 @@ public final class RegionServerStatusProtos { return false; } } + if (hasId()) { + if (!getId().isInitialized()) { + + return false; + } + } return true; } @@ -7901,6 +7997,123 @@ public final class RegionServerStatusProtos { return transitionBuilder_; } + // optional .hbase.pb.UUID id = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder> idBuilder_; + /** + * optional .hbase.pb.UUID id = 3; + */ + public boolean hasId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getId() { + if (idBuilder_ == null) { + return id_; + } else { + return idBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public Builder setId(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID value) { + if (idBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + id_ = value; + onChanged(); + } else { + idBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public Builder setId( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder builderForValue) { + if (idBuilder_ == null) { + id_ = builderForValue.build(); + onChanged(); + } else { + idBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public Builder mergeId(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID value) { + if (idBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + id_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance()) { + id_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.newBuilder(id_).mergeFrom(value).buildPartial(); + } else { + id_ = value; + } + onChanged(); + } else { + idBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public Builder clearId() { + if (idBuilder_ == null) { + id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); + onChanged(); + } else { + idBuilder_.clear(); 
+ } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder getIdBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getIdFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder getIdOrBuilder() { + if (idBuilder_ != null) { + return idBuilder_.getMessageOrBuilder(); + } else { + return id_; + } + } + /** + * optional .hbase.pb.UUID id = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder> + getIdFieldBuilder() { + if (idBuilder_ == null) { + idBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder>( + id_, + getParentForChildren(), + isClean()); + id_ = null; + } + return idBuilder_; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.ReportRegionStateTransitionRequest) } @@ -9130,28 +9343,28 @@ public final class RegionServerStatusProtos { "EADY_TO_SPLIT\020\003\022\022\n\016READY_TO_MERGE\020\004\022\016\n\nS" + "PLIT_PONR\020\005\022\016\n\nMERGE_PONR\020\006\022\t\n\005SPLIT\020\007\022\n" + "\n\006MERGED\020\010\022\022\n\016SPLIT_REVERTED\020\t\022\022\n\016MERGE_" + - "REVERTED\020\n\"\177\n\"ReportRegionStateTransitio" + - "nRequest\022$\n\006server\030\001 \002(\0132\024.hbase.pb.Serv" + - "erName\0223\n\ntransition\030\002 \003(\0132\037.hbase.pb.Re", - "gionStateTransition\"<\n#ReportRegionState" + - "TransitionResponse\022\025\n\rerror_message\030\001 \001(" + - "\t2\260\004\n\031RegionServerStatusService\022b\n\023Regio" + - "nServerStartup\022$.hbase.pb.RegionServerSt" + - "artupRequest\032%.hbase.pb.RegionServerStar" + - "tupResponse\022_\n\022RegionServerReport\022#.hbas" + - "e.pb.RegionServerReportRequest\032$.hbase.p" + - "b.RegionServerReportResponse\022_\n\022ReportRS" + - "FatalError\022#.hbase.pb.ReportRSFatalError" + - "Request\032$.hbase.pb.ReportRSFatalErrorRes", - "ponse\022q\n\030GetLastFlushedSequenceId\022).hbas" + - "e.pb.GetLastFlushedSequenceIdRequest\032*.h" + - "base.pb.GetLastFlushedSequenceIdResponse" + - "\022z\n\033ReportRegionStateTransition\022,.hbase." + - "pb.ReportRegionStateTransitionRequest\032-." 
+ - "hbase.pb.ReportRegionStateTransitionResp" + - "onseBN\n*org.apache.hadoop.hbase.protobuf" + - ".generatedB\030RegionServerStatusProtosH\001\210\001" + - "\001\240\001\001" + "REVERTED\020\n\"\233\001\n\"ReportRegionStateTransiti" + + "onRequest\022$\n\006server\030\001 \002(\0132\024.hbase.pb.Ser" + + "verName\0223\n\ntransition\030\002 \003(\0132\037.hbase.pb.R", + "egionStateTransition\022\032\n\002id\030\003 \001(\0132\016.hbase" + + ".pb.UUID\"<\n#ReportRegionStateTransitionR" + + "esponse\022\025\n\rerror_message\030\001 \001(\t2\260\004\n\031Regio" + + "nServerStatusService\022b\n\023RegionServerStar" + + "tup\022$.hbase.pb.RegionServerStartupReques" + + "t\032%.hbase.pb.RegionServerStartupResponse" + + "\022_\n\022RegionServerReport\022#.hbase.pb.Region" + + "ServerReportRequest\032$.hbase.pb.RegionSer" + + "verReportResponse\022_\n\022ReportRSFatalError\022" + + "#.hbase.pb.ReportRSFatalErrorRequest\032$.h", + "base.pb.ReportRSFatalErrorResponse\022q\n\030Ge" + + "tLastFlushedSequenceId\022).hbase.pb.GetLas" + + "tFlushedSequenceIdRequest\032*.hbase.pb.Get" + + "LastFlushedSequenceIdResponse\022z\n\033ReportR" + + "egionStateTransition\022,.hbase.pb.ReportRe" + + "gionStateTransitionRequest\032-.hbase.pb.Re" + + "portRegionStateTransitionResponseBN\n*org" + + ".apache.hadoop.hbase.protobuf.generatedB" + + "\030RegionServerStatusProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -9217,7 +9430,7 @@ public final class RegionServerStatusProtos { internal_static_hbase_pb_ReportRegionStateTransitionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ReportRegionStateTransitionRequest_descriptor, - new java.lang.String[] { "Server", "Transition", }); + new java.lang.String[] { "Server", "Transition", "Id", }); internal_static_hbase_pb_ReportRegionStateTransitionResponse_descriptor = getDescriptor().getMessageTypes().get(10); internal_static_hbase_pb_ReportRegionStateTransitionResponse_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto index a1905a4..7fcb8bd 100644 --- a/hbase-protocol/src/main/protobuf/Admin.proto +++ b/hbase-protocol/src/main/protobuf/Admin.proto @@ -145,6 +145,7 @@ message FlushRegionResponse { message SplitRegionRequest { required RegionSpecifier region = 1; optional bytes split_point = 2; + optional UUID id = 3; } message SplitRegionResponse { diff --git a/hbase-protocol/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol/src/main/protobuf/RegionServerStatus.proto index fda9de2..f81b39d 100644 --- a/hbase-protocol/src/main/protobuf/RegionServerStatus.proto +++ b/hbase-protocol/src/main/protobuf/RegionServerStatus.proto @@ -119,6 +119,7 @@ message ReportRegionStateTransitionRequest { required ServerName server = 1; repeated RegionStateTransition transition = 2; + optional UUID id = 3; } message ReportRegionStateTransitionResponse { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index c319bb1..b41972b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -31,6 +31,7 @@ import java.util.NavigableMap; import 
java.util.Random; import java.util.Set; import java.util.TreeMap; +import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; @@ -2817,7 +2818,7 @@ public class AssignmentManager { * region open/close */ protected String onRegionTransition(final ServerName serverName, - final RegionStateTransition transition) { + final RegionStateTransition transition, final UUID id) { TransitionCode code = transition.getTransitionCode(); HRegionInfo hri = HRegionInfo.convert(transition.getRegionInfo(0)); Lock lock = locker.acquireLock(hri.getEncodedName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 141fa88..b2a190e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.UUID; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -1338,7 +1339,12 @@ public class MasterRpcServices extends RSRpcServices throw new PleaseHoldException("Master is rebuilding user regions"); } ServerName sn = ProtobufUtil.toServerName(req.getServer()); - String error = master.assignmentManager.onRegionTransition(sn, rt); + UUID id = null; + if (req.hasId()) { + HBaseProtos.UUID reqId = req.getId(); + id = new UUID(reqId.getMostSigBits(), reqId.getLeastSigBits()); + } + String error = master.assignmentManager.onRegionTransition(sn, rt, id); ReportRegionStateTransitionResponse.Builder rrtr = ReportRegionStateTransitionResponse.newBuilder(); if (error != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java index 76b7cc2..a9b531e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.normalizer.NormalizationPlan; import java.io.IOException; import java.util.Arrays; +import java.util.UUID; /** * Normalization plan to split region. 
@@ -79,7 +80,7 @@ public class SplitNormalizationPlan implements NormalizationPlan { public void execute(Admin admin) { LOG.info("Executing splitting normalization plan: " + this); try { - admin.splitRegion(regionInfo.getRegionName()); + admin.splitRegion(regionInfo.getRegionName(), null, UUID.randomUUID()); } catch (IOException ex) { LOG.error("Error during region split: ", ex); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index f54f008..5e8af6d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -24,6 +24,7 @@ import java.io.StringWriter; import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.UUID; import java.util.concurrent.BlockingQueue; import java.util.concurrent.Executors; import java.util.concurrent.RejectedExecutionException; @@ -248,13 +249,13 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi } public synchronized void requestSplit(final Region r, byte[] midKey) { - requestSplit(r, midKey, null); + requestSplit(r, midKey, null, null); } /* * The User parameter allows the split thread to assume the correct user identity */ - public synchronized void requestSplit(final Region r, byte[] midKey, User user) { + public synchronized void requestSplit(final Region r, byte[] midKey, User user, UUID id) { if (midKey == null) { LOG.debug("Region " + r.getRegionInfo().getRegionNameAsString() + " not splittable because midkey=null"); @@ -264,7 +265,7 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi return; } try { - this.splits.execute(new SplitRequest(r, midKey, this.server, user)); + this.splits.execute(new SplitRequest(r, midKey, this.server, user, id)); if (LOG.isDebugEnabled()) { LOG.debug("Split requested for " + r + ". 
" + this); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index b2cc78a..ef16962 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; @@ -1958,7 +1959,7 @@ public class HRegionServer extends HasThread implements RegionServerServices, La // Notify master if (!reportRegionStateTransition(new RegionStateTransitionContext( - TransitionCode.OPENED, openSeqNum, masterSystemTime, r.getRegionInfo()))) { + TransitionCode.OPENED, openSeqNum, masterSystemTime, null, r.getRegionInfo()))) { throw new IOException("Failed to report opened region to master: " + r.getRegionInfo().getRegionNameAsString()); } @@ -1977,7 +1978,7 @@ public class HRegionServer extends HasThread implements RegionServerServices, La public boolean reportRegionStateTransition( TransitionCode code, long openSeqNum, HRegionInfo... hris) { return reportRegionStateTransition( - new RegionStateTransitionContext(code, HConstants.NO_SEQNUM, -1, hris)); + new RegionStateTransitionContext(code, HConstants.NO_SEQNUM, -1, null, hris)); } @Override @@ -2016,6 +2017,12 @@ public class HRegionServer extends HasThread implements RegionServerServices, La ReportRegionStateTransitionRequest.Builder builder = ReportRegionStateTransitionRequest.newBuilder(); builder.setServer(ProtobufUtil.toServerName(serverName)); + if (context.getId() != null) { + HBaseProtos.UUID.Builder uuidBuilder = HBaseProtos.UUID.newBuilder(); + uuidBuilder.setLeastSigBits(context.getId().getLeastSignificantBits()); + uuidBuilder.setMostSigBits(context.getId().getMostSignificantBits()); + builder.setId(uuidBuilder.build()); + } RegionStateTransition.Builder transition = builder.addTransitionBuilder(); transition.setTransitionCode(code); if (code == TransitionCode.OPENED && openSeqNum >= 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 5d9b90f..924f962 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -34,6 +34,7 @@ import java.util.Map.Entry; import java.util.NavigableMap; import java.util.Set; import java.util.TreeSet; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; @@ -129,6 +130,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse; import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; @@ -1946,9 +1948,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (request.hasSplitPoint()) { splitPoint = request.getSplitPoint().toByteArray(); } + UUID id = null; + if (request.hasId()) { + HBaseProtos.UUID reqId = request.getId(); + id = new UUID(reqId.getMostSigBits(), reqId.getLeastSigBits()); + } ((HRegion)region).forceSplit(splitPoint); regionServer.compactSplitThread.requestSplit(region, ((HRegion)region).checkSplit(), - RpcServer.getRequestUser()); + RpcServer.getRequestUser(), id); return SplitRegionResponse.newBuilder().build(); } catch (DroppedSnapshotException ex) { regionServer.abort("Replay of WAL required. Forcing server shutdown", ex); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index cd4816c..6e35456 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.Map; import java.util.Set; +import java.util.UUID; import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.fs.FileSystem; @@ -126,14 +127,16 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi private final long openSeqNum; private final long masterSystemTime; private final HRegionInfo[] hris; + private final UUID id; @InterfaceAudience.Private public RegionStateTransitionContext(TransitionCode code, long openSeqNum, long masterSystemTime, - HRegionInfo... hris) { + UUID id, HRegionInfo... 
hris) { this.code = code; this.openSeqNum = openSeqNum; this.masterSystemTime = masterSystemTime; this.hris = hris; + this.id = id; } public TransitionCode getCode() { return code; @@ -147,6 +150,9 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi public HRegionInfo[] getHris() { return hris; } + public UUID getId() { + return id; + } } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java index 91a5f37..ec91df1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.util.UUID; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -44,13 +45,15 @@ class SplitRequest implements Runnable { private final HRegionServer server; private final User user; private TableLock tableLock; + private final UUID uuid; - SplitRequest(Region region, byte[] midKey, HRegionServer hrs, User user) { + SplitRequest(Region region, byte[] midKey, HRegionServer hrs, User user, UUID id) { Preconditions.checkNotNull(hrs); this.parent = (HRegion)region; this.midKey = midKey; this.server = hrs; this.user = user; + this.uuid = id; } @Override @@ -62,7 +65,7 @@ class SplitRequest implements Runnable { boolean success = false; server.metricsRegionServer.incrSplitRequest(); long startTime = EnvironmentEdgeManager.currentTime(); - SplitTransactionImpl st = new SplitTransactionImpl(parent, midKey); + SplitTransactionImpl st = new SplitTransactionImpl(parent, midKey, uuid); try { //acquire a shared read lock on the table, so that table schema modifications //do not happen concurrently diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java index 70d040e..ffb6a51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java @@ -25,6 +25,7 @@ import java.util.ArrayList; import java.util.List; import java.util.ListIterator; import java.util.Map; +import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; @@ -44,6 +45,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.regionserver.RegionServerServices.RegionStateTransitionContext; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CancelableProgressable; @@ -81,6 +83,7 @@ public class SplitTransactionImpl implements SplitTransaction { private SplitTransactionPhase currentPhase = SplitTransactionPhase.STARTED; private Server server; private RegionServerServices rsServices; + private UUID id; public static class JournalEntryImpl implements JournalEntry { private SplitTransactionPhase type; @@ -133,6 +136,20 @@ public class SplitTransactionImpl implements SplitTransaction { public 
SplitTransactionImpl(final Region r, final byte [] splitrow) { this.parent = (HRegion)r; this.splitrow = splitrow; + this.id = null; + this.journal.add(new JournalEntryImpl(SplitTransactionPhase.STARTED)); + } + + /** + * Constructor + * @param r Region to split + * @param splitrow Row to split around + * @param id UUID of the split request + */ + public SplitTransactionImpl(final Region r, final byte [] splitrow, final UUID id) { + this.parent = (HRegion)r; + this.splitrow = splitrow; + this.id = id; this.journal.add(new JournalEntryImpl(SplitTransactionPhase.STARTED)); } @@ -340,8 +357,9 @@ public class SplitTransactionImpl implements SplitTransaction { @VisibleForTesting public PairOfSameType<Region> stepsBeforePONR(final Server server, final RegionServerServices services, boolean testing) throws IOException { - if (services != null && !services.reportRegionStateTransition(TransitionCode.READY_TO_SPLIT, - parent.getRegionInfo(), hri_a, hri_b)) { + if (services != null && !services.reportRegionStateTransition( + new RegionStateTransitionContext(TransitionCode.READY_TO_SPLIT, HConstants.NO_SEQNUM, -1, + id, parent.getRegionInfo(), hri_a, hri_b))) { throw new IOException("Failed to get ok from master to split " + parent.getRegionInfo().getRegionNameAsString()); } @@ -812,8 +830,9 @@ public class SplitTransactionImpl implements SplitTransaction { case SET_SPLITTING: if (services != null - && !services.reportRegionStateTransition(TransitionCode.SPLIT_REVERTED, - parent.getRegionInfo(), hri_a, hri_b)) { + && !services.reportRegionStateTransition( + new RegionStateTransitionContext(TransitionCode.SPLIT_REVERTED, HConstants.NO_SEQNUM, + -1, id, parent.getRegionInfo(), hri_a, hri_b))) { return false; } break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index 6730e0a..cf5a83a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -1214,7 +1214,7 @@ public class TestAdmin1 { // this API goes direct to the regionserver skipping any checks in the admin).
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index 6730e0a..cf5a83a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -1214,7 +1214,7 @@ public class TestAdmin1 {
     // this API goes direct to the regionserver skipping any checks in the admin). Should fail
     try {
       TEST_UTIL.getHBaseAdmin().split(regions.get(1).getSecond(), regions.get(1).getFirst(),
-        new byte[]{(byte)'1'});
+        new byte[]{(byte)'1'}, null);
     } catch (IOException ex) {
       gotException = true;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index 052e05c..824c560 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -196,7 +196,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
         ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) {
       if (hri.getTable().equals(table)) {
         // splitRegion doesn't work if startkey/endkey are null
-        ProtobufUtil.split(hrs.getRSRpcServices(), hri, rowkey(ROWCOUNT / 2)); // hard code split
+        ProtobufUtil.split(hrs.getRSRpcServices(), hri, rowkey(ROWCOUNT / 2), null);
       }
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index 4c81b9a..4c403fc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -107,7 +107,7 @@ public class TestEndToEndSplitTransaction {
     byte[] regionName = conn.getRegionLocator(tableName).getRegionLocation(splitRow)
         .getRegionInfo().getRegionName();
     Region region = server.getRegion(regionName);
-    SplitTransactionImpl split = new SplitTransactionImpl((HRegion) region, splitRow);
+    SplitTransactionImpl split = new SplitTransactionImpl((HRegion) region, splitRow, null);
     split.prepare();

     // 1. phase I
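The test updates above all follow the same pattern: existing call sites gain a trailing null, which keeps their behavior unchanged. A hedged sketch of the two calling conventions against the extended ProtobufUtil.split (the wrapper class and the admin, hri, and splitPoint parameters are assumptions made for the example):

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;

class ProtobufSplitSketch {
  static void demo(AdminProtos.AdminService.BlockingInterface admin, HRegionInfo hri,
      byte[] splitPoint) throws IOException {
    // Untracked: equivalent to the old three-argument signature.
    ProtobufUtil.split(admin, hri, splitPoint, null);

    // Tracked: the request id rides along with the split RPC.
    ProtobufUtil.split(admin, hri, splitPoint, UUID.randomUUID());
  }
}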
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index cef92a6..0506e65 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -2627,7 +2627,7 @@ public class TestHRegion {
    */
   HRegion[] splitRegion(final HRegion parent, final byte[] midkey) throws IOException {
     PairOfSameType<Region> result = null;
-    SplitTransactionImpl st = new SplitTransactionImpl(parent, midkey);
+    SplitTransactionImpl st = new SplitTransactionImpl(parent, midkey, null);
     // If prepare does not return true, for some reason -- logged inside in
     // the prepare call -- we are not ready to split just now. Just return.
     if (!st.prepare()) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
index 26b3293..6928e92 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
@@ -166,7 +166,7 @@ public class TestSplitTransaction {
   private SplitTransactionImpl prepareGOOD_SPLIT_ROW(final HRegion parentRegion)
       throws IOException {
-    SplitTransactionImpl st = new SplitTransactionImpl(parentRegion, GOOD_SPLIT_ROW);
+    SplitTransactionImpl st = new SplitTransactionImpl(parentRegion, GOOD_SPLIT_ROW, null);
     assertTrue(st.prepare());
     return st;
   }
@@ -181,7 +181,7 @@
     when(storeMock.close()).thenReturn(ImmutableList.<StoreFile>of());
     this.parent.stores.put(Bytes.toBytes(""), storeMock);

-    SplitTransactionImpl st = new SplitTransactionImpl(this.parent, GOOD_SPLIT_ROW);
+    SplitTransactionImpl st = new SplitTransactionImpl(this.parent, GOOD_SPLIT_ROW, null);

     assertFalse("a region should not be splittable if it has instances of store file references",
         st.prepare());
@@ -192,19 +192,19 @@
    */
   @Test public void testPrepareWithBadSplitRow() throws IOException {
     // Pass start row as split key.
-    SplitTransactionImpl st = new SplitTransactionImpl(this.parent, STARTROW);
+    SplitTransactionImpl st = new SplitTransactionImpl(this.parent, STARTROW, null);
     assertFalse(st.prepare());
-    st = new SplitTransactionImpl(this.parent, HConstants.EMPTY_BYTE_ARRAY);
+    st = new SplitTransactionImpl(this.parent, HConstants.EMPTY_BYTE_ARRAY, null);
     assertFalse(st.prepare());
-    st = new SplitTransactionImpl(this.parent, new byte [] {'A', 'A', 'A'});
+    st = new SplitTransactionImpl(this.parent, new byte [] {'A', 'A', 'A'}, null);
     assertFalse(st.prepare());
-    st = new SplitTransactionImpl(this.parent, ENDROW);
+    st = new SplitTransactionImpl(this.parent, ENDROW, null);
     assertFalse(st.prepare());
   }

   @Test public void testPrepareWithClosedRegion() throws IOException {
     this.parent.close();
-    SplitTransactionImpl st = new SplitTransactionImpl(this.parent, GOOD_SPLIT_ROW);
+    SplitTransactionImpl st = new SplitTransactionImpl(this.parent, GOOD_SPLIT_ROW, null);
     assertFalse(st.prepare());
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 41fbae6..859f343 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -263,7 +263,7 @@ public class TestSplitTransactionOnCluster {
     assertTrue(fileNum > store.getStorefiles().size());

     // 3, Split
-    SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row3"));
+    SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row3"), null);
     assertTrue(st.prepare());
     st.execute(regionServer, regionServer);
     assertEquals(2, cluster.getRegions(tableName).size());
@@ -647,7 +647,7 @@ public class TestSplitTransactionOnCluster {
       assertEquals("The specified table should present.", true, tableExists);
       final HRegion region = findSplittableRegion(regions);
       assertTrue("not able to find a splittable region", region != null);
-      SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2"));
+      SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2"), null);
       try {
         st.prepare();
         st.createDaughters(regionServer, regionServer, null);
@@ -704,7 +704,7 @@
       regionServerIndex = cluster.getServerWith(region.getRegionInfo().getRegionName());
       regionServer = cluster.getRegionServer(regionServerIndex);
       assertTrue("not able to find a splittable region", region != null);
-      SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2"));
+      SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2"), null);
       try {
         st.prepare();
         st.execute(regionServer, regionServer);
@@ -879,7 +879,7 @@
       assertTrue("not able to find a splittable region", region != null);

       // Now split.
-      SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2"));
+      SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2"), null);
       try {
         st.prepare();
         st.execute(regionServer, regionServer);
@@ -972,7 +972,7 @@
       List<HRegion> regions = cluster.getRegions(desc.getTableName());
       int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName());
       HRegionServer regionServer = cluster.getRegionServer(serverWith);
-      SplitTransactionImpl st = new SplitTransactionImpl(regions.get(0), Bytes.toBytes("r3"));
+      SplitTransactionImpl st = new SplitTransactionImpl(regions.get(0), Bytes.toBytes("r3"), null);
       st.prepare();
       st.stepsBeforePONR(regionServer, regionServer, false);
       Path tableDir =
@@ -1089,7 +1089,7 @@
     private HRegion currentRegion;

     public MockedSplitTransaction(HRegion region, byte[] splitrow) {
-      super(region, splitrow);
+      super(region, splitrow, null);
       this.currentRegion = region;
     }
     @Override
@@ -1340,7 +1340,7 @@
           break;
         }
       }
-      st = new SplitTransactionImpl((HRegion) region, splitKey);
+      st = new SplitTransactionImpl((HRegion) region, splitKey, null);
       if (!st.prepare()) {
         LOG.error("Prepare for the table " + region.getTableDesc().getNameAsString()
             + " failed. So returning null. ");
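Taken together, the patch threads a caller-supplied UUID from the Admin API down through the region server's split machinery and back out through region state transition reports. A minimal client-side sketch of submitting a tracked split (the table name, split point, and connection setup here are illustrative assumptions, not values from the patch):

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class TrackedSplitExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    UUID requestId = UUID.randomUUID();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asynchronous: returns once the split has been requested. The same id
      // later surfaces via RegionStateTransitionContext.getId() on the master
      // side, which is what makes the request traceable end to end.
      admin.split(TableName.valueOf("usertable"), Bytes.toBytes("row-5000"), requestId);
    }
  }
}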