diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index d7b52d5..ecfdd03 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -22,6 +22,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.UUID; import java.util.concurrent.Future; import java.util.regex.Pattern; @@ -903,6 +904,16 @@ public interface Admin extends Abortable, Closeable { throws IOException; /** + * Split a table. Asynchronous operation. + * + * @param tableName table to split + * @param splitPoint the explicit position to split on + * @param id a UUID that uniquely identifies this split request; may be null if the caller does not need to track the request + * @throws IOException if a remote or network exception occurs + */ + void split(final TableName tableName, final byte[] splitPoint, final UUID id) throws IOException; + + /** * Split an individual region. Asynchronous operation. * * @param regionName region to split @@ -913,6 +924,17 @@ public interface Admin extends Abortable, Closeable { throws IOException; /** + * Split an individual region. Asynchronous operation. + * + * @param regionName region to split + * @param splitPoint the explicit position to split on + * @param id a UUID that uniquely identifies this split request; may be null if the caller does not need to track the request + * @throws IOException if a remote or network exception occurs + */ + void splitRegion(final byte[] regionName, final byte[] splitPoint, final UUID id) + throws IOException; + + /** * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that * it may be a while before your schema change is updated across all of the table. * You can use Future.get(long, TimeUnit) to wait on the operation to complete. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index db94ff4..f862da1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -27,6 +27,7 @@ import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.UUID; import java.util.Map.Entry; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -1506,7 +1507,17 @@ public class HBaseAdmin implements Admin { * {@inheritDoc} */ @Override - public void split(final TableName tableName, final byte [] splitPoint) throws IOException { + public void split(final TableName tableName, final byte [] splitPoint) + throws IOException { + split(tableName, splitPoint, null); + } + + /** + * {@inheritDoc} + */ + @Override + public void split(final TableName tableName, final byte [] splitPoint, final UUID id) + throws IOException { ZooKeeperWatcher zookeeper = null; try { checkTableExists(tableName); @@ -1528,7 +1539,7 @@ public class HBaseAdmin implements Admin { if (r.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID || (splitPoint != null && !r.containsRow(splitPoint))) continue; // call out to region server to do split now - split(pair.getSecond(), pair.getFirst(), splitPoint); + split(pair.getSecond(), pair.getFirst(), splitPoint, id); } } finally { if (zookeeper != null) { @@ -1539,6 +1550,12 @@ public class HBaseAdmin implements Admin { @Override public void splitRegion(final byte[] regionName, final byte [] splitPoint) throws IOException { + splitRegion(regionName, splitPoint, null); + } + + @Override + public void splitRegion(final byte[] regionName, final byte [] splitPoint, final UUID id) + throws IOException { Pair regionServerPair = getRegion(regionName); if (regionServerPair == null) { throw new 
IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); @@ -1551,19 +1568,19 @@ public class HBaseAdmin implements Admin { if (regionServerPair.getSecond() == null) { throw new NoServerForRegionException(Bytes.toStringBinary(regionName)); } - split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint); + split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint, id); } @VisibleForTesting public void split(final ServerName sn, final HRegionInfo hri, - byte[] splitPoint) throws IOException { + byte[] splitPoint, final UUID id) throws IOException { if (hri.getStartKey() != null && splitPoint != null && Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) { throw new IOException("should not give a splitkey which equals to startkey!"); } // TODO: This is not executed via retries AdminService.BlockingInterface admin = this.connection.getAdmin(sn); - ProtobufUtil.split(admin, hri, splitPoint); + ProtobufUtil.split(admin, hri, splitPoint, id); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index f5e4305..2176c8f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -36,6 +36,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.NavigableSet; +import java.util.UUID; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; @@ -1825,12 +1826,13 @@ public final class ProtobufUtil { * @param admin * @param hri * @param splitPoint + * @param id UUID that identifies the request * @throws IOException */ public static void split(final AdminService.BlockingInterface admin, - final HRegionInfo hri, byte[] splitPoint) throws IOException { + final HRegionInfo hri, byte[] splitPoint, UUID id) throws 
IOException { SplitRegionRequest request = - RequestConverter.buildSplitRegionRequest(hri.getRegionName(), splitPoint); + RequestConverter.buildSplitRegionRequest(hri.getRegionName(), splitPoint, id); try { admin.splitRegion(null, request); } catch (ServiceException se) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index bd4c427..68ace95 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.protobuf; import java.io.IOException; import java.util.List; +import java.util.UUID; import java.util.regex.Pattern; import org.apache.hadoop.hbase.CellScannable; @@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; @@ -869,14 +871,21 @@ public final class RequestConverter { * * @param regionName the name of the region to split * @param splitPoint the split point + * @param id UUID that identifies the request * @return a SplitRegionRequest */ public static SplitRegionRequest buildSplitRegionRequest( - final byte[] regionName, final byte[] splitPoint) { + final byte[] regionName, final byte[] splitPoint, final UUID id) { SplitRegionRequest.Builder builder = SplitRegionRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier( 
RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); + if (id != null) { + HBaseProtos.UUID.Builder uuidBuilder = HBaseProtos.UUID.newBuilder(); + uuidBuilder.setLeastSigBits(id.getLeastSignificantBits()); + uuidBuilder.setMostSigBits(id.getMostSignificantBits()); + builder.setId(uuidBuilder.build()); + } if (splitPoint != null) { builder.setSplitPoint(ByteStringer.wrap(splitPoint)); } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java index 1c59ea6..d59fcbf 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java @@ -10426,6 +10426,20 @@ public final class AdminProtos { * optional bytes split_point = 2; */ com.google.protobuf.ByteString getSplitPoint(); + + // optional .hbase.pb.UUID id = 3; + /** + * optional .hbase.pb.UUID id = 3; + */ + boolean hasId(); + /** + * optional .hbase.pb.UUID id = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getId(); + /** + * optional .hbase.pb.UUID id = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder getIdOrBuilder(); } /** * Protobuf type {@code hbase.pb.SplitRegionRequest} @@ -10505,6 +10519,19 @@ public final class AdminProtos { splitPoint_ = input.readBytes(); break; } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = id_.toBuilder(); + } + id_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(id_); + id_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ 
-10583,9 +10610,32 @@ public final class AdminProtos { return splitPoint_; } + // optional .hbase.pb.UUID id = 3; + public static final int ID_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID id_; + /** + * optional .hbase.pb.UUID id = 3; + */ + public boolean hasId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getId() { + return id_; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder getIdOrBuilder() { + return id_; + } + private void initFields() { region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); splitPoint_ = com.google.protobuf.ByteString.EMPTY; + id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -10600,6 +10650,12 @@ public final class AdminProtos { memoizedIsInitialized = 0; return false; } + if (hasId()) { + if (!getId().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -10613,6 +10669,9 @@ public final class AdminProtos { if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, splitPoint_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, id_); + } getUnknownFields().writeTo(output); } @@ -10630,6 +10689,10 @@ public final class AdminProtos { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, splitPoint_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, id_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -10663,6 +10726,11 @@ public final class AdminProtos { result = result && 
getSplitPoint() .equals(other.getSplitPoint()); } + result = result && (hasId() == other.hasId()); + if (hasId()) { + result = result && getId() + .equals(other.getId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -10684,6 +10752,10 @@ public final class AdminProtos { hash = (37 * hash) + SPLIT_POINT_FIELD_NUMBER; hash = (53 * hash) + getSplitPoint().hashCode(); } + if (hasId()) { + hash = (37 * hash) + ID_FIELD_NUMBER; + hash = (53 * hash) + getId().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -10795,6 +10867,7 @@ public final class AdminProtos { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getRegionFieldBuilder(); + getIdFieldBuilder(); } } private static Builder create() { @@ -10811,6 +10884,12 @@ public final class AdminProtos { bitField0_ = (bitField0_ & ~0x00000001); splitPoint_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); + if (idBuilder_ == null) { + id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); + } else { + idBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -10851,6 +10930,14 @@ public final class AdminProtos { to_bitField0_ |= 0x00000002; } result.splitPoint_ = splitPoint_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (idBuilder_ == null) { + result.id_ = id_; + } else { + result.id_ = idBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -10873,6 +10960,9 @@ public final class AdminProtos { if (other.hasSplitPoint()) { setSplitPoint(other.getSplitPoint()); } + if (other.hasId()) { + mergeId(other.getId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -10886,6 +10976,12 @@ public final class AdminProtos { return false; } + if (hasId()) { + if 
(!getId().isInitialized()) { + + return false; + } + } return true; } @@ -11061,6 +11157,123 @@ public final class AdminProtos { return this; } + // optional .hbase.pb.UUID id = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder> idBuilder_; + /** + * optional .hbase.pb.UUID id = 3; + */ + public boolean hasId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getId() { + if (idBuilder_ == null) { + return id_; + } else { + return idBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public Builder setId(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID value) { + if (idBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + id_ = value; + onChanged(); + } else { + idBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public Builder setId( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder builderForValue) { + if (idBuilder_ == null) { + id_ = builderForValue.build(); + onChanged(); + } else { + idBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public Builder mergeId(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID value) { + if (idBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + id_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance()) { + id_ = + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.newBuilder(id_).mergeFrom(value).buildPartial(); + } else { + id_ = value; + } + onChanged(); + } else { + idBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public Builder clearId() { + if (idBuilder_ == null) { + id_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); + onChanged(); + } else { + idBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder getIdBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getIdFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.UUID id = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder getIdOrBuilder() { + if (idBuilder_ != null) { + return idBuilder_.getMessageOrBuilder(); + } else { + return id_; + } + } + /** + * optional .hbase.pb.UUID id = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder> + getIdFieldBuilder() { + if (idBuilder_ == null) { + idBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder>( + id_, + getParentForChildren(), + isClean()); + id_ = null; + } + return idBuilder_; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.SplitRegionRequest) } @@ -24018,77 +24231,78 @@ public final class AdminProtos { "\n\026write_flush_wal_marker\030\003 \001(\010\"_\n\023FlushR" + "egionResponse\022\027\n\017last_flush_time\030\001 
\002(\004\022\017", "\n\007flushed\030\002 \001(\010\022\036\n\026wrote_flush_wal_marke" + - "r\030\003 \001(\010\"T\n\022SplitRegionRequest\022)\n\006region\030" + + "r\030\003 \001(\010\"p\n\022SplitRegionRequest\022)\n\006region\030" + "\001 \002(\0132\031.hbase.pb.RegionSpecifier\022\023\n\013spli" + - "t_point\030\002 \001(\014\"\025\n\023SplitRegionResponse\"`\n\024" + - "CompactRegionRequest\022)\n\006region\030\001 \002(\0132\031.h" + - "base.pb.RegionSpecifier\022\r\n\005major\030\002 \001(\010\022\016" + - "\n\006family\030\003 \001(\014\"\027\n\025CompactRegionResponse\"" + - "\315\001\n\031UpdateFavoredNodesRequest\022I\n\013update_" + - "info\030\001 \003(\01324.hbase.pb.UpdateFavoredNodes" + - "Request.RegionUpdateInfo\032e\n\020RegionUpdate", - "Info\022$\n\006region\030\001 \002(\0132\024.hbase.pb.RegionIn" + - "fo\022+\n\rfavored_nodes\030\002 \003(\0132\024.hbase.pb.Ser" + - "verName\".\n\032UpdateFavoredNodesResponse\022\020\n" + - "\010response\030\001 \001(\r\"\244\001\n\023MergeRegionsRequest\022" + - "+\n\010region_a\030\001 \002(\0132\031.hbase.pb.RegionSpeci" + - "fier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb.Region" + - "Specifier\022\027\n\010forcible\030\003 \001(\010:\005false\022\032\n\022ma" + - "ster_system_time\030\004 \001(\004\"\026\n\024MergeRegionsRe" + - "sponse\"a\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.hbase." 
+ - "pb.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025as", - "sociated_cell_count\030\003 \001(\005\"\242\001\n\030ReplicateW" + - "ALEntryRequest\022!\n\005entry\030\001 \003(\0132\022.hbase.pb" + - ".WALEntry\022\034\n\024replicationClusterId\030\002 \001(\t\022" + - "\"\n\032sourceBaseNamespaceDirPath\030\003 \001(\t\022!\n\031s" + - "ourceHFileArchiveDirPath\030\004 \001(\t\"\033\n\031Replic" + - "ateWALEntryResponse\"\026\n\024RollWALWriterRequ" + - "est\"0\n\025RollWALWriterResponse\022\027\n\017region_t" + - "o_flush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006re" + - "ason\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetS" + - "erverInfoRequest\"K\n\nServerInfo\022)\n\013server", - "_name\030\001 \002(\0132\024.hbase.pb.ServerName\022\022\n\nweb" + - "ui_port\030\002 \001(\r\"B\n\025GetServerInfoResponse\022)" + - "\n\013server_info\030\001 \002(\0132\024.hbase.pb.ServerInf" + - "o\"\034\n\032UpdateConfigurationRequest\"\035\n\033Updat" + - "eConfigurationResponse2\207\013\n\014AdminService\022" + - "P\n\rGetRegionInfo\022\036.hbase.pb.GetRegionInf" + - "oRequest\032\037.hbase.pb.GetRegionInfoRespons" + - "e\022M\n\014GetStoreFile\022\035.hbase.pb.GetStoreFil" + - "eRequest\032\036.hbase.pb.GetStoreFileResponse" + - "\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOnline", - "RegionRequest\032!.hbase.pb.GetOnlineRegion" + - "Response\022G\n\nOpenRegion\022\033.hbase.pb.OpenRe" + - "gionRequest\032\034.hbase.pb.OpenRegionRespons" + - "e\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupRegio" + - "nRequest\032\036.hbase.pb.WarmupRegionResponse" + - "\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegionRe" + - "quest\032\035.hbase.pb.CloseRegionResponse\022J\n\013" + - "FlushRegion\022\034.hbase.pb.FlushRegionReques" + - "t\032\035.hbase.pb.FlushRegionResponse\022J\n\013Spli" + - "tRegion\022\034.hbase.pb.SplitRegionRequest\032\035.", - "hbase.pb.SplitRegionResponse\022P\n\rCompactR" + - 
"egion\022\036.hbase.pb.CompactRegionRequest\032\037." + - "hbase.pb.CompactRegionResponse\022M\n\014MergeR" + - "egions\022\035.hbase.pb.MergeRegionsRequest\032\036." + - "hbase.pb.MergeRegionsResponse\022\\\n\021Replica" + - "teWALEntry\022\".hbase.pb.ReplicateWALEntryR" + - "equest\032#.hbase.pb.ReplicateWALEntryRespo" + - "nse\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEnt" + - "ryRequest\032#.hbase.pb.ReplicateWALEntryRe" + - "sponse\022P\n\rRollWALWriter\022\036.hbase.pb.RollW", - "ALWriterRequest\032\037.hbase.pb.RollWALWriter" + - "Response\022P\n\rGetServerInfo\022\036.hbase.pb.Get" + - "ServerInfoRequest\032\037.hbase.pb.GetServerIn" + - "foResponse\022G\n\nStopServer\022\033.hbase.pb.Stop" + - "ServerRequest\032\034.hbase.pb.StopServerRespo" + - "nse\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Upd" + - "ateFavoredNodesRequest\032$.hbase.pb.Update" + - "FavoredNodesResponse\022b\n\023UpdateConfigurat" + - "ion\022$.hbase.pb.UpdateConfigurationReques" + - "t\032%.hbase.pb.UpdateConfigurationResponse", - "BA\n*org.apache.hadoop.hbase.protobuf.gen" + - "eratedB\013AdminProtosH\001\210\001\001\240\001\001" + "t_point\030\002 \001(\014\022\032\n\002id\030\003 \001(\0132\016.hbase.pb.UUI" + + "D\"\025\n\023SplitRegionResponse\"`\n\024CompactRegio" + + "nRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regi" + + "onSpecifier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001" + + "(\014\"\027\n\025CompactRegionResponse\"\315\001\n\031UpdateFa" + + "voredNodesRequest\022I\n\013update_info\030\001 \003(\01324" + + ".hbase.pb.UpdateFavoredNodesRequest.Regi", + "onUpdateInfo\032e\n\020RegionUpdateInfo\022$\n\006regi" + + "on\030\001 \002(\0132\024.hbase.pb.RegionInfo\022+\n\rfavore" + + "d_nodes\030\002 \003(\0132\024.hbase.pb.ServerName\".\n\032U" + + "pdateFavoredNodesResponse\022\020\n\010response\030\001 " + + "\001(\r\"\244\001\n\023MergeRegionsRequest\022+\n\010region_a\030" + + "\001 
\002(\0132\031.hbase.pb.RegionSpecifier\022+\n\010regi" + + "on_b\030\002 \002(\0132\031.hbase.pb.RegionSpecifier\022\027\n" + + "\010forcible\030\003 \001(\010:\005false\022\032\n\022master_system_" + + "time\030\004 \001(\004\"\026\n\024MergeRegionsResponse\"a\n\010WA" + + "LEntry\022\035\n\003key\030\001 \002(\0132\020.hbase.pb.WALKey\022\027\n", + "\017key_value_bytes\030\002 \003(\014\022\035\n\025associated_cel" + + "l_count\030\003 \001(\005\"\242\001\n\030ReplicateWALEntryReque" + + "st\022!\n\005entry\030\001 \003(\0132\022.hbase.pb.WALEntry\022\034\n" + + "\024replicationClusterId\030\002 \001(\t\022\"\n\032sourceBas" + + "eNamespaceDirPath\030\003 \001(\t\022!\n\031sourceHFileAr" + + "chiveDirPath\030\004 \001(\t\"\033\n\031ReplicateWALEntryR" + + "esponse\"\026\n\024RollWALWriterRequest\"0\n\025RollW" + + "ALWriterResponse\022\027\n\017region_to_flush\030\001 \003(" + + "\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001 \002(\t\"\024" + + "\n\022StopServerResponse\"\026\n\024GetServerInfoReq", + "uest\"K\n\nServerInfo\022)\n\013server_name\030\001 \002(\0132" + + "\024.hbase.pb.ServerName\022\022\n\nwebui_port\030\002 \001(" + + "\r\"B\n\025GetServerInfoResponse\022)\n\013server_inf" + + "o\030\001 \002(\0132\024.hbase.pb.ServerInfo\"\034\n\032UpdateC" + + "onfigurationRequest\"\035\n\033UpdateConfigurati" + + "onResponse2\207\013\n\014AdminService\022P\n\rGetRegion" + + "Info\022\036.hbase.pb.GetRegionInfoRequest\032\037.h" + + "base.pb.GetRegionInfoResponse\022M\n\014GetStor" + + "eFile\022\035.hbase.pb.GetStoreFileRequest\032\036.h" + + "base.pb.GetStoreFileResponse\022V\n\017GetOnlin", + "eRegion\022 .hbase.pb.GetOnlineRegionReques" + + "t\032!.hbase.pb.GetOnlineRegionResponse\022G\n\n" + + "OpenRegion\022\033.hbase.pb.OpenRegionRequest\032" + + "\034.hbase.pb.OpenRegionResponse\022M\n\014WarmupR" + + "egion\022\035.hbase.pb.WarmupRegionRequest\032\036.h" + + "base.pb.WarmupRegionResponse\022J\n\013CloseReg" + + 
"ion\022\034.hbase.pb.CloseRegionRequest\032\035.hbas" + + "e.pb.CloseRegionResponse\022J\n\013FlushRegion\022" + + "\034.hbase.pb.FlushRegionRequest\032\035.hbase.pb" + + ".FlushRegionResponse\022J\n\013SplitRegion\022\034.hb", + "ase.pb.SplitRegionRequest\032\035.hbase.pb.Spl" + + "itRegionResponse\022P\n\rCompactRegion\022\036.hbas" + + "e.pb.CompactRegionRequest\032\037.hbase.pb.Com" + + "pactRegionResponse\022M\n\014MergeRegions\022\035.hba" + + "se.pb.MergeRegionsRequest\032\036.hbase.pb.Mer" + + "geRegionsResponse\022\\\n\021ReplicateWALEntry\022\"" + + ".hbase.pb.ReplicateWALEntryRequest\032#.hba" + + "se.pb.ReplicateWALEntryResponse\022Q\n\006Repla" + + "y\022\".hbase.pb.ReplicateWALEntryRequest\032#." + + "hbase.pb.ReplicateWALEntryResponse\022P\n\rRo", + "llWALWriter\022\036.hbase.pb.RollWALWriterRequ" + + "est\032\037.hbase.pb.RollWALWriterResponse\022P\n\r" + + "GetServerInfo\022\036.hbase.pb.GetServerInfoRe" + + "quest\032\037.hbase.pb.GetServerInfoResponse\022G" + + "\n\nStopServer\022\033.hbase.pb.StopServerReques" + + "t\032\034.hbase.pb.StopServerResponse\022_\n\022Updat" + + "eFavoredNodes\022#.hbase.pb.UpdateFavoredNo" + + "desRequest\032$.hbase.pb.UpdateFavoredNodes" + + "Response\022b\n\023UpdateConfiguration\022$.hbase." 
+ + "pb.UpdateConfigurationRequest\032%.hbase.pb", + ".UpdateConfigurationResponseBA\n*org.apac" + + "he.hadoop.hbase.protobuf.generatedB\013Admi" + + "nProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -24190,7 +24404,7 @@ public final class AdminProtos { internal_static_hbase_pb_SplitRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SplitRegionRequest_descriptor, - new java.lang.String[] { "Region", "SplitPoint", }); + new java.lang.String[] { "Region", "SplitPoint", "Id", }); internal_static_hbase_pb_SplitRegionResponse_descriptor = getDescriptor().getMessageTypes().get(15); internal_static_hbase_pb_SplitRegionResponse_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto index a1905a4..7fcb8bd 100644 --- a/hbase-protocol/src/main/protobuf/Admin.proto +++ b/hbase-protocol/src/main/protobuf/Admin.proto @@ -145,6 +145,7 @@ message FlushRegionResponse { message SplitRegionRequest { required RegionSpecifier region = 1; optional bytes split_point = 2; + optional UUID id = 3; } message SplitRegionResponse { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java index 76b7cc2..a9b531e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.normalizer.NormalizationPlan; import java.io.IOException; import java.util.Arrays; +import java.util.UUID; /** * Normalization plan to split region. 
@@ -79,7 +80,7 @@ public class SplitNormalizationPlan implements NormalizationPlan { public void execute(Admin admin) { LOG.info("Executing splitting normalization plan: " + this); try { - admin.splitRegion(regionInfo.getRegionName()); + admin.splitRegion(regionInfo.getRegionName(), null, UUID.randomUUID()); } catch (IOException ex) { LOG.error("Error during region split: ", ex); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index f54f008..5e8af6d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -24,6 +24,7 @@ import java.io.StringWriter; import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.UUID; import java.util.concurrent.BlockingQueue; import java.util.concurrent.Executors; import java.util.concurrent.RejectedExecutionException; @@ -248,13 +249,13 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi } public synchronized void requestSplit(final Region r, byte[] midKey) { - requestSplit(r, midKey, null); + requestSplit(r, midKey, null, null); } /* * The User parameter allows the split thread to assume the correct user identity */ - public synchronized void requestSplit(final Region r, byte[] midKey, User user) { + public synchronized void requestSplit(final Region r, byte[] midKey, User user, UUID id) { if (midKey == null) { LOG.debug("Region " + r.getRegionInfo().getRegionNameAsString() + " not splittable because midkey=null"); @@ -264,7 +265,7 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi return; } try { - this.splits.execute(new SplitRequest(r, midKey, this.server, user)); + this.splits.execute(new SplitRequest(r, midKey, this.server, user, id)); if 
(LOG.isDebugEnabled()) { LOG.debug("Split requested for " + r + ". " + this); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 5d9b90f..924f962 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -34,6 +34,7 @@ import java.util.Map.Entry; import java.util.NavigableMap; import java.util.Set; import java.util.TreeSet; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; @@ -129,6 +130,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse; @@ -1946,9 +1948,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (request.hasSplitPoint()) { splitPoint = request.getSplitPoint().toByteArray(); } + UUID id = null; + if (request.hasId()) { + HBaseProtos.UUID reqId = request.getId(); + id = new UUID(reqId.getMostSigBits(), reqId.getLeastSigBits()); + } ((HRegion)region).forceSplit(splitPoint); regionServer.compactSplitThread.requestSplit(region, ((HRegion)region).checkSplit(), - RpcServer.getRequestUser()); + RpcServer.getRequestUser(), id); return SplitRegionResponse.newBuilder().build(); } catch (DroppedSnapshotException ex) { regionServer.abort("Replay 
of WAL required. Forcing server shutdown", ex); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java index 91a5f37..aaaebb7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.util.UUID; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -44,13 +45,15 @@ class SplitRequest implements Runnable { private final HRegionServer server; private final User user; private TableLock tableLock; + private final UUID uuid; - SplitRequest(Region region, byte[] midKey, HRegionServer hrs, User user) { + SplitRequest(Region region, byte[] midKey, HRegionServer hrs, User user, UUID id) { Preconditions.checkNotNull(hrs); this.parent = (HRegion)region; this.midKey = midKey; this.server = hrs; this.user = user; + this.uuid = id; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index d92bf07..d41569a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.List; +import java.util.UUID; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Server; @@ -212,6 +213,22 @@ public interface SplitTransaction { throws IOException; /** + * Run the transaction. + * @param server Hosting server instance. Can be null when testing. + * @param services Used to online/offline regions. 
+ * @param user + * @param id + * @throws IOException If thrown, transaction failed. + * Call {@link #rollback(Server, RegionServerServices)} + * @return Regions created + * @throws IOException + * @see #rollback(Server, RegionServerServices) + */ + PairOfSameType execute(final Server server, final RegionServerServices services, + User user, UUID id) throws IOException; + + + /** * Roll back a failed transaction * @param server Hosting server instance (May be null when testing). * @param services diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java index 70d040e..9a2867e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java @@ -25,6 +25,7 @@ import java.util.ArrayList; import java.util.List; import java.util.ListIterator; import java.util.Map; +import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; @@ -497,6 +498,12 @@ public class SplitTransactionImpl implements SplitTransaction { @Override public PairOfSameType execute(final Server server, final RegionServerServices services, User user) throws IOException { + return execute(server, services, user, null); + } + + @Override + public PairOfSameType execute(final Server server, final RegionServerServices services, + User user, UUID id) throws IOException { this.server = server; this.rsServices = services; PairOfSameType regions = createDaughters(server, services, user); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index 6730e0a..cf5a83a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -1214,7 +1214,7 @@ public class TestAdmin1 { // this API goes direct to the regionserver skipping any checks in the admin). Should fail try { TEST_UTIL.getHBaseAdmin().split(regions.get(1).getSecond(), regions.get(1).getFirst(), - new byte[]{(byte)'1'}); + new byte[]{(byte)'1'}, null); } catch (IOException ex) { gotException = true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java index 052e05c..824c560 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java @@ -196,7 +196,7 @@ public class TestLoadIncrementalHFilesSplitRecovery { ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices())) { if (hri.getTable().equals(table)) { // splitRegion doesn't work if startkey/endkey are null - ProtobufUtil.split(hrs.getRSRpcServices(), hri, rowkey(ROWCOUNT / 2)); // hard code split + ProtobufUtil.split(hrs.getRSRpcServices(), hri, rowkey(ROWCOUNT / 2), null); } }