diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java index 0ad10ad..0b8258d 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java @@ -3874,6 +3874,24 @@ public final class AdminProtos { * */ long getServerStartCode(); + + // optional bool isOpenForDistributedLogReplay = 3; + /** + * optional bool isOpenForDistributedLogReplay = 3; + * + *
+     * open region for distributedLogReplay
+     * 
+     */
+    boolean hasIsOpenForDistributedLogReplay();
+    /**
+     * optional bool isOpenForDistributedLogReplay = 3;
+     *
+     *
+     * open region for distributedLogReplay
+     * 
+ */ + boolean getIsOpenForDistributedLogReplay(); } /** * Protobuf type {@code OpenRegionRequest} @@ -3939,6 +3957,11 @@ public final class AdminProtos { serverStartCode_ = input.readUInt64(); break; } + case 24: { + bitField0_ |= 0x00000002; + isOpenForDistributedLogReplay_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -5073,9 +5096,34 @@ public final class AdminProtos { return serverStartCode_; } + // optional bool isOpenForDistributedLogReplay = 3; + public static final int ISOPENFORDISTRIBUTEDLOGREPLAY_FIELD_NUMBER = 3; + private boolean isOpenForDistributedLogReplay_; + /** + * optional bool isOpenForDistributedLogReplay = 3; + * + *
+     * open region for distributedLogReplay
+     * 
+     */
+    public boolean hasIsOpenForDistributedLogReplay() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * optional bool isOpenForDistributedLogReplay = 3;
+     *
+     *
+     * open region for distributedLogReplay
+     * 
+ */ + public boolean getIsOpenForDistributedLogReplay() { + return isOpenForDistributedLogReplay_; + } + private void initFields() { openInfo_ = java.util.Collections.emptyList(); serverStartCode_ = 0L; + isOpenForDistributedLogReplay_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -5101,6 +5149,9 @@ public final class AdminProtos { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(2, serverStartCode_); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(3, isOpenForDistributedLogReplay_); + } getUnknownFields().writeTo(output); } @@ -5118,6 +5169,10 @@ public final class AdminProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, serverStartCode_); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, isOpenForDistributedLogReplay_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -5148,6 +5203,11 @@ public final class AdminProtos { result = result && (getServerStartCode() == other.getServerStartCode()); } + result = result && (hasIsOpenForDistributedLogReplay() == other.hasIsOpenForDistributedLogReplay()); + if (hasIsOpenForDistributedLogReplay()) { + result = result && (getIsOpenForDistributedLogReplay() + == other.getIsOpenForDistributedLogReplay()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -5169,6 +5229,10 @@ public final class AdminProtos { hash = (37 * hash) + SERVERSTARTCODE_FIELD_NUMBER; hash = (53 * hash) + hashLong(getServerStartCode()); } + if (hasIsOpenForDistributedLogReplay()) { + hash = (37 * hash) + ISOPENFORDISTRIBUTEDLOGREPLAY_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsOpenForDistributedLogReplay()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -5287,6 +5351,8 @@ public final class AdminProtos { } serverStartCode_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); + isOpenForDistributedLogReplay_ = false; + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -5328,6 +5394,10 @@ public final class AdminProtos { to_bitField0_ |= 0x00000001; } result.serverStartCode_ = serverStartCode_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.isOpenForDistributedLogReplay_ = isOpenForDistributedLogReplay_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -5373,6 +5443,9 @@ public final class AdminProtos { if (other.hasServerStartCode()) { setServerStartCode(other.getServerStartCode()); } + if (other.hasIsOpenForDistributedLogReplay()) { + setIsOpenForDistributedLogReplay(other.getIsOpenForDistributedLogReplay()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -5695,6 +5768,55 @@ public final class AdminProtos { return this; } + // optional bool isOpenForDistributedLogReplay = 3; + private boolean isOpenForDistributedLogReplay_ ; + /** + * optional bool isOpenForDistributedLogReplay = 3; + * + *
+       * open region for distributedLogReplay
+       * 
+       */
+      public boolean hasIsOpenForDistributedLogReplay() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * optional bool isOpenForDistributedLogReplay = 3;
+       *
+       *
+       * open region for distributedLogReplay
+       * 
+       */
+      public boolean getIsOpenForDistributedLogReplay() {
+        return isOpenForDistributedLogReplay_;
+      }
+      /**
+       * optional bool isOpenForDistributedLogReplay = 3;
+       *
+       *
+       * open region for distributedLogReplay
+       * 
+       */
+      public Builder setIsOpenForDistributedLogReplay(boolean value) {
+        bitField0_ |= 0x00000004;
+        isOpenForDistributedLogReplay_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * optional bool isOpenForDistributedLogReplay = 3;
+       *
+       *
+       * open region for distributedLogReplay
+       * 
+ */ + public Builder clearIsOpenForDistributedLogReplay() { + bitField0_ = (bitField0_ & ~0x00000004); + isOpenForDistributedLogReplay_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:OpenRegionRequest) } @@ -21166,77 +21288,78 @@ public final class AdminProtos { "FileResponse\022\022\n\nstore_file\030\001 \003(\t\"\030\n\026GetO" + "nlineRegionRequest\";\n\027GetOnlineRegionRes" + "ponse\022 \n\013region_info\030\001 \003(\0132\013.RegionInfo\"" + - "\326\001\n\021OpenRegionRequest\0224\n\topen_info\030\001 \003(\013" + + "\375\001\n\021OpenRegionRequest\0224\n\topen_info\030\001 \003(\013" + "2!.OpenRegionRequest.RegionOpenInfo\022\027\n\017s" + - "erverStartCode\030\002 \001(\004\032r\n\016RegionOpenInfo\022\033" + - "\n\006region\030\001 \002(\0132\013.RegionInfo\022\037\n\027version_o" + - "f_offline_node\030\002 \001(\r\022\"\n\rfavored_nodes\030\003 " + - "\003(\0132\013.ServerName\"\235\001\n\022OpenRegionResponse\022", - "=\n\ropening_state\030\001 \003(\0162&.OpenRegionRespo" + - "nse.RegionOpeningState\"H\n\022RegionOpeningS" + - "tate\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPENED\020\001\022\022\n\016" + - "FAILED_OPENING\020\002\"\271\001\n\022CloseRegionRequest\022" + - " \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\037\n\027ver" + - "sion_of_closing_node\030\002 \001(\r\022\036\n\020transition" + - "_in_ZK\030\003 \001(\010:\004true\022\'\n\022destination_server" + - "\030\004 \001(\0132\013.ServerName\022\027\n\017serverStartCode\030\005" + - " \001(\004\"%\n\023CloseRegionResponse\022\016\n\006closed\030\001 " + - "\002(\010\"P\n\022FlushRegionRequest\022 \n\006region\030\001 \002(", - "\0132\020.RegionSpecifier\022\030\n\020if_older_than_ts\030" + - "\002 \001(\004\"?\n\023FlushRegionResponse\022\027\n\017last_flu" + - "sh_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\"K\n\022SplitR" + - "egionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpe" + - "cifier\022\023\n\013split_point\030\002 \001(\014\"\025\n\023SplitRegi" + - "onResponse\"W\n\024CompactRegionRequest\022 \n\006re" + - "gion\030\001 \002(\0132\020.RegionSpecifier\022\r\n\005major\030\002 " + - "\001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025CompactRegionResp" + - "onse\"\262\001\n\031UpdateFavoredNodesRequest\022@\n\013up" + - "date_info\030\001 \003(\0132+.UpdateFavoredNodesRequ", - "est.RegionUpdateInfo\032S\n\020RegionUpdateInfo" + - "\022\033\n\006region\030\001 \002(\0132\013.RegionInfo\022\"\n\rfavored" + - "_nodes\030\002 \003(\0132\013.ServerName\".\n\032UpdateFavor" + - "edNodesResponse\022\020\n\010response\030\001 \001(\r\"v\n\023Mer" + - "geRegionsRequest\022\"\n\010region_a\030\001 \002(\0132\020.Reg" + - "ionSpecifier\022\"\n\010region_b\030\002 \002(\0132\020.RegionS" + - "pecifier\022\027\n\010forcible\030\003 \001(\010:\005false\"\026\n\024Mer" + - "geRegionsResponse\"X\n\010WALEntry\022\024\n\003key\030\001 \002" + - "(\0132\007.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025" + - "associated_cell_count\030\003 \001(\005\"4\n\030Replicate", - "WALEntryRequest\022\030\n\005entry\030\001 \003(\0132\t.WALEntr" + - "y\"\033\n\031ReplicateWALEntryResponse\"\026\n\024RollWA" + - "LWriterRequest\"0\n\025RollWALWriterResponse\022" + - "\027\n\017region_to_flush\030\001 \003(\014\"#\n\021StopServerRe" + - "quest\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerRespo" + - "nse\"\026\n\024GetServerInfoRequest\"B\n\nServerInf" + - "o\022 \n\013server_name\030\001 
\002(\0132\013.ServerName\022\022\n\nw" + - "ebui_port\030\002 \001(\r\"9\n\025GetServerInfoResponse" + - "\022 \n\013server_info\030\001 \002(\0132\013.ServerInfo2\306\007\n\014A" + - "dminService\022>\n\rGetRegionInfo\022\025.GetRegion", - "InfoRequest\032\026.GetRegionInfoResponse\022;\n\014G" + - "etStoreFile\022\024.GetStoreFileRequest\032\025.GetS" + - "toreFileResponse\022D\n\017GetOnlineRegion\022\027.Ge" + - "tOnlineRegionRequest\032\030.GetOnlineRegionRe" + - "sponse\0225\n\nOpenRegion\022\022.OpenRegionRequest" + - "\032\023.OpenRegionResponse\0228\n\013CloseRegion\022\023.C" + - "loseRegionRequest\032\024.CloseRegionResponse\022" + - "8\n\013FlushRegion\022\023.FlushRegionRequest\032\024.Fl" + - "ushRegionResponse\0228\n\013SplitRegion\022\023.Split" + - "RegionRequest\032\024.SplitRegionResponse\022>\n\rC", - "ompactRegion\022\025.CompactRegionRequest\032\026.Co" + - "mpactRegionResponse\022;\n\014MergeRegions\022\024.Me" + - "rgeRegionsRequest\032\025.MergeRegionsResponse" + - "\022J\n\021ReplicateWALEntry\022\031.ReplicateWALEntr" + - "yRequest\032\032.ReplicateWALEntryResponse\022?\n\006" + - "Replay\022\031.ReplicateWALEntryRequest\032\032.Repl" + - "icateWALEntryResponse\022>\n\rRollWALWriter\022\025" + - ".RollWALWriterRequest\032\026.RollWALWriterRes" + - "ponse\022>\n\rGetServerInfo\022\025.GetServerInfoRe" + - "quest\032\026.GetServerInfoResponse\0225\n\nStopSer", - "ver\022\022.StopServerRequest\032\023.StopServerResp" + - "onse\022M\n\022UpdateFavoredNodes\022\032.UpdateFavor" + - "edNodesRequest\032\033.UpdateFavoredNodesRespo" + - "nseBA\n*org.apache.hadoop.hbase.protobuf." + - "generatedB\013AdminProtosH\001\210\001\001\240\001\001" + "erverStartCode\030\002 \001(\004\022%\n\035isOpenForDistrib" + + "utedLogReplay\030\003 \001(\010\032r\n\016RegionOpenInfo\022\033\n" + + "\006region\030\001 \002(\0132\013.RegionInfo\022\037\n\027version_of" + + "_offline_node\030\002 \001(\r\022\"\n\rfavored_nodes\030\003 \003", + "(\0132\013.ServerName\"\235\001\n\022OpenRegionResponse\022=" + + "\n\ropening_state\030\001 \003(\0162&.OpenRegionRespon" + + "se.RegionOpeningState\"H\n\022RegionOpeningSt" + + "ate\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPENED\020\001\022\022\n\016F" + + "AILED_OPENING\020\002\"\271\001\n\022CloseRegionRequest\022 " + + "\n\006region\030\001 \002(\0132\020.RegionSpecifier\022\037\n\027vers" + + "ion_of_closing_node\030\002 \001(\r\022\036\n\020transition_" + + "in_ZK\030\003 \001(\010:\004true\022\'\n\022destination_server\030" + + "\004 \001(\0132\013.ServerName\022\027\n\017serverStartCode\030\005 " + + "\001(\004\"%\n\023CloseRegionResponse\022\016\n\006closed\030\001 \002", + "(\010\"P\n\022FlushRegionRequest\022 \n\006region\030\001 \002(\013" + + "2\020.RegionSpecifier\022\030\n\020if_older_than_ts\030\002" + + " \001(\004\"?\n\023FlushRegionResponse\022\027\n\017last_flus" + + "h_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\"K\n\022SplitRe" + + "gionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpec" + + "ifier\022\023\n\013split_point\030\002 \001(\014\"\025\n\023SplitRegio" + + "nResponse\"W\n\024CompactRegionRequest\022 \n\006reg" + + "ion\030\001 \002(\0132\020.RegionSpecifier\022\r\n\005major\030\002 \001" + + "(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025CompactRegionRespo" + + "nse\"\262\001\n\031UpdateFavoredNodesRequest\022@\n\013upd", + "ate_info\030\001 \003(\0132+.UpdateFavoredNodesReque" + + "st.RegionUpdateInfo\032S\n\020RegionUpdateInfo\022" + + "\033\n\006region\030\001 
\002(\0132\013.RegionInfo\022\"\n\rfavored_" + + "nodes\030\002 \003(\0132\013.ServerName\".\n\032UpdateFavore" + + "dNodesResponse\022\020\n\010response\030\001 \001(\r\"v\n\023Merg" + + "eRegionsRequest\022\"\n\010region_a\030\001 \002(\0132\020.Regi" + + "onSpecifier\022\"\n\010region_b\030\002 \002(\0132\020.RegionSp" + + "ecifier\022\027\n\010forcible\030\003 \001(\010:\005false\"\026\n\024Merg" + + "eRegionsResponse\"X\n\010WALEntry\022\024\n\003key\030\001 \002(" + + "\0132\007.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025a", + "ssociated_cell_count\030\003 \001(\005\"4\n\030ReplicateW" + + "ALEntryRequest\022\030\n\005entry\030\001 \003(\0132\t.WALEntry" + + "\"\033\n\031ReplicateWALEntryResponse\"\026\n\024RollWAL" + + "WriterRequest\"0\n\025RollWALWriterResponse\022\027" + + "\n\017region_to_flush\030\001 \003(\014\"#\n\021StopServerReq" + + "uest\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerRespon" + + "se\"\026\n\024GetServerInfoRequest\"B\n\nServerInfo" + + "\022 \n\013server_name\030\001 \002(\0132\013.ServerName\022\022\n\nwe" + + "bui_port\030\002 \001(\r\"9\n\025GetServerInfoResponse\022" + + " \n\013server_info\030\001 \002(\0132\013.ServerInfo2\306\007\n\014Ad", + "minService\022>\n\rGetRegionInfo\022\025.GetRegionI" + + "nfoRequest\032\026.GetRegionInfoResponse\022;\n\014Ge" + + "tStoreFile\022\024.GetStoreFileRequest\032\025.GetSt" + + "oreFileResponse\022D\n\017GetOnlineRegion\022\027.Get" + + "OnlineRegionRequest\032\030.GetOnlineRegionRes" + + "ponse\0225\n\nOpenRegion\022\022.OpenRegionRequest\032" + + "\023.OpenRegionResponse\0228\n\013CloseRegion\022\023.Cl" + + "oseRegionRequest\032\024.CloseRegionResponse\0228" + + "\n\013FlushRegion\022\023.FlushRegionRequest\032\024.Flu" + + "shRegionResponse\0228\n\013SplitRegion\022\023.SplitR", + "egionRequest\032\024.SplitRegionResponse\022>\n\rCo" + + "mpactRegion\022\025.CompactRegionRequest\032\026.Com" + + "pactRegionResponse\022;\n\014MergeRegions\022\024.Mer" + + "geRegionsRequest\032\025.MergeRegionsResponse\022" + + "J\n\021ReplicateWALEntry\022\031.ReplicateWALEntry" + + "Request\032\032.ReplicateWALEntryResponse\022?\n\006R" + + "eplay\022\031.ReplicateWALEntryRequest\032\032.Repli" + + "cateWALEntryResponse\022>\n\rRollWALWriter\022\025." 
+ + "RollWALWriterRequest\032\026.RollWALWriterResp" + + "onse\022>\n\rGetServerInfo\022\025.GetServerInfoReq", + "uest\032\026.GetServerInfoResponse\0225\n\nStopServ" + + "er\022\022.StopServerRequest\032\023.StopServerRespo" + + "nse\022M\n\022UpdateFavoredNodes\022\032.UpdateFavore" + + "dNodesRequest\032\033.UpdateFavoredNodesRespon" + + "seBA\n*org.apache.hadoop.hbase.protobuf.g" + + "eneratedB\013AdminProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -21284,7 +21407,7 @@ public final class AdminProtos { internal_static_OpenRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_OpenRegionRequest_descriptor, - new java.lang.String[] { "OpenInfo", "ServerStartCode", }); + new java.lang.String[] { "OpenInfo", "ServerStartCode", "IsOpenForDistributedLogReplay", }); internal_static_OpenRegionRequest_RegionOpenInfo_descriptor = internal_static_OpenRegionRequest_descriptor.getNestedTypes().get(0); internal_static_OpenRegionRequest_RegionOpenInfo_fieldAccessorTable = new diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java index 85e4816..eeab45c 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MapReduceProtos.java @@ -720,53 +720,53 @@ public final class MapReduceProtos { public interface TableSnapshotRegionSplitOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional .TableSchema table = 1; + // repeated string locations = 2; /** - * optional .TableSchema table = 1; + * repeated string locations = 2; */ - boolean hasTable(); + java.util.List + getLocationsList(); /** - * optional .TableSchema table = 1; + * repeated string locations = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable(); + int getLocationsCount(); /** - * optional .TableSchema table = 1; + * repeated string locations = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder(); - - // optional .RegionInfo region = 2; + java.lang.String getLocations(int index); /** - * optional .RegionInfo region = 2; + * repeated string locations = 2; */ - boolean hasRegion(); + com.google.protobuf.ByteString + getLocationsBytes(int index); + + // optional .TableSchema table = 3; /** - * optional .RegionInfo region = 2; + * optional .TableSchema table = 3; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(); + boolean hasTable(); /** - * optional .RegionInfo region = 2; + * optional .TableSchema table = 3; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder(); - - // repeated string locations = 3; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable(); /** - * repeated string locations = 3; + * optional .TableSchema table = 3; */ - java.util.List - getLocationsList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder(); + + // optional .RegionInfo region = 4; /** - * repeated string locations = 3; + * optional .RegionInfo region = 4; */ - int getLocationsCount(); + boolean hasRegion(); /** - * repeated string locations = 
3; + * optional .RegionInfo region = 4; */ - java.lang.String getLocations(int index); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion(); /** - * repeated string locations = 3; + * optional .RegionInfo region = 4; */ - com.google.protobuf.ByteString - getLocationsBytes(int index); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder(); } /** * Protobuf type {@code TableSnapshotRegionSplit} @@ -819,7 +819,15 @@ public final class MapReduceProtos { } break; } - case 10: { + case 18: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + locations_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + locations_.add(input.readBytes()); + break; + } + case 26: { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = table_.toBuilder(); @@ -832,7 +840,7 @@ public final class MapReduceProtos { bitField0_ |= 0x00000001; break; } - case 18: { + case 34: { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = region_.toBuilder(); @@ -845,14 +853,6 @@ public final class MapReduceProtos { bitField0_ |= 0x00000002; break; } - case 26: { - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - locations_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000004; - } - locations_.add(input.readBytes()); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -861,7 +861,7 @@ public final class MapReduceProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { locations_ = new com.google.protobuf.UnmodifiableLazyStringList(locations_); } this.unknownFields = unknownFields.build(); @@ -896,84 +896,84 @@ public final class MapReduceProtos { } private int bitField0_; - // optional .TableSchema table = 1; - public static final int TABLE_FIELD_NUMBER = 1; + // repeated string locations = 2; + public static final int LOCATIONS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList locations_; + /** + * repeated string locations = 2; + */ + public java.util.List + getLocationsList() { + return locations_; + } + /** + * repeated string locations = 2; + */ + public int getLocationsCount() { + return locations_.size(); + } + /** + * repeated string locations = 2; + */ + public java.lang.String getLocations(int index) { + return locations_.get(index); + } + /** + * repeated string locations = 2; + */ + public com.google.protobuf.ByteString + getLocationsBytes(int index) { + return locations_.getByteString(index); + } + + // optional .TableSchema table = 3; + public static final int TABLE_FIELD_NUMBER = 3; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema table_; /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public boolean hasTable() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable() { return table_; } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder() { return table_; } - // optional .RegionInfo region = 2; - public static final int REGION_FIELD_NUMBER = 2; + // optional .RegionInfo region = 4; + public static final int REGION_FIELD_NUMBER = 4; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_; /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public boolean hasRegion() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() { return region_; } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { return region_; } - // repeated string locations = 3; - public static final int LOCATIONS_FIELD_NUMBER = 3; - private com.google.protobuf.LazyStringList locations_; - /** - * repeated string locations = 3; - */ - public java.util.List - getLocationsList() { - return locations_; - } - /** - * repeated string locations = 3; - */ - public int getLocationsCount() { - return locations_.size(); - } - /** - * repeated string locations = 3; - */ - public java.lang.String getLocations(int index) { - return locations_.get(index); - } - /** - * repeated string locations = 3; - */ - public com.google.protobuf.ByteString - getLocationsBytes(int index) { - return locations_.getByteString(index); - } - private void initFields() { + locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); - locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -999,14 +999,14 @@ public final class MapReduceProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + for (int i = 0; i < locations_.size(); i++) { + output.writeBytes(2, locations_.getByteString(i)); + } if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, table_); + output.writeMessage(3, table_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, region_); - } - for (int i = 0; i < locations_.size(); i++) { - output.writeBytes(3, locations_.getByteString(i)); + output.writeMessage(4, region_); } getUnknownFields().writeTo(output); } @@ -1017,14 +1017,6 @@ public final class MapReduceProtos { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, table_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, region_); - } { int dataSize = 0; for (int i = 0; i < locations_.size(); i++) { @@ -1034,6 +1026,14 @@ public final class MapReduceProtos { size += dataSize; size += 1 * getLocationsList().size(); } + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, region_); + } size += 
getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -1057,6 +1057,8 @@ public final class MapReduceProtos { org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit other = (org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit) obj; boolean result = true; + result = result && getLocationsList() + .equals(other.getLocationsList()); result = result && (hasTable() == other.hasTable()); if (hasTable()) { result = result && getTable() @@ -1067,8 +1069,6 @@ public final class MapReduceProtos { result = result && getRegion() .equals(other.getRegion()); } - result = result && getLocationsList() - .equals(other.getLocationsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -1082,6 +1082,10 @@ public final class MapReduceProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getLocationsCount() > 0) { + hash = (37 * hash) + LOCATIONS_FIELD_NUMBER; + hash = (53 * hash) + getLocationsList().hashCode(); + } if (hasTable()) { hash = (37 * hash) + TABLE_FIELD_NUMBER; hash = (53 * hash) + getTable().hashCode(); @@ -1090,10 +1094,6 @@ public final class MapReduceProtos { hash = (37 * hash) + REGION_FIELD_NUMBER; hash = (53 * hash) + getRegion().hashCode(); } - if (getLocationsCount() > 0) { - hash = (37 * hash) + LOCATIONS_FIELD_NUMBER; - hash = (53 * hash) + getLocationsList().hashCode(); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -1205,19 +1205,19 @@ public final class MapReduceProtos { public Builder clear() { super.clear(); + locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); if (tableBuilder_ == null) { table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); } else { tableBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000001); + bitField0_ = (bitField0_ & ~0x00000002); if (regionBuilder_ == null) { region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); } else { regionBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000002); - locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -1247,7 +1247,13 @@ public final class MapReduceProtos { org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit result = new org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + locations_ = new com.google.protobuf.UnmodifiableLazyStringList( + locations_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.locations_ = locations_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000001; } if (tableBuilder_ == null) { @@ -1255,7 +1261,7 @@ public final class MapReduceProtos { } else { result.table_ = tableBuilder_.build(); } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000002; } if (regionBuilder_ == null) { @@ -1263,12 +1269,6 @@ public final class MapReduceProtos { } else { result.region_ = regionBuilder_.build(); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - locations_ = new com.google.protobuf.UnmodifiableLazyStringList( - 
locations_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.locations_ = locations_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -1285,22 +1285,22 @@ public final class MapReduceProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit.getDefaultInstance()) return this; - if (other.hasTable()) { - mergeTable(other.getTable()); - } - if (other.hasRegion()) { - mergeRegion(other.getRegion()); - } if (!other.locations_.isEmpty()) { if (locations_.isEmpty()) { locations_ = other.locations_; - bitField0_ = (bitField0_ & ~0x00000004); + bitField0_ = (bitField0_ & ~0x00000001); } else { ensureLocationsIsMutable(); locations_.addAll(other.locations_); } onChanged(); } + if (other.hasTable()) { + mergeTable(other.getTable()); + } + if (other.hasRegion()) { + mergeRegion(other.getRegion()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -1340,18 +1340,111 @@ public final class MapReduceProtos { } private int bitField0_; - // optional .TableSchema table = 1; + // repeated string locations = 2; + private com.google.protobuf.LazyStringList locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureLocationsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + locations_ = new com.google.protobuf.LazyStringArrayList(locations_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string locations = 2; + */ + public java.util.List + getLocationsList() { + return java.util.Collections.unmodifiableList(locations_); + } + /** + * repeated string locations = 2; + */ + public int getLocationsCount() { + return locations_.size(); + } + /** + * repeated string locations = 2; + */ + public java.lang.String getLocations(int index) { + return locations_.get(index); + } + /** + * repeated string locations = 2; + */ + public com.google.protobuf.ByteString + getLocationsBytes(int index) { + return locations_.getByteString(index); + } + /** + * repeated string locations = 2; + */ + public Builder setLocations( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLocationsIsMutable(); + locations_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string locations = 2; + */ + public Builder addLocations( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLocationsIsMutable(); + locations_.add(value); + onChanged(); + return this; + } + /** + * repeated string locations = 2; + */ + public Builder addAllLocations( + java.lang.Iterable values) { + ensureLocationsIsMutable(); + super.addAll(values, locations_); + onChanged(); + return this; + } + /** + * repeated string locations = 2; + */ + public Builder clearLocations() { + locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string locations = 2; + */ + public Builder addLocationsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureLocationsIsMutable(); + locations_.add(value); + onChanged(); + return this; + } + + // optional .TableSchema table = 3; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); 
private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableBuilder_; /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public boolean hasTable() { - return ((bitField0_ & 0x00000001) == 0x00000001); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTable() { if (tableBuilder_ == null) { @@ -1361,7 +1454,7 @@ public final class MapReduceProtos { } } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { if (tableBuilder_ == null) { @@ -1373,11 +1466,11 @@ public final class MapReduceProtos { } else { tableBuilder_.setMessage(value); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; return this; } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public Builder setTable( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { @@ -1387,15 +1480,15 @@ public final class MapReduceProtos { } else { tableBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; return this; } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { if (tableBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && + if (((bitField0_ & 0x00000002) == 0x00000002) && table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(table_).mergeFrom(value).buildPartial(); @@ -1406,11 +1499,11 @@ public final class MapReduceProtos { } else { tableBuilder_.mergeFrom(value); } - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; return this; } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public Builder clearTable() { if (tableBuilder_ == null) { @@ -1419,19 +1512,19 @@ public final class MapReduceProtos { } else { tableBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000001); + bitField0_ = (bitField0_ & ~0x00000002); return this; } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableBuilder() { - bitField0_ |= 0x00000001; + bitField0_ |= 0x00000002; onChanged(); return getTableFieldBuilder().getBuilder(); } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableOrBuilder() { if (tableBuilder_ != null) { @@ -1441,7 +1534,7 @@ public final class MapReduceProtos { } } /** - * optional .TableSchema table = 1; + * optional .TableSchema table = 3; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> @@ -1457,18 +1550,18 @@ public 
final class MapReduceProtos { return tableBuilder_; } - // optional .RegionInfo region = 2; + // optional .RegionInfo region = 4; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_; /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public boolean hasRegion() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegion() { if (regionBuilder_ == null) { @@ -1478,7 +1571,7 @@ public final class MapReduceProtos { } } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionBuilder_ == null) { @@ -1490,11 +1583,11 @@ public final class MapReduceProtos { } else { regionBuilder_.setMessage(value); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; return this; } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public Builder setRegion( org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { @@ -1504,15 +1597,15 @@ public final class MapReduceProtos { } else { regionBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; return this; } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { if (regionBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && + if (((bitField0_ & 0x00000004) == 0x00000004) && region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(region_).mergeFrom(value).buildPartial(); @@ -1523,11 +1616,11 @@ public final class MapReduceProtos { } else { regionBuilder_.mergeFrom(value); } - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; return this; } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public Builder clearRegion() { if (regionBuilder_ == null) { @@ -1536,19 +1629,19 @@ public final class MapReduceProtos { } else { regionBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00000002); + bitField0_ = (bitField0_ & ~0x00000004); return this; } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder() { - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000004; onChanged(); return getRegionFieldBuilder().getBuilder(); } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region = 4; */ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() { if (regionBuilder_ != null) { @@ -1558,7 +1651,7 @@ public final class MapReduceProtos { } } /** - * optional .RegionInfo region = 2; + * optional .RegionInfo region 
= 4; */ private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> @@ -1574,99 +1667,6 @@ public final class MapReduceProtos { return regionBuilder_; } - // repeated string locations = 3; - private com.google.protobuf.LazyStringList locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureLocationsIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - locations_ = new com.google.protobuf.LazyStringArrayList(locations_); - bitField0_ |= 0x00000004; - } - } - /** - * repeated string locations = 3; - */ - public java.util.List - getLocationsList() { - return java.util.Collections.unmodifiableList(locations_); - } - /** - * repeated string locations = 3; - */ - public int getLocationsCount() { - return locations_.size(); - } - /** - * repeated string locations = 3; - */ - public java.lang.String getLocations(int index) { - return locations_.get(index); - } - /** - * repeated string locations = 3; - */ - public com.google.protobuf.ByteString - getLocationsBytes(int index) { - return locations_.getByteString(index); - } - /** - * repeated string locations = 3; - */ - public Builder setLocations( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureLocationsIsMutable(); - locations_.set(index, value); - onChanged(); - return this; - } - /** - * repeated string locations = 3; - */ - public Builder addLocations( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureLocationsIsMutable(); - locations_.add(value); - onChanged(); - return this; - } - /** - * repeated string locations = 3; - */ - public Builder addAllLocations( - java.lang.Iterable values) { - ensureLocationsIsMutable(); - super.addAll(values, locations_); - onChanged(); - return this; - } - /** - * repeated string locations = 3; - */ - public Builder clearLocations() { - locations_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - return this; - } - /** - * repeated string locations = 3; - */ - public Builder addLocationsBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensureLocationsIsMutable(); - locations_.add(value); - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:TableSnapshotRegionSplit) } @@ -1699,9 +1699,9 @@ public final class MapReduceProtos { java.lang.String[] descriptorData = { "\n\017MapReduce.proto\032\013HBase.proto\".\n\013ScanMe" + "trics\022\037\n\007metrics\030\001 \003(\0132\016.NameInt64Pair\"g" + - "\n\030TableSnapshotRegionSplit\022\033\n\005table\030\001 \001(" + - "\0132\014.TableSchema\022\033\n\006region\030\002 \001(\0132\013.Region" + - "Info\022\021\n\tlocations\030\003 \003(\tBB\n*org.apache.ha" + + "\n\030TableSnapshotRegionSplit\022\021\n\tlocations\030" + + "\002 \003(\t\022\033\n\005table\030\003 \001(\0132\014.TableSchema\022\033\n\006re" + + "gion\030\004 \001(\0132\013.RegionInfoBB\n*org.apache.ha" + "doop.hbase.protobuf.generatedB\017MapReduce" + "ProtosH\001\240\001\001" }; @@ -1721,7 +1721,7 @@ public final class MapReduceProtos { internal_static_TableSnapshotRegionSplit_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableSnapshotRegionSplit_descriptor, 
- new java.lang.String[] { "Table", "Region", "Locations", }); + new java.lang.String[] { "Locations", "Table", "Region", }); return null; } }; diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index 0af2a97..9d037f5 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -3230,6 +3230,16 @@ public final class ZooKeeperProtos { * required .ServerName server_name = 2; */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); + + // optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN]; + /** + * optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN]; + */ + boolean hasMode(); + /** + * optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN]; + */ + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode getMode(); } /** * Protobuf type {@code SplitLogTask} @@ -3312,6 +3322,17 @@ public final class ZooKeeperProtos { bitField0_ |= 0x00000002; break; } + case 24: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(3, rawValue); + } else { + bitField0_ |= 0x00000004; + mode_ = value; + } + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -3460,6 +3481,97 @@ public final class ZooKeeperProtos { // @@protoc_insertion_point(enum_scope:SplitLogTask.State) } + /** + * Protobuf enum {@code SplitLogTask.RecoveryMode} + */ + public enum RecoveryMode + implements com.google.protobuf.ProtocolMessageEnum { + /** + * UNKNOWN = 0; + */ + UNKNOWN(0, 0), + /** + * LOG_SPLITTING = 1; + */ + LOG_SPLITTING(1, 1), + /** + * LOG_REPLAY = 2; + */ + LOG_REPLAY(2, 2), + ; + + /** + * UNKNOWN = 0; + */ + public static final int UNKNOWN_VALUE = 0; + /** + * LOG_SPLITTING = 1; + */ + public static final int LOG_SPLITTING_VALUE = 1; + /** + * LOG_REPLAY = 2; + */ + public static final int LOG_REPLAY_VALUE = 2; + + + public final int getNumber() { return value; } + + public static RecoveryMode valueOf(int value) { + switch (value) { + case 0: return UNKNOWN; + case 1: return LOG_SPLITTING; + case 2: return LOG_REPLAY; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public RecoveryMode findValueByNumber(int number) { + return RecoveryMode.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDescriptor().getEnumTypes().get(1); + } + + private static final RecoveryMode[] VALUES = values(); + + public static RecoveryMode valueOf( + 
com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private RecoveryMode(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:SplitLogTask.RecoveryMode) + } + private int bitField0_; // required .SplitLogTask.State state = 1; public static final int STATE_FIELD_NUMBER = 1; @@ -3499,9 +3611,26 @@ public final class ZooKeeperProtos { return serverName_; } + // optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN]; + public static final int MODE_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode mode_; + /** + * optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN]; + */ + public boolean hasMode() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN]; + */ + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode getMode() { + return mode_; + } + private void initFields() { state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED; serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + mode_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode.UNKNOWN; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -3533,6 +3662,9 @@ public final class ZooKeeperProtos { if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, serverName_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeEnum(3, mode_.getNumber()); + } getUnknownFields().writeTo(output); } @@ -3550,6 +3682,10 @@ public final class ZooKeeperProtos { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, serverName_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(3, mode_.getNumber()); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -3583,6 +3719,11 @@ public final class ZooKeeperProtos { result = result && getServerName() .equals(other.getServerName()); } + result = result && (hasMode() == other.hasMode()); + if (hasMode()) { + result = result && + (getMode() == other.getMode()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -3604,6 +3745,10 @@ public final class ZooKeeperProtos { hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER; hash = (53 * hash) + getServerName().hashCode(); } + if (hasMode()) { + hash = (37 * hash) + MODE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getMode()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -3728,6 +3873,8 @@ public final class ZooKeeperProtos { serverNameBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); + mode_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode.UNKNOWN; + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -3768,6 +3915,10 @@ public final class ZooKeeperProtos { } else { result.serverName_ = serverNameBuilder_.build(); } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + 
result.mode_ = mode_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -3790,6 +3941,9 @@ public final class ZooKeeperProtos { if (other.hasServerName()) { mergeServerName(other.getServerName()); } + if (other.hasMode()) { + setMode(other.getMode()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -3982,6 +4136,42 @@ public final class ZooKeeperProtos { return serverNameBuilder_; } + // optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN]; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode mode_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode.UNKNOWN; + /** + * optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN]; + */ + public boolean hasMode() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN]; + */ + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode getMode() { + return mode_; + } + /** + * optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN]; + */ + public Builder setMode(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + mode_ = value; + onChanged(); + return this; + } + /** + * optional .SplitLogTask.RecoveryMode mode = 3 [default = UNKNOWN]; + */ + public Builder clearMode() { + bitField0_ = (bitField0_ & ~0x00000004); + mode_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode.UNKNOWN; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:SplitLogTask) } @@ -9399,29 +9589,32 @@ public final class ZooKeeperProtos { "gionTransition\022\027\n\017event_type_code\030\001 \002(\r\022" + "\023\n\013region_name\030\002 \002(\014\022\023\n\013create_time\030\003 \002(" + "\004\022 \n\013server_name\030\004 \002(\0132\013.ServerName\022\017\n\007p" + - "ayload\030\005 \001(\014\"\231\001\n\014SplitLogTask\022\"\n\005state\030\001" + + "ayload\030\005 \001(\014\"\214\002\n\014SplitLogTask\022\"\n\005state\030\001" + " \002(\0162\023.SplitLogTask.State\022 \n\013server_name", - "\030\002 \002(\0132\013.ServerName\"C\n\005State\022\016\n\nUNASSIGN" + - "ED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022" + - "\007\n\003ERR\020\004\"n\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table" + - ".State:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n" + - "\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003" + - "\"%\n\017ReplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\"" + - "^\n\020ReplicationState\022&\n\005state\030\001 \002(\0162\027.Rep" + - "licationState.State\"\"\n\005State\022\013\n\007ENABLED\020" + - "\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationHLogPositi" + - "on\022\020\n\010position\030\001 \002(\003\"%\n\017ReplicationLock\022", - "\022\n\nlock_owner\030\001 \002(\t\"\230\001\n\tTableLock\022\036\n\ntab" + - "le_name\030\001 \001(\0132\n.TableName\022\037\n\nlock_owner\030" + - "\002 \001(\0132\013.ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021\n" + - "\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013cre" + - "ate_time\030\006 \001(\003\";\n\017StoreSequenceId\022\023\n\013fam" + - "ily_name\030\001 \002(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026R" + - 
"egionStoreSequenceIds\022 \n\030last_flushed_se" + - "quence_id\030\001 \002(\004\022+\n\021store_sequence_id\030\002 \003" + - "(\0132\020.StoreSequenceIdBE\n*org.apache.hadoo" + - "p.hbase.protobuf.generatedB\017ZooKeeperPro", - "tosH\001\210\001\001\240\001\001" + "\030\002 \002(\0132\013.ServerName\0221\n\004mode\030\003 \001(\0162\032.Spli" + + "tLogTask.RecoveryMode:\007UNKNOWN\"C\n\005State\022" + + "\016\n\nUNASSIGNED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020\002" + + "\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\">\n\014RecoveryMode\022\013\n\007U" + + "NKNOWN\020\000\022\021\n\rLOG_SPLITTING\020\001\022\016\n\nLOG_REPLA" + + "Y\020\002\"n\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table.Stat" + + "e:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISA" + + "BLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"%\n\017R" + + "eplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\"^\n\020Re" + + "plicationState\022&\n\005state\030\001 \002(\0162\027.Replicat", + "ionState.State\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010" + + "DISABLED\020\001\"+\n\027ReplicationHLogPosition\022\020\n" + + "\010position\030\001 \002(\003\"%\n\017ReplicationLock\022\022\n\nlo" + + "ck_owner\030\001 \002(\t\"\230\001\n\tTableLock\022\036\n\ntable_na" + + "me\030\001 \001(\0132\n.TableName\022\037\n\nlock_owner\030\002 \001(\013" + + "2\013.ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_s" + + "hared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_t" + + "ime\030\006 \001(\003\";\n\017StoreSequenceId\022\023\n\013family_n" + + "ame\030\001 \002(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026Region" + + "StoreSequenceIds\022 \n\030last_flushed_sequenc", + "e_id\030\001 \002(\004\022+\n\021store_sequence_id\030\002 \003(\0132\020." + + "StoreSequenceIdBE\n*org.apache.hadoop.hba" + + "se.protobuf.generatedB\017ZooKeeperProtosH\001" + + "\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -9457,7 +9650,7 @@ public final class ZooKeeperProtos { internal_static_SplitLogTask_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SplitLogTask_descriptor, - new java.lang.String[] { "State", "ServerName", }); + new java.lang.String[] { "State", "ServerName", "Mode", }); internal_static_Table_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_Table_fieldAccessorTable = new diff --git hbase-protocol/src/main/protobuf/Admin.proto hbase-protocol/src/main/protobuf/Admin.proto index 5b889cd..651025d 100644 --- hbase-protocol/src/main/protobuf/Admin.proto +++ hbase-protocol/src/main/protobuf/Admin.proto @@ -70,6 +70,8 @@ message OpenRegionRequest { repeated RegionOpenInfo open_info = 1; // the intended server for this RPC. 
optional uint64 serverStartCode = 2; + // open region for distributedLogReplay + optional bool isOpenForDistributedLogReplay = 3; message RegionOpenInfo { required RegionInfo region = 1; diff --git hbase-protocol/src/main/protobuf/ZooKeeper.proto hbase-protocol/src/main/protobuf/ZooKeeper.proto index 082e1f7..37816da 100644 --- hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -85,8 +85,14 @@ message SplitLogTask { DONE = 3; ERR = 4; } + enum RecoveryMode { + UNKNOWN = 0; + LOG_SPLITTING = 1; + LOG_REPLAY = 2; + } required State state = 1; required ServerName server_name = 2; + optional RecoveryMode mode = 3 [default = UNKNOWN]; } /** diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java index 67a0994..f030950 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java @@ -18,10 +18,12 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter; import org.apache.hadoop.hbase.util.Bytes; import com.google.protobuf.InvalidProtocolBufferException; @@ -36,49 +38,62 @@ import com.google.protobuf.InvalidProtocolBufferException; public class SplitLogTask { private final ServerName originServer; private final ZooKeeperProtos.SplitLogTask.State state; + private final ZooKeeperProtos.SplitLogTask.RecoveryMode mode; public static class Unassigned extends SplitLogTask { - public Unassigned(final ServerName originServer) { - super(originServer, ZooKeeperProtos.SplitLogTask.State.UNASSIGNED); + public Unassigned(final ServerName originServer, final Configuration conf) { + super(originServer, ZooKeeperProtos.SplitLogTask.State.UNASSIGNED, conf); } } public static class Owned extends SplitLogTask { public Owned(final ServerName originServer) { - super(originServer, ZooKeeperProtos.SplitLogTask.State.OWNED); + super(originServer, ZooKeeperProtos.SplitLogTask.State.OWNED, null); } } public static class Resigned extends SplitLogTask { public Resigned(final ServerName originServer) { - super(originServer, ZooKeeperProtos.SplitLogTask.State.RESIGNED); + super(originServer, ZooKeeperProtos.SplitLogTask.State.RESIGNED, null); } } public static class Done extends SplitLogTask { public Done(final ServerName originServer) { - super(originServer, ZooKeeperProtos.SplitLogTask.State.DONE); + super(originServer, ZooKeeperProtos.SplitLogTask.State.DONE, null); } } public static class Err extends SplitLogTask { public Err(final ServerName originServer) { - super(originServer, ZooKeeperProtos.SplitLogTask.State.ERR); + super(originServer, ZooKeeperProtos.SplitLogTask.State.ERR, null); } } SplitLogTask(final ZooKeeperProtos.SplitLogTask slt) { - this(ProtobufUtil.toServerName(slt.getServerName()), slt.getState()); + this.originServer = ProtobufUtil.toServerName(slt.getServerName()); + this.state = slt.getState(); + this.mode = (slt.hasMode()) ? 
slt.getMode() : + ZooKeeperProtos.SplitLogTask.RecoveryMode.UNKNOWN; } - SplitLogTask(final ServerName originServer, final ZooKeeperProtos.SplitLogTask.State state) { + SplitLogTask(final ServerName originServer, final ZooKeeperProtos.SplitLogTask.State state, + final Configuration conf) { this.originServer = originServer; this.state = state; + this.mode = (conf == null) ? ZooKeeperProtos.SplitLogTask.RecoveryMode.UNKNOWN : + ((HLogSplitter.isDistributedLogReplay(conf)) ? + ZooKeeperProtos.SplitLogTask.RecoveryMode.LOG_REPLAY : + ZooKeeperProtos.SplitLogTask.RecoveryMode.LOG_SPLITTING); } public ServerName getServerName() { return this.originServer; } + + public ZooKeeperProtos.SplitLogTask.RecoveryMode getMode() { + return this.mode; + } public boolean isUnassigned(final ServerName sn) { return this.originServer.equals(sn) && isUnassigned(); @@ -167,7 +182,8 @@ public class SplitLogTask { // pbs just created. HBaseProtos.ServerName snpb = ProtobufUtil.toServerName(this.originServer); ZooKeeperProtos.SplitLogTask slts = - ZooKeeperProtos.SplitLogTask.newBuilder().setServerName(snpb).setState(this.state).build(); + ZooKeeperProtos.SplitLogTask.newBuilder().setServerName(snpb).setState(this.state). + setMode(this.mode).build(); return ProtobufUtil.prependPBMagic(slts.toByteArray()); } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index f975b3e..3b6ab49 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -493,7 +493,7 @@ public class SplitLogManager extends ZooKeeperListener { if (count == 0 && this.master.isInitialized() && !this.master.getServerManager().areDeadServersInProgress()) { // no splitting work items left - deleteRecoveringRegionZNodes(null); + deleteRecoveringRegionZNodes(watcher, null); // reset lastRecoveringNodeCreationTime because we cleared all recovering znodes at // this point. 
lastRecoveringNodeCreationTime = Long.MAX_VALUE;
@@ -549,14 +549,6 @@ public class SplitLogManager extends ZooKeeperListener {

   void removeStaleRecoveringRegionsFromZK(final Set<ServerName> failedServers)
       throws KeeperException, InterruptedIOException {
-    if (!this.distributedLogReplay) {
-      // remove any regions in recovery from ZK which could happen when we turn the feature on
-      // and later turn it off
-      ZKUtil.deleteChildrenRecursively(watcher, watcher.recoveringRegionsZNode);
-      // the function is only used in distributedLogReplay mode when master is in initialization
-      return;
-    }
-
     Set<ServerName> knownFailedServers = new HashSet<ServerName>();
     if (failedServers != null) {
       for (ServerName tmpServerName : failedServers) {
@@ -624,7 +616,7 @@ public class SplitLogManager extends ZooKeeperListener {
     }
   }

-  private void deleteRecoveringRegionZNodes(List<String> regions) {
+  public static void deleteRecoveringRegionZNodes(ZooKeeperWatcher watcher, List<String> regions) {
     try {
       if (regions == null) {
         // remove all children under /home/recovering-regions
@@ -682,7 +674,7 @@ public class SplitLogManager extends ZooKeeperListener {
   }

   private void createNode(String path, Long retry_count) {
-    SplitLogTask slt = new SplitLogTask.Unassigned(serverName);
+    SplitLogTask slt = new SplitLogTask.Unassigned(serverName, this.conf);
     ZKUtil.asyncCreate(this.watcher, path, slt.toByteArray(), new CreateAsyncCallback(), retry_count);
     SplitLogCounters.tot_mgr_node_create_queued.incrementAndGet();
     return;
@@ -858,7 +850,7 @@ public class SplitLogManager extends ZooKeeperListener {
       task.incarnation++;
       try {
         // blocking zk call but this is done from the timeout thread
-        SplitLogTask slt = new SplitLogTask.Unassigned(this.serverName);
+        SplitLogTask slt = new SplitLogTask.Unassigned(this.serverName, this.conf);
         if (ZKUtil.setData(this.watcher, path, slt.toByteArray(), version) == false) {
           LOG.debug("failed to resubmit task " + path + " version changed");
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 2e7b022..b2474ea 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -1184,6 +1184,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           + regionServer.serverName));
       }
     }
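The RSRpcServices hunk below adds a fail-fast guard for rolling restarts: if the master flags an open as part of distributed log replay while this region server still has the feature off, the open is rejected with a DoNotRetryIOException rather than proceeding under mismatched settings. For context, a minimal sketch of how a caller might stamp the new field using the generated protobuf builder; the helper name `buildReplayOpen` and its parameters are illustrative assumptions, not code from this patch:

```java
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;

public final class OpenRegionRequestSketch {
  // Hypothetical master-side helper: build an OpenRegionRequest stamped for
  // distributed log replay via the new optional field 3.
  public static OpenRegionRequest buildReplayOpen(RegionOpenInfo openInfo, long serverStartCode) {
    return OpenRegionRequest.newBuilder()
        .addOpenInfo(openInfo)                     // region(s) to open
        .setServerStartCode(serverStartCode)       // intended target server
        .setIsOpenForDistributedLogReplay(true)    // tell the RS this open is for replay
        .build();
  }
}
```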
+    // check if current region open is for distributedLogReplay. This check supports rolling
+    // restart/upgrade, where the Master and the RS must see the same configuration value
+    if (request.hasIsOpenForDistributedLogReplay() && request.getIsOpenForDistributedLogReplay()) {
+      // check if the current RS has distributedLogReplay turned on
+      if (!regionServer.distributedLogReplay) {
+        throw new ServiceException(new DoNotRetryIOException("This OpenRegion request is opening "
+            + "a region for recovery, but this server " + regionServer.serverName
+            + " hasn't turned on distributedLogReplay yet."));
+      }
+    }
     OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
     final int regionCount = request.getOpenInfoCount();
     final Map<TableName, HTableDescriptor> htds =
@@ -1260,10 +1270,17 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       if (previous == null) {
         // check if the region to be opened is marked in recovering state in ZK
-        if (regionServer.distributedLogReplay
-            && SplitLogManager.isRegionMarkedRecoveringInZK(regionServer.getZooKeeper(),
-              region.getEncodedName())) {
-          regionServer.recoveringRegions.put(region.getEncodedName(), null);
+        if (SplitLogManager.isRegionMarkedRecoveringInZK(regionServer.getZooKeeper(),
+          region.getEncodedName())) {
+          if (regionServer.distributedLogReplay) {
+            regionServer.recoveringRegions.put(region.getEncodedName(), null);
+          } else {
+            // remove the stale recovering-region znode when this open is not for recovery,
+            // which can happen after distributedLogReplay is switched from on to off
+            List<String> tmpRegions = new ArrayList<String>();
+            tmpRegions.add(region.getEncodedName());
+            SplitLogManager.deleteRecoveringRegionZNodes(regionServer.getZooKeeper(), tmpRegions);
+          }
         }
         // If there is no action in progress, we can submit a specific handler.
         // Need to pass the expected version in the constructor.
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
index e01bb00..45dae54 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.master.SplitLogManager;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.regionserver.handler.HLogSplitterHandler;
 import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
@@ -302,7 +303,6 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
    */
   private void grabTask(String path) {
     Stat stat = new Stat();
-    long t = -1;
     byte[] data;
     synchronized (grabTaskLock) {
       currentTask = path;
@@ -335,6 +335,16 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable {
       return;
     }
+    // the RS and the Master have to see the same configuration values
+    if ((slt.getMode() == ZooKeeperProtos.SplitLogTask.RecoveryMode.UNKNOWN)
+        || (slt.getMode() == ZooKeeperProtos.SplitLogTask.RecoveryMode.LOG_REPLAY &&
+            !HLogSplitter.isDistributedLogReplay(conf))
+        || (slt.getMode() == ZooKeeperProtos.SplitLogTask.RecoveryMode.LOG_SPLITTING &&
+            HLogSplitter.isDistributedLogReplay(conf))) {
+      LOG.debug("Didn't grab task=" + path + " because its recovery mode doesn't match this "
+          + "server's configuration. Current task has recovery mode=" + slt.getMode());
+      return;
+    }
     currentVersion = attemptToOwnTask(true, watcher, serverName, path, stat.getVersion());
     if (currentVersion < 0) {
       SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.incrementAndGet();
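Both the worker-side check above and the master-side task stamping hinge on HLogSplitter.isDistributedLogReplay(conf). A minimal sketch of that probe, assuming it simply reads HConstants.DISTRIBUTED_LOG_REPLAY_KEY with a false default (the real helper may consult more state):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;

public final class RecoveryModeProbeSketch {
  // Sketch of the configuration probe both sides agree on: true selects
  // LOG_REPLAY, false selects LOG_SPLITTING when a SplitLogTask is stamped.
  public static boolean isDistributedLogReplay(Configuration conf) {
    return conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false);
  }
}
```

A worker whose probe disagrees with the mode recorded in the task leaves the task alone, so during a rolling restart each task waits for a worker whose configuration matches.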
Current " + + "task has recovery mode=" + slt.getMode()); + return; + } currentVersion = attemptToOwnTask(true, watcher, serverName, path, stat.getVersion()); if (currentVersion < 0) { SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.incrementAndGet(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java index d7082ed..3835d44 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -120,7 +120,7 @@ public class TestSerialization { @Test public void testSplitLogTask() throws DeserializationException { - SplitLogTask slt = new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")); + SplitLogTask slt = new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1"), null); byte [] bytes = slt.toByteArray(); SplitLogTask sltDeserialized = SplitLogTask.parseFrom(bytes); assertTrue(slt.equals(sltDeserialized)); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java index 82b8673..df780f8 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java @@ -238,7 +238,7 @@ public class TestSplitLogManager { " startup"); String tasknode = ZKSplitLog.getEncodedNodeName(zkw, "orphan/test/slash"); //create an unassigned orphan task - SplitLogTask slt = new SplitLogTask.Unassigned(DUMMY_MASTER); + SplitLogTask slt = new SplitLogTask.Unassigned(DUMMY_MASTER, conf); zkw.getRecoverableZooKeeper().create(tasknode, slt.toByteArray(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); int version = ZKUtil.checkExists(zkw, tasknode); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index eaf5547..a9011f8 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -31,6 +32,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; @@ -38,6 +40,7 @@ import org.apache.hadoop.hbase.SplitLogTask; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorType; +import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; @@ -149,7 +152,9 @@ public class TestSplitLogWorker { final ServerName RS = ServerName.valueOf("rs,1,1"); RegionServerServices mockedRS = getRegionServer(RS); 
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
index eaf5547..a9011f8 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver;

 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -31,6 +32,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
@@ -38,6 +40,7 @@ import org.apache.hadoop.hbase.SplitLogTask;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorType;
+import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -149,7 +152,9 @@ public class TestSplitLogWorker {
     final ServerName RS = ServerName.valueOf("rs,1,1");
     RegionServerServices mockedRS = getRegionServer(RS);
     zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS),
-      new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")).toByteArray(), Ids.OPEN_ACL_UNSAFE,
+      new SplitLogTask.Unassigned(
+          ServerName.valueOf("mgr,1,1"), TEST_UTIL.getConfiguration()).toByteArray(),
+      Ids.OPEN_ACL_UNSAFE,
       CreateMode.PERSISTENT);

     SplitLogWorker slw =
@@ -184,8 +189,8 @@ public class TestSplitLogWorker {
     final ServerName SVR1 = ServerName.valueOf("svr1,1,1");
     final ServerName SVR2 = ServerName.valueOf("svr2,1,1");
     zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TRFT),
-      new SplitLogTask.Unassigned(MANAGER).toByteArray(), Ids.OPEN_ACL_UNSAFE,
-      CreateMode.PERSISTENT);
+      new SplitLogTask.Unassigned(MANAGER, TEST_UTIL.getConfiguration()).toByteArray(),
+      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
     RegionServerServices mockedRS1 = getRegionServer(SVR1);
     RegionServerServices mockedRS2 = getRegionServer(SVR2);
     SplitLogWorker slw1 =
@@ -227,8 +232,8 @@ public class TestSplitLogWorker {
     // this time create a task node after starting the splitLogWorker
     zkw.getRecoverableZooKeeper().create(PATH,
-      new SplitLogTask.Unassigned(MANAGER).toByteArray(), Ids.OPEN_ACL_UNSAFE,
-      CreateMode.PERSISTENT);
+      new SplitLogTask.Unassigned(MANAGER, TEST_UTIL.getConfiguration()).toByteArray(),
+      Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
     waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, 1, WAIT_TIME);
     assertEquals(1, slw.taskReadySeq);
@@ -258,7 +263,8 @@ public class TestSplitLogWorker {
     Thread.sleep(100);
     waitForCounter(SplitLogCounters.tot_wkr_task_grabing, 0, 1, WAIT_TIME);
-    SplitLogTask unassignedManager = new SplitLogTask.Unassigned(MANAGER);
+    SplitLogTask unassignedManager =
+      new SplitLogTask.Unassigned(MANAGER, TEST_UTIL.getConfiguration());
     zkw.getRecoverableZooKeeper().create(PATH1, unassignedManager.toByteArray(),
       Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
@@ -298,7 +304,7 @@ public class TestSplitLogWorker {
     Thread.sleep(100);
     String task = ZKSplitLog.getEncodedNodeName(zkw, "task");
-    SplitLogTask slt = new SplitLogTask.Unassigned(MANAGER);
+    SplitLogTask slt = new SplitLogTask.Unassigned(MANAGER, TEST_UTIL.getConfiguration());
     zkw.getRecoverableZooKeeper().create(task,slt.toByteArray(), Ids.OPEN_ACL_UNSAFE,
       CreateMode.PERSISTENT);
@@ -351,8 +357,9 @@ public class TestSplitLogWorker {
     for (int i = 0; i < maxTasks; i++) {
       zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i),
-        new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")).toByteArray(),
-        Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+        new SplitLogTask.Unassigned(
+            ServerName.valueOf("mgr,1,1"), TEST_UTIL.getConfiguration()).toByteArray(),
+        Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
     }
     SplitLogWorker slw = new SplitLogWorker(zkw, testConf, mockedRS, neverEndingTask);
@@ -394,9 +401,9 @@ public class TestSplitLogWorker {
     for (int i = 0; i < maxTasks; i++) {
       zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i),
-        new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")).toByteArray(),
-        Ids.OPEN_ACL_UNSAFE,
-        CreateMode.PERSISTENT);
+        new SplitLogTask.Unassigned(
+            ServerName.valueOf("mgr,1,1"), TEST_UTIL.getConfiguration()).toByteArray(),
+        Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
     }
     SplitLogWorker slw = new SplitLogWorker(zkw, testConf, mockedRS, neverEndingTask);
@@ -416,6 +423,34 @@ public class TestSplitLogWorker {
       stopSplitLogWorker(slw);
     }
   }
+
+  @Test(timeout=60000)
+  public void testNotAcquireTaskOfDifferentRecoveryMode() throws 
Exception { + LOG.info("testNotAcquireTaskOfDifferentRecoveryMode"); + SplitLogCounters.resetCounters(); + final String TATAS = "tatas"; + final ServerName RS = ServerName.valueOf("rs,1,1"); + Configuration testConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); + boolean isDistributedLogReplay = HLogSplitter.isDistributedLogReplay(testConf); + testConf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, !isDistributedLogReplay); + RegionServerServices mockedRS = getRegionServer(RS); + + zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS), + new SplitLogTask.Unassigned( + ServerName.valueOf("mgr,1,1"), TEST_UTIL.getConfiguration()).toByteArray(), + Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); + + SplitLogWorker slw = new SplitLogWorker(zkw, testConf, mockedRS, neverEndingTask); + slw.start(); + Thread.sleep(WAIT_TIME); + try { + byte[] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS)); + SplitLogTask slt = SplitLogTask.parseFrom(bytes); + assertFalse(slt.isOwned(RS)); + } finally { + stopSplitLogWorker(slw); + } + } /** * Create a mocked region server service instance