From 4a076e2b8cb37907d22cec7892769bc74282dcac Mon Sep 17 00:00:00 2001 From: chenheng Date: Thu, 10 Mar 2016 10:41:57 +0800 Subject: [PATCH] HBASE-15406 Split / merge switch left disabled after early termination of hbck --- .../java/org/apache/hadoop/hbase/client/Admin.java | 12 + .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 9 +- .../hadoop/hbase/protobuf/RequestConverter.java | 3 +- .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java | 10 + .../apache/hadoop/hbase/zookeeper/TestZKUtil.java | 3 + .../hbase/protobuf/generated/ClientProtos.java | 34 +- .../hbase/protobuf/generated/MasterProtos.java | 633 +++++++----- .../hbase/protobuf/generated/ZooKeeperProtos.java | 1084 +++++++++++++++++++- hbase-protocol/src/main/protobuf/Master.proto | 1 + hbase-protocol/src/main/protobuf/ZooKeeper.proto | 15 + .../hadoop/hbase/master/MasterRpcServices.java | 8 +- .../org/apache/hadoop/hbase/util/HBaseFsck.java | 165 ++- .../hbase/zookeeper/SplitOrMergeTracker.java | 47 +- .../hadoop/hbase/util/TestHBaseFsckOneRS.java | 54 + 14 files changed, 1781 insertions(+), 297 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index c3b524b..09bc3c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1689,6 +1689,18 @@ public interface Admin extends Abortable, Closeable { final MasterSwitchType... switchTypes) throws IOException; /** + * Turn the Split or Merge switches on or off with the given key + * + * @param enabled true to enable the switches, false to disable them + * @param synchronous If true, waits until the current split() call, if one is outstanding, returns. + * @param key key used for the switch lock + * @param switchTypes list of switch types to set, see {@link MasterSwitchType} + * @return array of previous switch values + */ + boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous, + final String key, + final MasterSwitchType... switchTypes) throws IOException; + + /** * Query the current state of the switch * * @return true if the switch is enabled, false otherwise. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index c2a0bb8..b88744f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -3383,11 +3383,18 @@ public class HBaseAdmin implements Admin { public boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous, final MasterSwitchType... switchTypes) throws IOException { + return setSplitOrMergeEnabled(enabled, synchronous, "", switchTypes); + } + + @Override + public boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous, + final String key, final MasterSwitchType... 
switchTypes) throws IOException { return executeCallable(new MasterCallable(getConnection()) { @Override public boolean[] call(int callTimeout) throws ServiceException { MasterProtos.SetSplitOrMergeEnabledResponse response = master.setSplitOrMergeEnabled(null, - RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchTypes)); + RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, + key, switchTypes)); boolean[] result = new boolean[switchTypes.length]; int i = 0; for (Boolean prevValue : response.getPrevValueList()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 99e993d..21d7f5f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -1720,10 +1720,11 @@ public final class RequestConverter { * @return a SetSplitOrMergeEnabledRequest */ public static SetSplitOrMergeEnabledRequest buildSetSplitOrMergeEnabledRequest(boolean enabled, - boolean synchronous, Admin.MasterSwitchType... switchTypes) { + boolean synchronous, String key, Admin.MasterSwitchType... switchTypes) { SetSplitOrMergeEnabledRequest.Builder builder = SetSplitOrMergeEnabledRequest.newBuilder(); builder.setEnabled(enabled); builder.setSynchronous(synchronous); + builder.setKey(key); for (Admin.MasterSwitchType switchType : switchTypes) { builder.addSwitchTypes(convert(switchType)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index b665353..205d397 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -117,6 +117,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { private String regionNormalizerZNode; // znode containing the state of all switches, currently there are split and merge child node. private String switchZNode; + // znode containing the lock for the switches + private String switchLockZNode; // znode containing the lock for the tables public String tableLockZNode; // znode containing the state of recovering regions @@ -433,6 +435,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { regionNormalizerZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.regionNormalizer", "normalizer")); switchZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch")); + switchLockZNode = ZKUtil.joinZNode(switchZNode, "locks"); tableLockZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.tableLock", "table-lock")); recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode, @@ -799,4 +802,11 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { public String getSwitchZNode() { return switchZNode; } + + /** + * @return ZK node path for the switch lock. 
+ */ + public String getSwitchLockZNode() { + return switchLockZNode; + } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java index 2795472..a7882d9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java @@ -26,10 +26,12 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.ZooDefs.Perms; import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Id; import org.junit.Assert; +import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -53,4 +55,5 @@ public class TestZKUtil { && aclList.contains(new ACL(Perms.ALL, new Id("auth", "user2"))) && aclList.contains(new ACL(Perms.ALL, new Id("auth", "user3")))); } + } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index 0a9922c..4deab19 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -29034,7 +29034,7 @@ public final class ClientProtos { /** * repeated .hbase.pb.RegionSpecifier region = 1; */ - java.util.List + java.util.List getRegionList(); /** * repeated .hbase.pb.RegionSpecifier region = 1; */ @@ -29047,7 +29047,7 @@ public final class ClientProtos { /** * repeated .hbase.pb.RegionSpecifier region = 1; */ - java.util.List + java.util.List getRegionOrBuilderList(); /** * repeated .hbase.pb.RegionSpecifier region = 1; */ @@ -29059,7 +29059,7 @@ public final class ClientProtos { /** * repeated .hbase.pb.RegionLoadStats stat = 2; */ - java.util.List + java.util.List getStatList(); /** * repeated .hbase.pb.RegionLoadStats stat = 2; */ @@ -29072,7 +29072,7 @@ public final class ClientProtos { /** * repeated .hbase.pb.RegionLoadStats stat = 2; */ - java.util.List + java.util.List getStatOrBuilderList(); /** * repeated .hbase.pb.RegionLoadStats stat = 2; */ @@ -29204,7 +29204,7 @@ public final class ClientProtos { /** * repeated .hbase.pb.RegionSpecifier region = 1; */ - public java.util.List + public java.util.List getRegionOrBuilderList() { return region_; } @@ -29240,7 +29240,7 @@ public final class ClientProtos { /** * repeated .hbase.pb.RegionLoadStats stat = 2; */ - public java.util.List + public java.util.List getStatOrBuilderList() { return stat_; } @@ -29558,7 +29558,7 @@ public final class ClientProtos { regionBuilder_ = null; region_ = other.region_; bitField0_ = (bitField0_ & ~0x00000001); - regionBuilder_ = + regionBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getRegionFieldBuilder() : null; } else { @@ -29584,7 +29584,7 @@ public final class ClientProtos { statBuilder_ = null; stat_ = other.stat_; bitField0_ = (bitField0_ & ~0x00000002); - statBuilder_ = + statBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getStatFieldBuilder() : null; } else { @@ -29599,7 +29599,7 @@ public final class ClientProtos { public final boolean isInitialized() { for (int i = 0; i < getRegionCount(); i++) { if (!getRegion(i).isInitialized()) { - + return false; } } @@ -29820,7 +29820,7 @@ public final class ClientProtos { /** * repeated .hbase.pb.RegionSpecifier region = 1; */ - public java.util.List + public java.util.List getRegionOrBuilderList() { if (regionBuilder_ != null) { return regionBuilder_.getMessageOrBuilderList(); @@ -29846,12 +29846,12 @@ public final class ClientProtos { /** * repeated .hbase.pb.RegionSpecifier region = 1; */ - public java.util.List + public java.util.List getRegionBuilderList() { return getRegionFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> getRegionFieldBuilder() { if (regionBuilder_ == null) { regionBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< @@ -30060,7 +30060,7 @@ public final class ClientProtos { /** * repeated .hbase.pb.RegionLoadStats stat = 2; */ - public java.util.List + public java.util.List getStatOrBuilderList() { if (statBuilder_ != null) { return statBuilder_.getMessageOrBuilderList(); @@ -30086,12 +30086,12 @@ public final class ClientProtos { /** * repeated .hbase.pb.RegionLoadStats stat = 2; */ - public java.util.List + public java.util.List getStatBuilderList() { return getStatFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder> + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionLoadStatsOrBuilder> getStatFieldBuilder() { if (statBuilder_ == null) { statBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< @@ -34220,7 +34220,7 @@ public final class ClientProtos { } if (hasRegionStatistics()) { if (!getRegionStatistics().isInitialized()) { - + return false; } } @@ -34639,7 +34639,7 @@ public final class ClientProtos { * optional .hbase.pb.MultiRegionLoadStats regionStatistics = 3; */ private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStatsOrBuilder> + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStatsOrBuilder> getRegionStatisticsFieldBuilder() { if (regionStatisticsBuilder_ == null) { regionStatisticsBuilder_ = new com.google.protobuf.SingleFieldBuilder< diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 073eba9..2df8c7c 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -28882,6 +28882,21 @@ public final class MasterProtos { * repeated .hbase.pb.MasterSwitchType switch_types = 3; */ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType getSwitchTypes(int index); + + // optional string key = 4; + /** + * optional string key = 4; + */ + boolean hasKey(); + /** + * optional string key = 4; + */ + java.lang.String getKey(); + /** + * optional string key = 4; + */ + com.google.protobuf.ByteString + getKeyBytes(); } /** * Protobuf type {@code hbase.pb.SetSplitOrMergeEnabledRequest} @@ -28977,6 +28992,11 @@ public final class MasterProtos { input.popLimit(oldLimit); break; } + case 34: { + bitField0_ |= 0x00000004; + key_ = input.readBytes(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -29074,10 +29094,54 @@ public final class MasterProtos { return switchTypes_.get(index); } + // optional string key = 4; + public static final int KEY_FIELD_NUMBER = 4; + private java.lang.Object key_; + /** + * optional string key = 4; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string key = 4; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * optional string key = 4; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private void initFields() { enabled_ = false; synchronous_ = false; switchTypes_ = java.util.Collections.emptyList(); + key_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -29104,6 +29168,9 @@ public final class MasterProtos { for (int i = 0; i < switchTypes_.size(); i++) { output.writeEnum(3, switchTypes_.get(i).getNumber()); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(4, getKeyBytes()); + } getUnknownFields().writeTo(output); } @@ -29130,6 +29197,10 @@ public final class MasterProtos { size += dataSize; size += 1 * switchTypes_.size(); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getKeyBytes()); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -29165,6 +29236,11 @@ public final class MasterProtos { } result = result && getSwitchTypesList() .equals(other.getSwitchTypesList()); + result = result && (hasKey() == other.hasKey()); + if (hasKey()) { + result = result && getKey() + .equals(other.getKey()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -29190,6 +29266,10 @@ public final class MasterProtos { hash = (37 * hash) + 
SWITCH_TYPES_FIELD_NUMBER; hash = (53 * hash) + hashEnumList(getSwitchTypesList()); } + if (hasKey()) { + hash = (37 * hash) + KEY_FIELD_NUMBER; + hash = (53 * hash) + getKey().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -29305,6 +29385,8 @@ public final class MasterProtos { bitField0_ = (bitField0_ & ~0x00000002); switchTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -29346,6 +29428,10 @@ public final class MasterProtos { bitField0_ = (bitField0_ & ~0x00000004); } result.switchTypes_ = switchTypes_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + result.key_ = key_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -29378,6 +29464,11 @@ public final class MasterProtos { } onChanged(); } + if (other.hasKey()) { + bitField0_ |= 0x00000008; + key_ = other.key_; + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -29547,6 +29638,80 @@ public final class MasterProtos { return this; } + // optional string key = 4; + private java.lang.Object key_ = ""; + /** + * optional string key = 4; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string key = 4; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string key = 4; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string key = 4; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + key_ = value; + onChanged(); + return this; + } + /** + * optional string key = 4; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000008); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * optional string key = 4; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + key_ = value; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.SetSplitOrMergeEnabledRequest) } @@ -64583,239 +64748,239 @@ public final class MasterProtos { "ncerRunningResponse\022\032\n\022prev_balance_valu", "e\030\001 \001(\010\"\032\n\030IsBalancerEnabledRequest\",\n\031I" + "sBalancerEnabledResponse\022\017\n\007enabled\030\001 \002(" + - "\010\"w\n\035SetSplitOrMergeEnabledRequest\022\017\n\007en" + - "abled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014swit" + - "ch_types\030\003 \003(\0162\032.hbase.pb.MasterSwitchTy" + - "pe\"4\n\036SetSplitOrMergeEnabledResponse\022\022\n\n" + - "prev_value\030\001 \003(\010\"O\n\034IsSplitOrMergeEnable" + - "dRequest\022/\n\013switch_type\030\001 \002(\0162\032.hbase.pb" + - ".MasterSwitchType\"0\n\035IsSplitOrMergeEnabl" + - "edResponse\022\017\n\007enabled\030\001 
\002(\010\"\022\n\020Normalize", - "Request\"+\n\021NormalizeResponse\022\026\n\016normaliz" + - "er_ran\030\001 \002(\010\")\n\033SetNormalizerRunningRequ" + - "est\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormalizerRunningR" + - "esponse\022\035\n\025prev_normalizer_value\030\001 \001(\010\"\034" + - "\n\032IsNormalizerEnabledRequest\".\n\033IsNormal" + - "izerEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025" + - "RunCatalogScanRequest\"-\n\026RunCatalogScanR" + - "esponse\022\023\n\013scan_result\030\001 \001(\005\"-\n\033EnableCa" + - "talogJanitorRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034E" + - "nableCatalogJanitorResponse\022\022\n\nprev_valu", - "e\030\001 \001(\010\" \n\036IsCatalogJanitorEnabledReques" + - "t\"0\n\037IsCatalogJanitorEnabledResponse\022\r\n\005" + - "value\030\001 \002(\010\"B\n\017SnapshotRequest\022/\n\010snapsh" + - "ot\030\001 \002(\0132\035.hbase.pb.SnapshotDescription\"" + - ",\n\020SnapshotResponse\022\030\n\020expected_timeout\030" + - "\001 \002(\003\"\036\n\034GetCompletedSnapshotsRequest\"Q\n" + - "\035GetCompletedSnapshotsResponse\0220\n\tsnapsh" + - "ots\030\001 \003(\0132\035.hbase.pb.SnapshotDescription" + - "\"H\n\025DeleteSnapshotRequest\022/\n\010snapshot\030\001 " + - "\002(\0132\035.hbase.pb.SnapshotDescription\"\030\n\026De", - "leteSnapshotResponse\"I\n\026RestoreSnapshotR" + - "equest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snap" + - "shotDescription\"\031\n\027RestoreSnapshotRespon" + - "se\"H\n\025IsSnapshotDoneRequest\022/\n\010snapshot\030" + - "\001 \001(\0132\035.hbase.pb.SnapshotDescription\"^\n\026" + - "IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fa" + - "lse\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb.Snapsho" + - "tDescription\"O\n\034IsRestoreSnapshotDoneReq" + - "uest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Snapsh" + - "otDescription\"4\n\035IsRestoreSnapshotDoneRe", - "sponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033GetSchema" + - "AlterStatusRequest\022\'\n\ntable_name\030\001 \002(\0132\023" + - ".hbase.pb.TableName\"T\n\034GetSchemaAlterSta" + - "tusResponse\022\035\n\025yet_to_update_regions\030\001 \001" + - "(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032GetTableDe" + - "scriptorsRequest\022(\n\013table_names\030\001 \003(\0132\023." 
+ - "hbase.pb.TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022inc" + - "lude_sys_tables\030\003 \001(\010:\005false\022\021\n\tnamespac" + - "e\030\004 \001(\t\"J\n\033GetTableDescriptorsResponse\022+" + - "\n\014table_schema\030\001 \003(\0132\025.hbase.pb.TableSch", - "ema\"[\n\024GetTableNamesRequest\022\r\n\005regex\030\001 \001" + - "(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005false\022\021\n" + - "\tnamespace\030\003 \001(\t\"A\n\025GetTableNamesRespons" + - "e\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb.TableN" + - "ame\"?\n\024GetTableStateRequest\022\'\n\ntable_nam" + - "e\030\001 \002(\0132\023.hbase.pb.TableName\"B\n\025GetTable" + - "StateResponse\022)\n\013table_state\030\001 \002(\0132\024.hba" + - "se.pb.TableState\"\031\n\027GetClusterStatusRequ" + - "est\"K\n\030GetClusterStatusResponse\022/\n\016clust" + - "er_status\030\001 \002(\0132\027.hbase.pb.ClusterStatus", - "\"\030\n\026IsMasterRunningRequest\"4\n\027IsMasterRu" + - "nningResponse\022\031\n\021is_master_running\030\001 \002(\010" + - "\"I\n\024ExecProcedureRequest\0221\n\tprocedure\030\001 " + - "\002(\0132\036.hbase.pb.ProcedureDescription\"F\n\025E" + - "xecProcedureResponse\022\030\n\020expected_timeout" + - "\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsProcedu" + - "reDoneRequest\0221\n\tprocedure\030\001 \001(\0132\036.hbase" + - ".pb.ProcedureDescription\"`\n\027IsProcedureD" + - "oneResponse\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snap" + - "shot\030\002 \001(\0132\036.hbase.pb.ProcedureDescripti", - "on\",\n\031GetProcedureResultRequest\022\017\n\007proc_" + - "id\030\001 \002(\004\"\371\001\n\032GetProcedureResultResponse\022" + - "9\n\005state\030\001 \002(\0162*.hbase.pb.GetProcedureRe" + - "sultResponse.State\022\022\n\nstart_time\030\002 \001(\004\022\023" + - "\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\te" + - "xception\030\005 \001(\0132!.hbase.pb.ForeignExcepti" + - "onMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUN" + - "NING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortProcedureRe" + - "quest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInterruptIf" + - "Running\030\002 \001(\010:\004true\"6\n\026AbortProcedureRes", - "ponse\022\034\n\024is_procedure_aborted\030\001 \002(\010\"\027\n\025L" + - "istProceduresRequest\"@\n\026ListProceduresRe" + - "sponse\022&\n\tprocedure\030\001 \003(\0132\023.hbase.pb.Pro" + - "cedure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_name\030" + - "\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamespace\030\003" + - " \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb.Tabl" + - "eName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_glob" + - "als\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.T" + - "hrottleRequest\"\022\n\020SetQuotaResponse\"J\n\037Ma" + - "jorCompactionTimestampRequest\022\'\n\ntable_n", - "ame\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(MajorC" + - "ompactionTimestampForRegionRequest\022)\n\006re" + - "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"@\n" + - " MajorCompactionTimestampResponse\022\034\n\024com" + - "paction_timestamp\030\001 \002(\003\"\035\n\033SecurityCapab" + - "ilitiesRequest\"\354\001\n\034SecurityCapabilitiesR" + - "esponse\022G\n\014capabilities\030\001 \003(\01621.hbase.pb" + - ".SecurityCapabilitiesResponse.Capability" + - "\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTICATION" + - 
"\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAUTHORI", - "ZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL" + - "_VISIBILITY\020\004*(\n\020MasterSwitchType\022\t\n\005SPL" + - "IT\020\000\022\t\n\005MERGE\020\0012\323(\n\rMasterService\022e\n\024Get" + - "SchemaAlterStatus\022%.hbase.pb.GetSchemaAl" + - "terStatusRequest\032&.hbase.pb.GetSchemaAlt" + - "erStatusResponse\022b\n\023GetTableDescriptors\022" + - "$.hbase.pb.GetTableDescriptorsRequest\032%." + - "hbase.pb.GetTableDescriptorsResponse\022P\n\r" + - "GetTableNames\022\036.hbase.pb.GetTableNamesRe" + - "quest\032\037.hbase.pb.GetTableNamesResponse\022Y", - "\n\020GetClusterStatus\022!.hbase.pb.GetCluster" + - "StatusRequest\032\".hbase.pb.GetClusterStatu" + - "sResponse\022V\n\017IsMasterRunning\022 .hbase.pb." + - "IsMasterRunningRequest\032!.hbase.pb.IsMast" + - "erRunningResponse\022D\n\tAddColumn\022\032.hbase.p" + - "b.AddColumnRequest\032\033.hbase.pb.AddColumnR" + - "esponse\022M\n\014DeleteColumn\022\035.hbase.pb.Delet" + - "eColumnRequest\032\036.hbase.pb.DeleteColumnRe" + - "sponse\022M\n\014ModifyColumn\022\035.hbase.pb.Modify" + - "ColumnRequest\032\036.hbase.pb.ModifyColumnRes", - "ponse\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegio" + - "nRequest\032\034.hbase.pb.MoveRegionResponse\022k" + - "\n\026DispatchMergingRegions\022\'.hbase.pb.Disp" + - "atchMergingRegionsRequest\032(.hbase.pb.Dis" + - "patchMergingRegionsResponse\022M\n\014AssignReg" + - "ion\022\035.hbase.pb.AssignRegionRequest\032\036.hba" + - "se.pb.AssignRegionResponse\022S\n\016UnassignRe" + - "gion\022\037.hbase.pb.UnassignRegionRequest\032 ." + - "hbase.pb.UnassignRegionResponse\022P\n\rOffli" + - "neRegion\022\036.hbase.pb.OfflineRegionRequest", - "\032\037.hbase.pb.OfflineRegionResponse\022J\n\013Del" + - "eteTable\022\034.hbase.pb.DeleteTableRequest\032\035" + - ".hbase.pb.DeleteTableResponse\022P\n\rtruncat" + - "eTable\022\036.hbase.pb.TruncateTableRequest\032\037" + - ".hbase.pb.TruncateTableResponse\022J\n\013Enabl" + - "eTable\022\034.hbase.pb.EnableTableRequest\032\035.h" + - "base.pb.EnableTableResponse\022M\n\014DisableTa" + - "ble\022\035.hbase.pb.DisableTableRequest\032\036.hba" + - "se.pb.DisableTableResponse\022J\n\013ModifyTabl" + - "e\022\034.hbase.pb.ModifyTableRequest\032\035.hbase.", - "pb.ModifyTableResponse\022J\n\013CreateTable\022\034." 
+ - "hbase.pb.CreateTableRequest\032\035.hbase.pb.C" + - "reateTableResponse\022A\n\010Shutdown\022\031.hbase.p" + - "b.ShutdownRequest\032\032.hbase.pb.ShutdownRes" + - "ponse\022G\n\nStopMaster\022\033.hbase.pb.StopMaste" + - "rRequest\032\034.hbase.pb.StopMasterResponse\022>" + - "\n\007Balance\022\030.hbase.pb.BalanceRequest\032\031.hb" + - "ase.pb.BalanceResponse\022_\n\022SetBalancerRun" + - "ning\022#.hbase.pb.SetBalancerRunningReques" + - "t\032$.hbase.pb.SetBalancerRunningResponse\022", - "\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBalanc" + - "erEnabledRequest\032#.hbase.pb.IsBalancerEn" + - "abledResponse\022k\n\026SetSplitOrMergeEnabled\022" + - "\'.hbase.pb.SetSplitOrMergeEnabledRequest" + - "\032(.hbase.pb.SetSplitOrMergeEnabledRespon" + - "se\022h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.I" + - "sSplitOrMergeEnabledRequest\032\'.hbase.pb.I" + - "sSplitOrMergeEnabledResponse\022D\n\tNormaliz" + - "e\022\032.hbase.pb.NormalizeRequest\032\033.hbase.pb" + - ".NormalizeResponse\022e\n\024SetNormalizerRunni", - "ng\022%.hbase.pb.SetNormalizerRunningReques" + - "t\032&.hbase.pb.SetNormalizerRunningRespons" + - "e\022b\n\023IsNormalizerEnabled\022$.hbase.pb.IsNo" + - "rmalizerEnabledRequest\032%.hbase.pb.IsNorm" + - "alizerEnabledResponse\022S\n\016RunCatalogScan\022" + - "\037.hbase.pb.RunCatalogScanRequest\032 .hbase" + - ".pb.RunCatalogScanResponse\022e\n\024EnableCata" + - "logJanitor\022%.hbase.pb.EnableCatalogJanit" + - "orRequest\032&.hbase.pb.EnableCatalogJanito" + - "rResponse\022n\n\027IsCatalogJanitorEnabled\022(.h", - "base.pb.IsCatalogJanitorEnabledRequest\032)" + - ".hbase.pb.IsCatalogJanitorEnabledRespons" + - "e\022^\n\021ExecMasterService\022#.hbase.pb.Coproc" + - "essorServiceRequest\032$.hbase.pb.Coprocess" + - "orServiceResponse\022A\n\010Snapshot\022\031.hbase.pb" + - ".SnapshotRequest\032\032.hbase.pb.SnapshotResp" + - "onse\022h\n\025GetCompletedSnapshots\022&.hbase.pb" + - ".GetCompletedSnapshotsRequest\032\'.hbase.pb" + - ".GetCompletedSnapshotsResponse\022S\n\016Delete" + - "Snapshot\022\037.hbase.pb.DeleteSnapshotReques", - "t\032 .hbase.pb.DeleteSnapshotResponse\022S\n\016I" + - "sSnapshotDone\022\037.hbase.pb.IsSnapshotDoneR" + - "equest\032 .hbase.pb.IsSnapshotDoneResponse" + - "\022V\n\017RestoreSnapshot\022 .hbase.pb.RestoreSn" + - "apshotRequest\032!.hbase.pb.RestoreSnapshot" + - "Response\022h\n\025IsRestoreSnapshotDone\022&.hbas" + - "e.pb.IsRestoreSnapshotDoneRequest\032\'.hbas" + - "e.pb.IsRestoreSnapshotDoneResponse\022P\n\rEx" + - "ecProcedure\022\036.hbase.pb.ExecProcedureRequ" + - "est\032\037.hbase.pb.ExecProcedureResponse\022W\n\024", - "ExecProcedureWithRet\022\036.hbase.pb.ExecProc" + - "edureRequest\032\037.hbase.pb.ExecProcedureRes" + - "ponse\022V\n\017IsProcedureDone\022 .hbase.pb.IsPr" + - "ocedureDoneRequest\032!.hbase.pb.IsProcedur" + - "eDoneResponse\022V\n\017ModifyNamespace\022 .hbase" + - ".pb.ModifyNamespaceRequest\032!.hbase.pb.Mo" + - "difyNamespaceResponse\022V\n\017CreateNamespace" + - "\022 .hbase.pb.CreateNamespaceRequest\032!.hba" + - "se.pb.CreateNamespaceResponse\022V\n\017DeleteN" + - "amespace\022 .hbase.pb.DeleteNamespaceReque", - "st\032!.hbase.pb.DeleteNamespaceResponse\022k\n" + - "\026GetNamespaceDescriptor\022\'.hbase.pb.GetNa" + - "mespaceDescriptorRequest\032(.hbase.pb.GetN" + - "amespaceDescriptorResponse\022q\n\030ListNamesp" + - "aceDescriptors\022).hbase.pb.ListNamespaceD" + - "escriptorsRequest\032*.hbase.pb.ListNamespa" + - "ceDescriptorsResponse\022\206\001\n\037ListTableDescr" + - 
"iptorsByNamespace\0220.hbase.pb.ListTableDe" + - "scriptorsByNamespaceRequest\0321.hbase.pb.L" + - "istTableDescriptorsByNamespaceResponse\022t", - "\n\031ListTableNamesByNamespace\022*.hbase.pb.L" + - "istTableNamesByNamespaceRequest\032+.hbase." + - "pb.ListTableNamesByNamespaceResponse\022P\n\r" + - "GetTableState\022\036.hbase.pb.GetTableStateRe" + - "quest\032\037.hbase.pb.GetTableStateResponse\022A" + - "\n\010SetQuota\022\031.hbase.pb.SetQuotaRequest\032\032." + - "hbase.pb.SetQuotaResponse\022x\n\037getLastMajo" + - "rCompactionTimestamp\022).hbase.pb.MajorCom" + - "pactionTimestampRequest\032*.hbase.pb.Major" + - "CompactionTimestampResponse\022\212\001\n(getLastM", - "ajorCompactionTimestampForRegion\0222.hbase" + - ".pb.MajorCompactionTimestampForRegionReq" + - "uest\032*.hbase.pb.MajorCompactionTimestamp" + - "Response\022_\n\022getProcedureResult\022#.hbase.p" + - "b.GetProcedureResultRequest\032$.hbase.pb.G" + - "etProcedureResultResponse\022h\n\027getSecurity" + - "Capabilities\022%.hbase.pb.SecurityCapabili" + - "tiesRequest\032&.hbase.pb.SecurityCapabilit" + - "iesResponse\022S\n\016AbortProcedure\022\037.hbase.pb" + - ".AbortProcedureRequest\032 .hbase.pb.AbortP", - "rocedureResponse\022S\n\016ListProcedures\022\037.hba" + - "se.pb.ListProceduresRequest\032 .hbase.pb.L" + - "istProceduresResponseBB\n*org.apache.hado" + - "op.hbase.protobuf.generatedB\014MasterProto" + - "sH\001\210\001\001\240\001\001" + "\010\"\204\001\n\035SetSplitOrMergeEnabledRequest\022\017\n\007e" + + "nabled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014swi" + + "tch_types\030\003 \003(\0162\032.hbase.pb.MasterSwitchT" + + "ype\022\013\n\003key\030\004 \001(\t\"4\n\036SetSplitOrMergeEnabl" + + "edResponse\022\022\n\nprev_value\030\001 \003(\010\"O\n\034IsSpli" + + "tOrMergeEnabledRequest\022/\n\013switch_type\030\001 " + + "\002(\0162\032.hbase.pb.MasterSwitchType\"0\n\035IsSpl" + + "itOrMergeEnabledResponse\022\017\n\007enabled\030\001 \002(", + "\010\"\022\n\020NormalizeRequest\"+\n\021NormalizeRespon" + + "se\022\026\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNormali" + + "zerRunningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNorm" + + "alizerRunningResponse\022\035\n\025prev_normalizer" + + "_value\030\001 \001(\010\"\034\n\032IsNormalizerEnabledReque" + + "st\".\n\033IsNormalizerEnabledResponse\022\017\n\007ena" + + "bled\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026R" + + "unCatalogScanResponse\022\023\n\013scan_result\030\001 \001" + + "(\005\"-\n\033EnableCatalogJanitorRequest\022\016\n\006ena" + + "ble\030\001 \002(\010\"2\n\034EnableCatalogJanitorRespons", + "e\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanito" + + "rEnabledRequest\"0\n\037IsCatalogJanitorEnabl" + + "edResponse\022\r\n\005value\030\001 \002(\010\"B\n\017SnapshotReq" + + "uest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snapsh" + + "otDescription\",\n\020SnapshotResponse\022\030\n\020exp" + + "ected_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnaps" + + "hotsRequest\"Q\n\035GetCompletedSnapshotsResp" + + "onse\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb.Snaps" + + "hotDescription\"H\n\025DeleteSnapshotRequest\022" + + "/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDes", + "cription\"\030\n\026DeleteSnapshotResponse\"I\n\026Re" + + "storeSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035" + + ".hbase.pb.SnapshotDescription\"\031\n\027Restore" + + "SnapshotResponse\"H\n\025IsSnapshotDoneReques" + + 
"t\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.SnapshotD" + + "escription\"^\n\026IsSnapshotDoneResponse\022\023\n\004" + + "done\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hb" + + "ase.pb.SnapshotDescription\"O\n\034IsRestoreS" + + "napshotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.h" + + "base.pb.SnapshotDescription\"4\n\035IsRestore", + "SnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fals" + + "e\"F\n\033GetSchemaAlterStatusRequest\022\'\n\ntabl" + + "e_name\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034Get" + + "SchemaAlterStatusResponse\022\035\n\025yet_to_upda" + + "te_regions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"" + + "\213\001\n\032GetTableDescriptorsRequest\022(\n\013table_" + + "names\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005rege" + + "x\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005fal" + + "se\022\021\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDescrip" + + "torsResponse\022+\n\014table_schema\030\001 \003(\0132\025.hba", + "se.pb.TableSchema\"[\n\024GetTableNamesReques" + + "t\022\r\n\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002" + + " \001(\010:\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTab" + + "leNamesResponse\022(\n\013table_names\030\001 \003(\0132\023.h" + + "base.pb.TableName\"?\n\024GetTableStateReques" + + "t\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNa" + + "me\"B\n\025GetTableStateResponse\022)\n\013table_sta" + + "te\030\001 \002(\0132\024.hbase.pb.TableState\"\031\n\027GetClu" + + "sterStatusRequest\"K\n\030GetClusterStatusRes" + + "ponse\022/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb", + ".ClusterStatus\"\030\n\026IsMasterRunningRequest" + + "\"4\n\027IsMasterRunningResponse\022\031\n\021is_master" + + "_running\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221" + + "\n\tprocedure\030\001 \002(\0132\036.hbase.pb.ProcedureDe" + + "scription\"F\n\025ExecProcedureResponse\022\030\n\020ex" + + "pected_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(" + + "\014\"K\n\026IsProcedureDoneRequest\0221\n\tprocedure" + + "\030\001 \001(\0132\036.hbase.pb.ProcedureDescription\"`" + + "\n\027IsProcedureDoneResponse\022\023\n\004done\030\001 \001(\010:" + + "\005false\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.Proc", + "edureDescription\",\n\031GetProcedureResultRe" + + "quest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureR" + + "esultResponse\0229\n\005state\030\001 \002(\0162*.hbase.pb." + + "GetProcedureResultResponse.State\022\022\n\nstar" + + "t_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006res" + + "ult\030\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb." 
+ + "ForeignExceptionMessage\"1\n\005State\022\r\n\tNOT_" + + "FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025Ab" + + "ortProcedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025" + + "mayInterruptIfRunning\030\002 \001(\010:\004true\"6\n\026Abo", + "rtProcedureResponse\022\034\n\024is_procedure_abor" + + "ted\030\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n\026Li" + + "stProceduresResponse\022&\n\tprocedure\030\001 \003(\0132" + + "\023.hbase.pb.Procedure\"\315\001\n\017SetQuotaRequest" + + "\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022" + + "\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023" + + ".hbase.pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022" + + "\026\n\016bypass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(" + + "\0132\031.hbase.pb.ThrottleRequest\"\022\n\020SetQuota" + + "Response\"J\n\037MajorCompactionTimestampRequ", + "est\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Table" + + "Name\"U\n(MajorCompactionTimestampForRegio" + + "nRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regi" + + "onSpecifier\"@\n MajorCompactionTimestampR" + + "esponse\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n" + + "\033SecurityCapabilitiesRequest\"\354\001\n\034Securit" + + "yCapabilitiesResponse\022G\n\014capabilities\030\001 " + + "\003(\01621.hbase.pb.SecurityCapabilitiesRespo" + + "nse.Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_" + + "AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATIO", + "N\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZA" + + "TION\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020MasterSwi" + + "tchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\323(\n\rMaster" + + "Service\022e\n\024GetSchemaAlterStatus\022%.hbase." + + "pb.GetSchemaAlterStatusRequest\032&.hbase.p" + + "b.GetSchemaAlterStatusResponse\022b\n\023GetTab" + + "leDescriptors\022$.hbase.pb.GetTableDescrip" + + "torsRequest\032%.hbase.pb.GetTableDescripto" + + "rsResponse\022P\n\rGetTableNames\022\036.hbase.pb.G" + + "etTableNamesRequest\032\037.hbase.pb.GetTableN", + "amesResponse\022Y\n\020GetClusterStatus\022!.hbase" + + ".pb.GetClusterStatusRequest\032\".hbase.pb.G" + + "etClusterStatusResponse\022V\n\017IsMasterRunni" + + "ng\022 .hbase.pb.IsMasterRunningRequest\032!.h" + + "base.pb.IsMasterRunningResponse\022D\n\tAddCo" + + "lumn\022\032.hbase.pb.AddColumnRequest\032\033.hbase" + + ".pb.AddColumnResponse\022M\n\014DeleteColumn\022\035." + + "hbase.pb.DeleteColumnRequest\032\036.hbase.pb." 
+ + "DeleteColumnResponse\022M\n\014ModifyColumn\022\035.h" + + "base.pb.ModifyColumnRequest\032\036.hbase.pb.M", + "odifyColumnResponse\022G\n\nMoveRegion\022\033.hbas" + + "e.pb.MoveRegionRequest\032\034.hbase.pb.MoveRe" + + "gionResponse\022k\n\026DispatchMergingRegions\022\'" + + ".hbase.pb.DispatchMergingRegionsRequest\032" + + "(.hbase.pb.DispatchMergingRegionsRespons" + + "e\022M\n\014AssignRegion\022\035.hbase.pb.AssignRegio" + + "nRequest\032\036.hbase.pb.AssignRegionResponse" + + "\022S\n\016UnassignRegion\022\037.hbase.pb.UnassignRe" + + "gionRequest\032 .hbase.pb.UnassignRegionRes" + + "ponse\022P\n\rOfflineRegion\022\036.hbase.pb.Offlin", + "eRegionRequest\032\037.hbase.pb.OfflineRegionR" + + "esponse\022J\n\013DeleteTable\022\034.hbase.pb.Delete" + + "TableRequest\032\035.hbase.pb.DeleteTableRespo" + + "nse\022P\n\rtruncateTable\022\036.hbase.pb.Truncate" + + "TableRequest\032\037.hbase.pb.TruncateTableRes" + + "ponse\022J\n\013EnableTable\022\034.hbase.pb.EnableTa" + + "bleRequest\032\035.hbase.pb.EnableTableRespons" + + "e\022M\n\014DisableTable\022\035.hbase.pb.DisableTabl" + + "eRequest\032\036.hbase.pb.DisableTableResponse" + + "\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTableRe", + "quest\032\035.hbase.pb.ModifyTableResponse\022J\n\013" + + "CreateTable\022\034.hbase.pb.CreateTableReques" + + "t\032\035.hbase.pb.CreateTableResponse\022A\n\010Shut" + + "down\022\031.hbase.pb.ShutdownRequest\032\032.hbase." + + "pb.ShutdownResponse\022G\n\nStopMaster\022\033.hbas" + + "e.pb.StopMasterRequest\032\034.hbase.pb.StopMa" + + "sterResponse\022>\n\007Balance\022\030.hbase.pb.Balan" + + "ceRequest\032\031.hbase.pb.BalanceResponse\022_\n\022" + + "SetBalancerRunning\022#.hbase.pb.SetBalance" + + "rRunningRequest\032$.hbase.pb.SetBalancerRu", + "nningResponse\022\\\n\021IsBalancerEnabled\022\".hba" + + "se.pb.IsBalancerEnabledRequest\032#.hbase.p" + + "b.IsBalancerEnabledResponse\022k\n\026SetSplitO" + + "rMergeEnabled\022\'.hbase.pb.SetSplitOrMerge" + + "EnabledRequest\032(.hbase.pb.SetSplitOrMerg" + + "eEnabledResponse\022h\n\025IsSplitOrMergeEnable" + + "d\022&.hbase.pb.IsSplitOrMergeEnabledReques" + + "t\032\'.hbase.pb.IsSplitOrMergeEnabledRespon" + + "se\022D\n\tNormalize\022\032.hbase.pb.NormalizeRequ" + + "est\032\033.hbase.pb.NormalizeResponse\022e\n\024SetN", + "ormalizerRunning\022%.hbase.pb.SetNormalize" + + "rRunningRequest\032&.hbase.pb.SetNormalizer" + + "RunningResponse\022b\n\023IsNormalizerEnabled\022$" + + ".hbase.pb.IsNormalizerEnabledRequest\032%.h" + + "base.pb.IsNormalizerEnabledResponse\022S\n\016R" + + "unCatalogScan\022\037.hbase.pb.RunCatalogScanR" + + "equest\032 .hbase.pb.RunCatalogScanResponse" + + "\022e\n\024EnableCatalogJanitor\022%.hbase.pb.Enab" + + "leCatalogJanitorRequest\032&.hbase.pb.Enabl" + + "eCatalogJanitorResponse\022n\n\027IsCatalogJani", + "torEnabled\022(.hbase.pb.IsCatalogJanitorEn" + + "abledRequest\032).hbase.pb.IsCatalogJanitor" + + "EnabledResponse\022^\n\021ExecMasterService\022#.h" + + "base.pb.CoprocessorServiceRequest\032$.hbas" + + "e.pb.CoprocessorServiceResponse\022A\n\010Snaps" + + "hot\022\031.hbase.pb.SnapshotRequest\032\032.hbase.p" + + "b.SnapshotResponse\022h\n\025GetCompletedSnapsh" + + "ots\022&.hbase.pb.GetCompletedSnapshotsRequ" + + "est\032\'.hbase.pb.GetCompletedSnapshotsResp" + + "onse\022S\n\016DeleteSnapshot\022\037.hbase.pb.Delete", + "SnapshotRequest\032 .hbase.pb.DeleteSnapsho" + + "tResponse\022S\n\016IsSnapshotDone\022\037.hbase.pb.I" + + "sSnapshotDoneRequest\032 .hbase.pb.IsSnapsh" + + 
"otDoneResponse\022V\n\017RestoreSnapshot\022 .hbas" + + "e.pb.RestoreSnapshotRequest\032!.hbase.pb.R" + + "estoreSnapshotResponse\022h\n\025IsRestoreSnaps" + + "hotDone\022&.hbase.pb.IsRestoreSnapshotDone" + + "Request\032\'.hbase.pb.IsRestoreSnapshotDone" + + "Response\022P\n\rExecProcedure\022\036.hbase.pb.Exe" + + "cProcedureRequest\032\037.hbase.pb.ExecProcedu", + "reResponse\022W\n\024ExecProcedureWithRet\022\036.hba" + + "se.pb.ExecProcedureRequest\032\037.hbase.pb.Ex" + + "ecProcedureResponse\022V\n\017IsProcedureDone\022 " + + ".hbase.pb.IsProcedureDoneRequest\032!.hbase" + + ".pb.IsProcedureDoneResponse\022V\n\017ModifyNam" + + "espace\022 .hbase.pb.ModifyNamespaceRequest" + + "\032!.hbase.pb.ModifyNamespaceResponse\022V\n\017C" + + "reateNamespace\022 .hbase.pb.CreateNamespac" + + "eRequest\032!.hbase.pb.CreateNamespaceRespo" + + "nse\022V\n\017DeleteNamespace\022 .hbase.pb.Delete", + "NamespaceRequest\032!.hbase.pb.DeleteNamesp" + + "aceResponse\022k\n\026GetNamespaceDescriptor\022\'." + + "hbase.pb.GetNamespaceDescriptorRequest\032(" + + ".hbase.pb.GetNamespaceDescriptorResponse" + + "\022q\n\030ListNamespaceDescriptors\022).hbase.pb." + + "ListNamespaceDescriptorsRequest\032*.hbase." + + "pb.ListNamespaceDescriptorsResponse\022\206\001\n\037" + + "ListTableDescriptorsByNamespace\0220.hbase." + + "pb.ListTableDescriptorsByNamespaceReques" + + "t\0321.hbase.pb.ListTableDescriptorsByNames", + "paceResponse\022t\n\031ListTableNamesByNamespac" + + "e\022*.hbase.pb.ListTableNamesByNamespaceRe" + + "quest\032+.hbase.pb.ListTableNamesByNamespa" + + "ceResponse\022P\n\rGetTableState\022\036.hbase.pb.G" + + "etTableStateRequest\032\037.hbase.pb.GetTableS" + + "tateResponse\022A\n\010SetQuota\022\031.hbase.pb.SetQ" + + "uotaRequest\032\032.hbase.pb.SetQuotaResponse\022" + + "x\n\037getLastMajorCompactionTimestamp\022).hba" + + "se.pb.MajorCompactionTimestampRequest\032*." 
+ + "hbase.pb.MajorCompactionTimestampRespons", + "e\022\212\001\n(getLastMajorCompactionTimestampFor" + + "Region\0222.hbase.pb.MajorCompactionTimesta" + + "mpForRegionRequest\032*.hbase.pb.MajorCompa" + + "ctionTimestampResponse\022_\n\022getProcedureRe" + + "sult\022#.hbase.pb.GetProcedureResultReques" + + "t\032$.hbase.pb.GetProcedureResultResponse\022" + + "h\n\027getSecurityCapabilities\022%.hbase.pb.Se" + + "curityCapabilitiesRequest\032&.hbase.pb.Sec" + + "urityCapabilitiesResponse\022S\n\016AbortProced" + + "ure\022\037.hbase.pb.AbortProcedureRequest\032 .h", + "base.pb.AbortProcedureResponse\022S\n\016ListPr" + + "ocedures\022\037.hbase.pb.ListProceduresReques" + + "t\032 .hbase.pb.ListProceduresResponseBB\n*o" + + "rg.apache.hadoop.hbase.protobuf.generate" + + "dB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -65139,7 +65304,7 @@ public final class MasterProtos { internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor, - new java.lang.String[] { "Enabled", "Synchronous", "SwitchTypes", }); + new java.lang.String[] { "Enabled", "Synchronous", "SwitchTypes", "Key", }); internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor = getDescriptor().getMessageTypes().get(53); internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index 0240a67..c25acb7 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -8640,6 +8640,1059 @@ public final class ZooKeeperProtos { // @@protoc_insertion_point(class_scope:hbase.pb.SwitchState) } + public interface SplitAndMergeStateOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional bool split_enabled = 1; + /** + * optional bool split_enabled = 1; + */ + boolean hasSplitEnabled(); + /** + * optional bool split_enabled = 1; + */ + boolean getSplitEnabled(); + + // optional bool merge_enabled = 2; + /** + * optional bool merge_enabled = 2; + */ + boolean hasMergeEnabled(); + /** + * optional bool merge_enabled = 2; + */ + boolean getMergeEnabled(); + } + /** + * Protobuf type {@code hbase.pb.SplitAndMergeState} + * + *
<pre>
+   **
+   * State for split and merge, used in hbck
+   * </pre>
+ */ + public static final class SplitAndMergeState extends + com.google.protobuf.GeneratedMessage + implements SplitAndMergeStateOrBuilder { + // Use SplitAndMergeState.newBuilder() to construct. + private SplitAndMergeState(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SplitAndMergeState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SplitAndMergeState defaultInstance; + public static SplitAndMergeState getDefaultInstance() { + return defaultInstance; + } + + public SplitAndMergeState getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SplitAndMergeState( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + splitEnabled_ = input.readBool(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + mergeEnabled_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SplitAndMergeState parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SplitAndMergeState(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional bool split_enabled = 1; + public static final int SPLIT_ENABLED_FIELD_NUMBER = 1; + private boolean splitEnabled_; + /** + * optional bool split_enabled = 1; + */ + public boolean hasSplitEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bool 
split_enabled = 1; + */ + public boolean getSplitEnabled() { + return splitEnabled_; + } + + // optional bool merge_enabled = 2; + public static final int MERGE_ENABLED_FIELD_NUMBER = 2; + private boolean mergeEnabled_; + /** + * optional bool merge_enabled = 2; + */ + public boolean hasMergeEnabled() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool merge_enabled = 2; + */ + public boolean getMergeEnabled() { + return mergeEnabled_; + } + + private void initFields() { + splitEnabled_ = false; + mergeEnabled_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, splitEnabled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, mergeEnabled_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, splitEnabled_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, mergeEnabled_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) obj; + + boolean result = true; + result = result && (hasSplitEnabled() == other.hasSplitEnabled()); + if (hasSplitEnabled()) { + result = result && (getSplitEnabled() + == other.getSplitEnabled()); + } + result = result && (hasMergeEnabled() == other.hasMergeEnabled()); + if (hasMergeEnabled()) { + result = result && (getMergeEnabled() + == other.getMergeEnabled()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasSplitEnabled()) { + hash = (37 * hash) + SPLIT_ENABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getSplitEnabled()); + } + if (hasMergeEnabled()) { + hash = (37 * hash) + MERGE_ENABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getMergeEnabled()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SplitAndMergeState} + * + *
+     **
+     * State for split and merge, used in hbck
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeStateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + splitEnabled_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + mergeEnabled_ = false; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.splitEnabled_ = splitEnabled_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.mergeEnabled_ = mergeEnabled_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState other) { + if (other 
== org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.getDefaultInstance()) return this; + if (other.hasSplitEnabled()) { + setSplitEnabled(other.getSplitEnabled()); + } + if (other.hasMergeEnabled()) { + setMergeEnabled(other.getMergeEnabled()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional bool split_enabled = 1; + private boolean splitEnabled_ ; + /** + * optional bool split_enabled = 1; + */ + public boolean hasSplitEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional bool split_enabled = 1; + */ + public boolean getSplitEnabled() { + return splitEnabled_; + } + /** + * optional bool split_enabled = 1; + */ + public Builder setSplitEnabled(boolean value) { + bitField0_ |= 0x00000001; + splitEnabled_ = value; + onChanged(); + return this; + } + /** + * optional bool split_enabled = 1; + */ + public Builder clearSplitEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + splitEnabled_ = false; + onChanged(); + return this; + } + + // optional bool merge_enabled = 2; + private boolean mergeEnabled_ ; + /** + * optional bool merge_enabled = 2; + */ + public boolean hasMergeEnabled() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool merge_enabled = 2; + */ + public boolean getMergeEnabled() { + return mergeEnabled_; + } + /** + * optional bool merge_enabled = 2; + */ + public Builder setMergeEnabled(boolean value) { + bitField0_ |= 0x00000002; + mergeEnabled_ = value; + onChanged(); + return this; + } + /** + * optional bool merge_enabled = 2; + */ + public Builder clearMergeEnabled() { + bitField0_ = (bitField0_ & ~0x00000002); + mergeEnabled_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SplitAndMergeState) + } + + static { + defaultInstance = new SplitAndMergeState(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SplitAndMergeState) + } + + public interface SplitOrMergeLockKeyOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string key = 1; + /** + * optional string key = 1; + */ + boolean hasKey(); + /** + * optional string key = 1; + */ + java.lang.String getKey(); + /** + * optional string key = 1; + */ + com.google.protobuf.ByteString + getKeyBytes(); + } + /** + * Protobuf type {@code hbase.pb.SplitOrMergeLockKey} + * + *
+   **
+   * The key to open SplitOrMerge Lock.
+   * 
+ */ + public static final class SplitOrMergeLockKey extends + com.google.protobuf.GeneratedMessage + implements SplitOrMergeLockKeyOrBuilder { + // Use SplitOrMergeLockKey.newBuilder() to construct. + private SplitOrMergeLockKey(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SplitOrMergeLockKey(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SplitOrMergeLockKey defaultInstance; + public static SplitOrMergeLockKey getDefaultInstance() { + return defaultInstance; + } + + public SplitOrMergeLockKey getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SplitOrMergeLockKey( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + key_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitOrMergeLockKey_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitOrMergeLockKey_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SplitOrMergeLockKey parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SplitOrMergeLockKey(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string key = 1; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * optional string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { 
+ return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * optional string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + key_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey) obj; + + boolean result = true; + result = result && (hasKey() == other.hasKey()); + if (hasKey()) { + result = result && getKey() + .equals(other.getKey()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasKey()) { + hash = (37 * hash) + KEY_FIELD_NUMBER; + hash = (53 * hash) + getKey().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SplitOrMergeLockKey} + * + *
+     **
+     * The key to open SplitOrMerge Lock.
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKeyOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitOrMergeLockKey_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitOrMergeLockKey_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitOrMergeLockKey_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitOrMergeLockKey) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string key = 1; + private java.lang.Object key_ = ""; + /** + * optional string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string key = 1; + */ + public com.google.protobuf.ByteString + getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string key = 1; + */ + public Builder setKey( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * optional string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * optional string key = 1; + */ + public Builder setKeyBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SplitOrMergeLockKey) + } + + static { + defaultInstance = new SplitOrMergeLockKey(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SplitOrMergeLockKey) + } + private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_MetaRegionServer_descriptor; private static @@ -8695,6 +9748,16 @@ public final class ZooKeeperProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_SwitchState_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SplitAndMergeState_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SplitOrMergeLockKey_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_SplitOrMergeLockKey_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -8736,9 +9799,12 @@ public 
final class ZooKeeperProtos { "e\022(\n\nlock_owner\030\002 \001(\0132\024.hbase.pb.ServerN", "ame\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(" + "\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\"" + - "\036\n\013SwitchState\022\017\n\007enabled\030\001 \001(\010BE\n*org.a" + - "pache.hadoop.hbase.protobuf.generatedB\017Z" + - "ooKeeperProtosH\001\210\001\001\240\001\001" + "\036\n\013SwitchState\022\017\n\007enabled\030\001 \001(\010\"B\n\022Split" + + "AndMergeState\022\025\n\rsplit_enabled\030\001 \001(\010\022\025\n\r" + + "merge_enabled\030\002 \001(\010\"\"\n\023SplitOrMergeLockK" + + "ey\022\013\n\003key\030\001 \001(\tBE\n*org.apache.hadoop.hba" + + "se.protobuf.generatedB\017ZooKeeperProtosH\001" + + "\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -8811,6 +9877,18 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SwitchState_descriptor, new java.lang.String[] { "Enabled", }); + internal_static_hbase_pb_SplitAndMergeState_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_SplitAndMergeState_descriptor, + new java.lang.String[] { "SplitEnabled", "MergeEnabled", }); + internal_static_hbase_pb_SplitOrMergeLockKey_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_hbase_pb_SplitOrMergeLockKey_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_SplitOrMergeLockKey_descriptor, + new java.lang.String[] { "Key", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 79bb862..4972a8c 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -288,6 +288,7 @@ message SetSplitOrMergeEnabledRequest { required bool enabled = 1; optional bool synchronous = 2; repeated MasterSwitchType switch_types = 3; + optional string key = 4; } message SetSplitOrMergeEnabledResponse { diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto index 4963c09..1f9ec3e 100644 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -159,4 +159,19 @@ message TableLock { */ message SwitchState { optional bool enabled = 1; +} + +/** + * State for split and merge, used in hbck + */ +message SplitAndMergeState { + optional bool split_enabled = 1; + optional bool merge_enabled = 2; +} + +/** + * The key to open SplitOrMerge Lock. 
+ */ +message SplitOrMergeLockKey { + optional string key = 1; } \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index d6a53cd..32f7da6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignException; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.exceptions.MergeRegionException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.ipc.PriorityFunction; @@ -1513,16 +1514,21 @@ public class MasterRpcServices extends RSRpcServices try { master.checkInitialized(); boolean newValue = request.getEnabled(); + String key = request.getKey(); for (MasterSwitchType masterSwitchType : request.getSwitchTypesList()) { Admin.MasterSwitchType switchType = convert(masterSwitchType); boolean oldValue = master.isSplitOrMergeEnabled(switchType); - master.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, switchType); + master.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, key, switchType); response.addPrevValue(oldValue); } } catch (IOException e) { throw new ServiceException(e); } catch (KeeperException e) { throw new ServiceException(e); + } catch (InterruptedException e) { + throw new ServiceException(e); + } catch (DeserializationException e) { + throw new ServiceException(e); } return response.build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 9abef9c..51b4a0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -54,6 +54,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; @@ -109,12 +110,14 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; @@ -133,6 +136,7 @@ import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; import 
org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -307,6 +311,7 @@ public class HBaseFsck extends Configured implements Closeable {
   private Map<TableName, Set<String>> skippedRegions = new HashMap<TableName, Set<String>>();
 
+  ZooKeeperWatcher zkw = null;
   /**
    * Constructor
    *
@@ -345,6 +350,7 @@ public class HBaseFsck extends Configured implements Closeable {
         "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),
       getConf().getInt(
         "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));
+    zkw = createZooKeeperWatcher();
   }
 
   private class FileLockCallable implements Callable<FSDataOutputStream> {
@@ -679,15 +685,23 @@ public class HBaseFsck extends Configured implements Closeable {
     // print hbase server version
     errors.print("Version: " + status.getHBaseVersion());
     offlineHdfsIntegrityRepair();
+    cleanUpSwitches();
 
     boolean oldBalancer = false;
     if (shouldDisableBalancer()) {
       oldBalancer = admin.setBalancerRunning(false, true);
     }
     boolean[] oldSplitAndMerge = null;
+    String splitOrMergeKey = null;
     if (shouldDisableSplitAndMerge()) {
-      oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false,
-        Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
+      splitOrMergeKey = acquireSplitOrMergeLock();
+      if (splitOrMergeKey != null) {
+        oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false, splitOrMergeKey,
+          Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
+        saveSplitOrMergeStateOnZK(oldSplitAndMerge);
+      } else {
+        LOG.warn("Could not acquire the split/merge lock; leaving split and merge enabled.");
+      }
     }
 
     try {
@@ -702,15 +716,10 @@ public class HBaseFsck extends Configured implements Closeable {
       }
 
       if (shouldDisableSplitAndMerge()) {
-        if (oldSplitAndMerge != null) {
-          if (oldSplitAndMerge[0] && oldSplitAndMerge[1]) {
-            admin.setSplitOrMergeEnabled(true, false,
-              Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
-          } else if (oldSplitAndMerge[0]) {
-            admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.SPLIT);
-          } else if (oldSplitAndMerge[1]) {
-            admin.setSplitOrMergeEnabled(true, false, Admin.MasterSwitchType.MERGE);
-          }
+        if (oldSplitAndMerge != null && splitOrMergeKey != null) {
+          rollbackSplitOrMergeSwitches(oldSplitAndMerge[0], oldSplitAndMerge[1], splitOrMergeKey);
+          deleteSplitOrMergeStateOnZK();
+          releaseSplitOrMergeLock();
         }
       }
     }
@@ -749,6 +758,10 @@ public class HBaseFsck extends Configured implements Closeable {
     } catch (Exception io) {
       LOG.warn(io);
     } finally {
+      if (zkw != null) {
+        zkw.close();
+        zkw = null;
+      }
       IOUtils.closeQuietly(admin);
       IOUtils.closeQuietly(meta);
       IOUtils.closeQuietly(connection);
@@ -1789,14 +1802,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
   private ServerName getMetaRegionServerName(int replicaId)
   throws IOException, KeeperException {
-    ZooKeeperWatcher zkw = createZooKeeperWatcher();
-    ServerName sn = null;
-    try {
-      sn = new MetaTableLocator().getMetaRegionLocation(zkw, replicaId);
-    } finally {
-      zkw.close();
-    }
-    return sn;
+    return new MetaTableLocator().getMetaRegionLocation(zkw, replicaId);
   }
 
   /**
@@ -3281,28 +3287,21 @@ public class HBaseFsck extends Configured implements Closeable {
   }
 
   private void checkAndFixTableLocks() throws IOException {
-    ZooKeeperWatcher zkw = createZooKeeperWatcher();
     TableLockChecker checker = new TableLockChecker(zkw, errors);
    checker.checkTableLocks();
 
     if (this.fixTableLocks) {
       checker.fixExpiredTableLocks();
     }
-    zkw.close();
   }
 
   private void checkAndFixReplication() throws IOException {
-    ZooKeeperWatcher zkw = createZooKeeperWatcher();
-    try {
-      ReplicationChecker checker = new ReplicationChecker(getConf(), zkw, connection, errors);
-      checker.checkUnDeletedQueues();
+    ReplicationChecker checker = new ReplicationChecker(getConf(), zkw, connection, errors);
+    checker.checkUnDeletedQueues();
 
-      if (checker.hasUnDeletedQueues() && this.fixReplication) {
-        checker.fixUnDeletedQueues();
-        setShouldRerun();
-      }
-    } finally {
-      zkw.close();
+    if (checker.hasUnDeletedQueues() && this.fixReplication) {
+      checker.fixUnDeletedQueues();
+      setShouldRerun();
     }
   }
 
@@ -3372,12 +3371,7 @@ public class HBaseFsck extends Configured implements Closeable {
   private void unassignMetaReplica(HbckInfo hi) throws IOException, InterruptedException,
   KeeperException {
     undeployRegions(hi);
-    ZooKeeperWatcher zkw = createZooKeeperWatcher();
-    try {
-      ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(hi.metaEntry.getReplicaId()));
-    } finally {
-      zkw.close();
-    }
+    ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(hi.metaEntry.getReplicaId()));
   }
 
   private void assignMetaReplica(int replicaId)
@@ -4206,7 +4200,12 @@ public class HBaseFsck extends Configured implements Closeable {
    * Disable the split and merge
    */
   public static void setDisableSplitAndMerge() {
-    disableSplitAndMerge = true;
+    setDisableSplitAndMerge(true);
+  }
+
+  @VisibleForTesting
+  public static void setDisableSplitAndMerge(boolean flag) {
+    disableSplitAndMerge = flag;
   }
 
   /**
@@ -4227,6 +4226,96 @@ public class HBaseFsck extends Configured implements Closeable {
     return fixAny || disableSplitAndMerge;
   }
+
+
+  private void cleanUpSwitches() {
+    try {
+      releaseSplitOrMergeLock();
+      String splitOrMergeState = ZKUtil.joinZNode(zkw.getSwitchLockZNode(),
+        SplitOrMergeTracker.STATE);
+      // If an old switch state exists on ZK, roll the switches back to it.
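+      // The previous values were written by saveSplitOrMergeStateOnZK() before an earlier
+      // hbck run disabled splits and merges; if that run died before its finally block ran,
+      // the state znode is still there and we restore the recorded values before this run.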
+      if (ZKUtil.checkExists(zkw, splitOrMergeState) != -1) {
+        byte[] bytes = ZKUtil.getData(zkw, splitOrMergeState);
+        ProtobufUtil.expectPBMagicPrefix(bytes);
+        ZooKeeperProtos.SplitAndMergeState.Builder builder =
+          ZooKeeperProtos.SplitAndMergeState.newBuilder();
+        try {
+          int magicLen = ProtobufUtil.lengthOfPBMagic();
+          ProtobufUtil.mergeFrom(builder, bytes, magicLen, bytes.length - magicLen);
+        } catch (IOException e) {
+          throw new DeserializationException(e);
+        }
+        ZooKeeperProtos.SplitAndMergeState splitAndMergeState = builder.build();
+        rollbackSplitOrMergeSwitches(splitAndMergeState.getSplitEnabled(),
+          splitAndMergeState.getMergeEnabled(), "");
+        ZKUtil.deleteNode(zkw, splitOrMergeState);
+      }
+    } catch (KeeperException e) {
+      LOG.warn("rollback failed!", e);
+    } catch (InterruptedException e) {
+      LOG.warn("rollback aborted!", e);
+    } catch (IOException e) {
+      LOG.warn("rollback failed!", e);
+    } catch (DeserializationException e) {
+      LOG.warn("rollback failed!", e);
+    }
+  }
+
+  private void rollbackSplitOrMergeSwitches(boolean split, boolean merge,
+    String key) throws IOException {
+    if (split && merge) {
+      admin.setSplitOrMergeEnabled(true, false, key,
+        Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
+    } else if (split) {
+      admin.setSplitOrMergeEnabled(true, false, key, Admin.MasterSwitchType.SPLIT);
+    } else if (merge) {
+      admin.setSplitOrMergeEnabled(true, false, key, Admin.MasterSwitchType.MERGE);
+    }
+  }
+
+  private String acquireSplitOrMergeLock() {
+    String splitOrMergeLock = ZKUtil.joinZNode(zkw.getSwitchLockZNode(), SplitOrMergeTracker.LOCK);
+    try {
+      // Only the holder of this key can change the switches while the lock exists.
+      String key = RandomStringUtils.randomAlphanumeric(10);
+      ZooKeeperProtos.SplitOrMergeLockKey.Builder builder =
+        ZooKeeperProtos.SplitOrMergeLockKey.newBuilder();
+      builder.setKey(key);
+      ZKUtil.createSetData(zkw, splitOrMergeLock,
+        ProtobufUtil.prependPBMagic(builder.build().toByteArray()));
+      return key;
+    } catch (KeeperException e) {
+      LOG.warn("Failed to acquire the split/merge lock!", e);
+    }
+    return null;
+  }
+
+  private void releaseSplitOrMergeLock() throws KeeperException {
+    String splitOrMergeLock = ZKUtil.joinZNode(zkw.getSwitchLockZNode(), SplitOrMergeTracker.LOCK);
+    if (ZKUtil.checkExists(zkw, splitOrMergeLock) != -1) {
+      ZKUtil.deleteNode(zkw, splitOrMergeLock);
+      LOG.info("Deleted the splitOrMerge lock znode.");
+    }
+  }
+
+  private void deleteSplitOrMergeStateOnZK() throws KeeperException {
+    String splitOrMergeState = ZKUtil.joinZNode(zkw.getSwitchLockZNode(),
+      SplitOrMergeTracker.STATE);
+    if (ZKUtil.checkExists(zkw, splitOrMergeState) != -1) {
+      ZKUtil.deleteNode(zkw, splitOrMergeState);
+    }
+  }
+
+  private void saveSplitOrMergeStateOnZK(boolean[] status) throws KeeperException {
+    String splitOrMergeStates = ZKUtil.joinZNode(zkw.getSwitchLockZNode(),
+      SplitOrMergeTracker.STATE);
+    ZooKeeperProtos.SplitAndMergeState.Builder builder
+      = ZooKeeperProtos.SplitAndMergeState.newBuilder();
+    builder.setSplitEnabled(status[0]);
+    builder.setMergeEnabled(status[1]);
+    ZKUtil.createSetData(zkw, splitOrMergeStates,
+      ProtobufUtil.prependPBMagic(builder.build().toByteArray()));
+  }
+
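+  // Znode layout used by the lock protocol (paths assume the default base znode and the
+  // default "switch" znode names; see ZooKeeperWatcher#getSwitchLockZNode()):
+  //   <base>/switch/locks/splitOrMergeLock  - PB-encoded SplitOrMergeLockKey (random key)
+  //   <base>/switch/locks/splitOrMergeState - PB-encoded SplitAndMergeState (previous values)
+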
  /**
   * Set summary mode.
   * Print only summary of the tables and status (OK or INCONSISTENT)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
index 0d729a1..3a8b2a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
@@ -24,7 +24,9 @@ import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.zookeeper.KeeperException;
@@ -37,8 +39,13 @@ import org.apache.zookeeper.KeeperException;
 @InterfaceAudience.Private
 public class SplitOrMergeTracker {
 
+  public static final String LOCK = "splitOrMergeLock";
+  public static final String STATE = "splitOrMergeState";
+
   private String splitZnode;
   private String mergeZnode;
+  private String splitOrMergeLock;
+  private ZooKeeperWatcher watcher;
 
   private SwitchStateTracker splitStateTracker;
   private SwitchStateTracker mergeStateTracker;
@@ -49,6 +56,9 @@ public class SplitOrMergeTracker {
       if (ZKUtil.checkExists(watcher, watcher.getSwitchZNode()) < 0) {
         ZKUtil.createAndFailSilent(watcher, watcher.getSwitchZNode());
       }
+      if (ZKUtil.checkExists(watcher, watcher.getSwitchLockZNode()) < 0) {
+        ZKUtil.createAndFailSilent(watcher, watcher.getSwitchLockZNode());
+      }
     } catch (KeeperException e) {
       throw new RuntimeException(e);
     }
@@ -56,8 +66,12 @@ public class SplitOrMergeTracker {
       conf.get("zookeeper.znode.switch.split", "split"));
     mergeZnode = ZKUtil.joinZNode(watcher.getSwitchZNode(),
       conf.get("zookeeper.znode.switch.merge", "merge"));
+
+    splitOrMergeLock = ZKUtil.joinZNode(watcher.getSwitchLockZNode(), LOCK);
+
     splitStateTracker = new SwitchStateTracker(watcher, splitZnode, abortable);
     mergeStateTracker = new SwitchStateTracker(watcher, mergeZnode, abortable);
+    this.watcher = watcher;
   }
 
   public void start() {
@@ -77,8 +91,12 @@ public class SplitOrMergeTracker {
     return false;
   }
 
-  public void setSplitOrMergeEnabled(boolean enabled, Admin.MasterSwitchType switchType)
-    throws KeeperException {
+  public void setSplitOrMergeEnabled(boolean enabled, String key, Admin.MasterSwitchType switchType)
+    throws KeeperException, IOException, DeserializationException,
+    InterruptedException {
+    if (!checkLock(key)) {
+      throw new IOException("Can't set splitOrMerge switch: the lock key does not match!");
+    }
     switch (switchType) {
       case SPLIT:
         splitStateTracker.setSwitchEnabled(enabled);
@@ -91,6 +109,31 @@ public class SplitOrMergeTracker {
     }
   }
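+
+  // Intended flow (sketch): hbck's acquireSplitOrMergeLock() stores a random key in the lock
+  // znode; while the lock exists, setSplitOrMergeEnabled(..., key, ...) only succeeds when the
+  // same key is presented; releaseSplitOrMergeLock() deletes the lock so that calls made with
+  // an empty key (the plain Admin API) work again.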
+  /**
+   * Checks the split/merge lock. If no lock is present, the caller may proceed. If a lock
+   * exists, the supplied key must match the key stored in the lock znode; otherwise the
+   * switches must not be modified.
+   *
+   * @return true if the check passes, false otherwise
+   */
+  private boolean checkLock(String key) throws KeeperException, InterruptedException,
+    DeserializationException, IOException {
+    if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) {
+      byte[] bytes = ZKUtil.getData(watcher, splitOrMergeLock);
+      ProtobufUtil.expectPBMagicPrefix(bytes);
+      ZooKeeperProtos.SplitOrMergeLockKey.Builder builder =
+        ZooKeeperProtos.SplitOrMergeLockKey.newBuilder();
+      int magicLen = ProtobufUtil.lengthOfPBMagic();
+      ProtobufUtil.mergeFrom(builder, bytes, magicLen, bytes.length - magicLen);
+      ZooKeeperProtos.SplitOrMergeLockKey splitOrMergeLockKey = builder.build();
+      return !StringUtils.isEmpty(key) && key.equals(splitOrMergeLockKey.getKey());
+    }
+    return true;
+  }
+
   private static class SwitchStateTracker extends ZooKeeperNodeTracker {
 
     public SwitchStateTracker(ZooKeeperWatcher watcher, String node, Abortable abortable) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index b1864d2..9eafe49 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -60,6 +61,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
 import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -68,6 +70,8 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -88,6 +92,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.*;
 import static org.junit.Assert.*;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
 
 @Category({MiscTests.class, LargeTests.class})
 public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
@@ -1840,4 +1846,52 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
     hbck.close();
   }
+
+  /**
+   * See HBASE-15406
+   */
+  @Test
+  public void testSplitOrMergeStateWhenHBCKAbort() throws Exception {
+    admin.setSplitOrMergeEnabled(true, false,
+      Admin.MasterSwitchType.SPLIT, Admin.MasterSwitchType.MERGE);
+    boolean oldSplit = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
+    boolean oldMerge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE);
+
+    assertTrue(oldSplit);
+    assertTrue(oldMerge);
+
+    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
+    HBaseFsck hbck = new HBaseFsck(conf, exec);
+    HBaseFsck.setDisplayFullReport(); // i.e. -details
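+    // Use a Mockito spy so this hbck run terminates without restoring the switches,
+    // simulating hbck being killed after it has disabled split and merge.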
+    final HBaseFsck spiedHbck = spy(hbck);
+    doAnswer(new Answer<Object>() {
+      @Override
+      public Object answer(InvocationOnMock invocation) throws Throwable {
+        // Clear the disable flag while hbck runs, so the finally block in onlineHbck()
+        // does not set the split/merge switches back; they stay disabled on ZK.
+        HBaseFsck.setDisableSplitAndMerge(false);
+        return null;
+      }
+    }).when(spiedHbck).onlineConsistencyRepair();
+    HBaseFsck.setDisableSplitAndMerge();
+    spiedHbck.connect();
+    spiedHbck.onlineHbck();
+    spiedHbck.close();
+
+    boolean split = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
+    boolean merge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE);
+    assertFalse(split);
+    assertFalse(merge);
+
+    // Re-run hbck to repair the switch state.
+    hbck = new HBaseFsck(conf, exec);
+    hbck.connect();
+    hbck.onlineHbck();
+    hbck.close();
+
+    split = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT);
+    merge = admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE);
+
+    assertTrue(split);
+    assertTrue(merge);
+  }
 }
-- 
1.9.3 (Apple Git-50)