diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 99fe157..436cf70 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -34,6 +34,7 @@ import java.util.TreeMap; import java.util.TreeSet; import java.util.regex.Matcher; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -75,8 +76,9 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> { * Version 5 removed transactional pollution -- e.g. indexes * Version 6 changed metadata to BytesBytesPair in PB * Version 7 adds table-level configuration + * Version 8 adds table state (enabled/disabled) */ - private static final byte TABLE_DESCRIPTOR_VERSION = 7; + private static final byte TABLE_DESCRIPTOR_VERSION = 8; private TableName name = null; @@ -165,6 +167,18 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> { new ImmutableBytesWritable(Bytes.toBytes(IS_META)); /** + * INTERNAL Used by rest interface to access this metadata + * attribute which denotes if the table is enabled or not + * + * @see #isEnabled() + */ + public static final String TABLE_STATE = "TABLE_STATE"; + private static final ImmutableBytesWritable TABLE_STATE_KEY = + new ImmutableBytesWritable(Bytes.toBytes(TABLE_STATE)); + + public static final TableState.State DEFAULT_TABLE_STATE = TableState.State.ENABLED; + + /** * INTERNAL Used by HBase Shell interface to access this metadata * attribute which denotes if the deferred log flush option is enabled. * @deprecated Use {@link #DURABILITY} instead. @@ -236,6 +250,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> { DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH, String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH)); DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name + DEFAULT_VALUES.put(TABLE_STATE, DEFAULT_TABLE_STATE.name()); //use the enum name DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION)); for (String s : DEFAULT_VALUES.keySet()) { RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s))); } @@ -259,6 +274,11 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> { private Durability durability = null; /** + * State setting for the table + */ + private TableState.State tableState = null; + + /** * Maps column family name to the respective HColumnDescriptors */ private final Map<byte[], HColumnDescriptor> families = @@ -638,6 +658,37 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> { } /** + * Sets the {@link org.apache.hadoop.hbase.client.TableState.State} setting for the table. + * @param tableState enum value + */ + public void setTableState(TableState.State tableState) { + this.tableState = tableState; + setValue(TABLE_STATE_KEY, tableState.name()); + } + + /** + * Returns the table state of the table. + * @return the tableState setting for the table. + */ + public TableState.State getTableState() { + if (this.tableState == null) { + byte[] v = getValue(TABLE_STATE_KEY); + if (v == null) { + this.tableState = DEFAULT_TABLE_STATE; + } else { + try { + this.tableState = TableState.State.valueOf(Bytes.toString(v)); + } catch (IllegalArgumentException ex) { + LOG.warn("Received " + ex + " because TableState value for HTableDescriptor" + " is not known. 
TableState:" + Bytes.toString(v)); + this.tableState = DEFAULT_TABLE_STATE; + } + } + } + return this.tableState; + } + + /** * Get the name of the table * * @return TableName diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java index 9d1570d..5b4f7c7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java @@ -170,6 +170,11 @@ class ConnectionAdapter implements ClusterConnection { } @Override + public TableState getTableState(TableName tableName) throws IOException { + return wrappedConnection.getTableState(tableName); + } + + @Override public HTableDescriptor[] listTables() throws IOException { return wrappedConnection.listTables(); } @@ -435,4 +440,4 @@ class ConnectionAdapter implements ClusterConnection { public AsyncProcess getAsyncProcess() { return wrappedConnection.getAsyncProcess(); } -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index 0813745..9648c69 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -176,6 +176,8 @@ import com.google.protobuf.BlockingRpcChannel; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; +import static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*; + /** * An internal, A non-instantiable class that manages creation of {@link HConnection}s. 
*/ @@ -893,7 +895,7 @@ class ConnectionManager { @Override public boolean isTableEnabled(TableName tableName) throws IOException { - return this.registry.isTableOnlineState(tableName, true); + return getTableState(tableName).getState().equals(TableState.State.ENABLED); } @Override @@ -903,7 +905,7 @@ @Override public boolean isTableDisabled(TableName tableName) throws IOException { - return this.registry.isTableOnlineState(tableName, false); + return getTableState(tableName).getState().equals(TableState.State.DISABLED); } @Override @@ -1993,6 +1995,13 @@ } @Override + public GetTableStateResponse getTableState( + RpcController controller, GetTableStateRequest request) + throws ServiceException { + return stub.getTableState(controller, request); + } + + @Override public void close() { release(this.mss); } @@ -2498,6 +2507,19 @@ throws IOException { return getHTableDescriptor(TableName.valueOf(tableName)); } + + public TableState getTableState(TableName tableName) throws IOException { + MasterKeepAliveConnection master = getKeepAliveMasterService(); + try { + GetTableStateResponse resp = master.getTableState(null, + RequestConverter.buildGetTableStateRequest(tableName)); + return TableState.convert(resp.getTableState()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + } } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index cd11a52..918c944 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -208,6 +208,13 @@ public interface HConnection extends Abortable, Closeable { boolean isTableDisabled(byte[] tableName) throws IOException; /** + * Retrieves the TableState, representing the current state of the table.
+ * @param tableName the table to fetch state for + * @return the state of the table + */ + public TableState getTableState(TableName tableName) throws IOException; + + /** * @param tableName table name * @return true if all regions of the table are available, false otherwise * @throws IOException if a remote or network exception occurs @@ -576,4 +583,4 @@ * @deprecated internal method, do not use thru HConnection */ @Deprecated public NonceGenerator getNonceGenerator(); -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java index aab547e..89c8cef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java @@ -44,14 +44,8 @@ interface Registry { String getClusterId(); /** - * @param enabled Return true if table is enabled - * @throws IOException - */ - boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException; - - /** * @return Count of 'running' regionservers * @throws IOException */ int getCurrentNrHRS() throws IOException; -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java new file mode 100644 index 0000000..dc04ad8 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; + +/** + * Represents table state.
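A short usage sketch for the new connection-level API (assuming an already-open HConnection named `conn` and a hypothetical table "t1"): the enabled/disabled checks in ConnectionManager above now reduce to a comparison on the state returned by getTableState(), which asks the master instead of reading ZooKeeper.

```java
// Sketch only: `conn` is assumed to be an open HConnection.
TableState state = conn.getTableState(TableName.valueOf("t1"));
boolean enabled = state.getState().equals(TableState.State.ENABLED);
// This is what conn.isTableEnabled(TableName.valueOf("t1")) now computes
// via the master's GetTableState RPC.
```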
+ */ +@InterfaceAudience.Private +public class TableState { + + @InterfaceAudience.Public + @InterfaceStability.Evolving + public static enum State { + ENABLED, + DISABLED, + DISABLING, + ENABLING + } + + private final State state; + + public TableState(State state) { + this.state = state; + } + + public State getState() { + return state; + } + + public static ClusterStatusProtos.TableState.State convert(State tableState) { + ClusterStatusProtos.TableState.State state; + switch(tableState) { + case ENABLED: + state = ClusterStatusProtos.TableState.State.ENABLED; + break; + case DISABLED: + state = ClusterStatusProtos.TableState.State.DISABLED; + break; + case DISABLING: + state = ClusterStatusProtos.TableState.State.DISABLING; + break; + case ENABLING: + state = ClusterStatusProtos.TableState.State.ENABLING; + break; + default: + throw new IllegalStateException(tableState.toString()); + } + return state; + } + + public static ClusterStatusProtos.TableState convert(TableState tableState) { + ClusterStatusProtos.TableState.State state = convert(tableState.getState()); + return ClusterStatusProtos.TableState.newBuilder() + .setState(state) + .build(); + } + + public static TableState convert(ClusterStatusProtos.TableState tableState) { + TableState.State state = convert(tableState.getState()); + return new TableState(state); + } + + private static State convert(ClusterStatusProtos.TableState.State state) { + State ret; + switch(state) { + case ENABLED: + ret = State.ENABLED; + break; + case DISABLED: + ret = State.DISABLED; + break; + case DISABLING: + ret = State.DISABLING; + break; + case ENABLING: + ret = State.ENABLING; + break; + default: + throw new IllegalStateException(state.toString()); + } + return ret; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java index 9123d50..4d3cc3e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java @@ -18,18 +18,17 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import java.io.InterruptedIOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.zookeeper.KeeperException; @@ -98,24 +97,6 @@ class ZooKeeperRegistry implements Registry { } @Override - public boolean isTableOnlineState(TableName tableName, boolean enabled) - throws IOException { - ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher(); - try { - if (enabled) { - return ZKTableStateClientSideReader.isEnabledTable(zkw, tableName); - } - return ZKTableStateClientSideReader.isDisabledTable(zkw, tableName); - } catch (KeeperException e) { - throw new IOException("Enable/Disable failed", e); - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } finally { - zkw.close(); - } - } - - @Override public int getCurrentNrHRS() 
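The convert() overloads in TableState above form a lossless round trip between the client-side enum and its ClusterStatusProtos form, as used when a master response is deserialized; a sketch:

```java
// Sketch only: round-tripping a client-side TableState through protobuf.
TableState ts = new TableState(TableState.State.DISABLING);
ClusterStatusProtos.TableState pb = TableState.convert(ts);   // client -> pb
TableState back = TableState.convert(pb);                     // pb -> client
assert back.getState() == TableState.State.DISABLING;
```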
throws IOException { ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher(); try { @@ -128,4 +109,4 @@ class ZooKeeperRegistry implements Registry { zkw.close(); } } -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index d6bcb29..ab764a3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.protobuf; import java.io.IOException; import java.util.List; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.classification.InterfaceAudience; @@ -106,6 +107,8 @@ import org.apache.hadoop.hbase.util.Pair; import com.google.protobuf.ByteString; +import static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*; + /** * Helper utility to build protocol buffer requests, * or build components for protocol buffer requests. @@ -1177,6 +1180,19 @@ public final class RequestConverter { } /** + * Creates a protocol buffer GetTableStateRequest + * + * @param tableName table to get request for + * @return a GetTableStateRequest + */ + public static GetTableStateRequest buildGetTableStateRequest( + final TableName tableName) { + return GetTableStateRequest.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .build(); + } + + /** * Creates a protocol buffer GetTableDescriptorsRequest for a single table * * @param tableName the table name diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java deleted file mode 100644 index 94bd31e..0000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java +++ /dev/null @@ -1,168 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.zookeeper; - -import com.google.protobuf.InvalidProtocolBufferException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.zookeeper.KeeperException; - -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * Non-instantiable class that provides helper functions to learn - * about HBase table state for code running on client side (hence, not having - * access to consensus context). - * - * Doesn't cache any table state, just goes directly to ZooKeeper. - * TODO: decouple this class from ZooKeeper. - */ -@InterfaceAudience.Private -public class ZKTableStateClientSideReader { - - private ZKTableStateClientSideReader() {} - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLED}. - * This method does not use cache. - * This method is for clients other than AssignmentManager - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isDisabledTable(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - return isTableState(ZooKeeperProtos.Table.State.DISABLED, state); - } - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#ENABLED}. - * This method does not use cache. - * This method is for clients other than AssignmentManager - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isEnabledTable(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED; - } - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLING} - * of {@code ZooKeeperProtos.Table.State#DISABLED}. - * This method does not use cache. - * This method is for clients other than AssignmentManager. - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) || - isTableState(ZooKeeperProtos.Table.State.DISABLED, state); - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. 
- * @return Set of disabled tables, empty Set if none - * @throws KeeperException - */ - public static Set getDisabledTables(ZooKeeperWatcher zkw) - throws KeeperException, InterruptedException { - Set disabledTables = new HashSet(); - List children = - ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); - for (String child: children) { - TableName tableName = - TableName.valueOf(child); - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(tableName); - } - return disabledTables; - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @return Set of disabled tables, empty Set if none - * @throws KeeperException - */ - public static Set getDisabledOrDisablingTables(ZooKeeperWatcher zkw) - throws KeeperException, InterruptedException { - Set disabledTables = new HashSet(); - List children = - ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); - for (String child: children) { - TableName tableName = - TableName.valueOf(child); - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - if (state == ZooKeeperProtos.Table.State.DISABLED || - state == ZooKeeperProtos.Table.State.DISABLING) - disabledTables.add(tableName); - } - return disabledTables; - } - - static boolean isTableState(final ZooKeeperProtos.Table.State expectedState, - final ZooKeeperProtos.Table.State currentState) { - return currentState != null && currentState.equals(expectedState); - } - - /** - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return Null or {@link ZooKeeperProtos.Table.State} found in znode. - * @throws KeeperException - */ - static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); - byte [] data = ZKUtil.getData(zkw, znode); - if (data == null || data.length <= 0) return null; - try { - ProtobufUtil.expectPBMagicPrefix(data); - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.Table t = builder.mergeFrom(data, magicLen, data.length - magicLen).build(); - return t.getState(); - } catch (InvalidProtocolBufferException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - throw ke; - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } -} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index 9e878b4..5701470 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -129,12 +129,6 @@ public class TestClientNoCluster extends Configured implements Tool { } @Override - public boolean isTableOnlineState(TableName tableName, boolean enabled) - throws IOException { - return enabled; - } - - @Override public int getCurrentNrHRS() throws IOException { return 1; } @@ -813,4 +807,4 @@ public class TestClientNoCluster extends Configured implements Tool { public static void main(String[] args) throws Exception { System.exit(ToolRunner.run(HBaseConfiguration.create(), new TestClientNoCluster(), args)); } -} \ No newline at end of file +} diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java index c558485..c2a7de2 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java @@ -1087,6 +1087,593 @@ public final class ClusterStatusProtos { // @@protoc_insertion_point(class_scope:RegionState) } + public interface TableStateOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableState.State state = 1 [default = ENABLED]; + /** + * required .TableState.State state = 1 [default = ENABLED]; + * + *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+ */ + boolean hasState(); + /** + * required .TableState.State state = 1 [default = ENABLED]; + * + *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State getState(); + } + /** + * Protobuf type {@code TableState} + */ + public static final class TableState extends + com.google.protobuf.GeneratedMessage + implements TableStateOrBuilder { + // Use TableState.newBuilder() to construct. + private TableState(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableState defaultInstance; + public static TableState getDefaultInstance() { + return defaultInstance; + } + + public TableState getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableState( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_TableState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_TableState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableState parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableState(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code TableState.State} + * + *
+     * <pre>
+     * Table's current state
+     * </pre>
+ */ + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + * ENABLED = 0; + */ + ENABLED(0, 0), + /** + * DISABLED = 1; + */ + DISABLED(1, 1), + /** + * DISABLING = 2; + */ + DISABLING(2, 2), + /** + * ENABLING = 3; + */ + ENABLING(3, 3), + ; + + /** + * ENABLED = 0; + */ + public static final int ENABLED_VALUE = 0; + /** + * DISABLED = 1; + */ + public static final int DISABLED_VALUE = 1; + /** + * DISABLING = 2; + */ + public static final int DISABLING_VALUE = 2; + /** + * ENABLING = 3; + */ + public static final int ENABLING_VALUE = 3; + + + public final int getNumber() { return value; } + + public static State valueOf(int value) { + switch (value) { + case 0: return ENABLED; + case 1: return DISABLED; + case 2: return DISABLING; + case 3: return ENABLING; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private State(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:TableState.State) + } + + private int bitField0_; + // required .TableState.State state = 1 [default = ENABLED]; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State state_; + /** + * required .TableState.State state = 1 [default = ENABLED]; + * + *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+ */ + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState.State state = 1 [default = ENABLED]; + * + *
+     * <pre>
+     * This is the table's state.
+     * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State getState() { + return state_; + } + + private void initFields() { + state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State.ENABLED; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, state_.getNumber()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_.getNumber()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState) obj; + + boolean result = true; + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code TableState} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableStateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_TableState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_TableState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public 
Builder clear() { + super.clear(); + state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State.ENABLED; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_TableState_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.state_ = state_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasState()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableState.State state = 1 [default = ENABLED]; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State.ENABLED; + /** + * required .TableState.State state = 1 [default = ENABLED]; + * + *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+ */ + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState.State state = 1 [default = ENABLED]; + * + *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State getState() { + return state_; + } + /** + * required .TableState.State state = 1 [default = ENABLED]; + * + *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+ */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + state_ = value; + onChanged(); + return this; + } + /** + * required .TableState.State state = 1 [default = ENABLED]; + * + *
+       * <pre>
+       * This is the table's state.
+       * </pre>
+ */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.State.ENABLED; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:TableState) + } + + static { + defaultInstance = new TableState(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:TableState) + } + public interface RegionInTransitionOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -10305,6 +10892,11 @@ public final class ClusterStatusProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionState_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_TableState_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_TableState_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_RegionInTransition_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -10347,41 +10939,44 @@ public final class ClusterStatusProtos { "SE\020\004\022\013\n\007CLOSING\020\005\022\n\n\006CLOSED\020\006\022\r\n\tSPLITTI" + "NG\020\007\022\t\n\005SPLIT\020\010\022\017\n\013FAILED_OPEN\020\t\022\020\n\014FAIL" + "ED_CLOSE\020\n\022\013\n\007MERGING\020\013\022\n\n\006MERGED\020\014\022\021\n\rS" + - "PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio", - "nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" + - "ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" + - "e\"\320\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" + - "(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" + - "storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" + - "ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" + - "\030\n\020memstore_size_MB\030\006 \001(\r\022\037\n\027storefile_i" + - "ndex_size_MB\030\007 \001(\r\022\033\n\023read_requests_coun" + - "t\030\010 \001(\004\022\034\n\024write_requests_count\030\t \001(\004\022\034\n" + - "\024total_compacting_KVs\030\n \001(\004\022\035\n\025current_c", - "ompacted_KVs\030\013 \001(\004\022\032\n\022root_index_size_KB" + - "\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" + - "(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" + - "\n\024complete_sequence_id\030\017 \001(\004\"\212\002\n\nServerL" + - "oad\022\032\n\022number_of_requests\030\001 \001(\r\022 \n\030total" + - "_number_of_requests\030\002 \001(\r\022\024\n\014used_heap_M" + - "B\030\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022!\n\014region_l" + - "oads\030\005 \003(\0132\013.RegionLoad\022\"\n\014coprocessors\030" + - "\006 \003(\0132\014.Coprocessor\022\031\n\021report_start_time" + - "\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001(\004\022\030\n\020info_", - "server_port\030\t \001(\r\"O\n\016LiveServerInfo\022\033\n\006s" + - "erver\030\001 \002(\0132\013.ServerName\022 \n\013server_load\030" + - "\002 \002(\0132\013.ServerLoad\"\340\002\n\rClusterStatus\022/\n\r" + - "hbase_version\030\001 \001(\0132\030.HBaseVersionFileCo" + - "ntent\022%\n\014live_servers\030\002 \003(\0132\017.LiveServer" + - "Info\022!\n\014dead_servers\030\003 \003(\0132\013.ServerName\022" + - "2\n\025regions_in_transition\030\004 \003(\0132\023.RegionI" + - "nTransition\022\036\n\ncluster_id\030\005 \001(\0132\n.Cluste" + - 
"rId\022)\n\023master_coprocessors\030\006 \003(\0132\014.Copro" + - "cessor\022\033\n\006master\030\007 \001(\0132\013.ServerName\022#\n\016b", - "ackup_masters\030\010 \003(\0132\013.ServerName\022\023\n\013bala" + - "ncer_on\030\t \001(\010BF\n*org.apache.hadoop.hbase" + - ".protobuf.generatedB\023ClusterStatusProtos" + - "H\001\240\001\001" + "PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"x\n\nTable", + "State\022)\n\005state\030\001 \002(\0162\021.TableState.State:" + + "\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABL" + + "ED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"X\n\022Reg" + + "ionInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSp" + + "ecifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionSt" + + "ate\"\320\003\n\nRegionLoad\022*\n\020region_specifier\030\001" + + " \002(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022" + + "\n\nstorefiles\030\003 \001(\r\022\"\n\032store_uncompressed" + + "_size_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(" + + "\r\022\030\n\020memstore_size_MB\030\006 \001(\r\022\037\n\027storefile", + "_index_size_MB\030\007 \001(\r\022\033\n\023read_requests_co" + + "unt\030\010 \001(\004\022\034\n\024write_requests_count\030\t \001(\004\022" + + "\034\n\024total_compacting_KVs\030\n \001(\004\022\035\n\025current" + + "_compacted_KVs\030\013 \001(\004\022\032\n\022root_index_size_" + + "KB\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r" + + " \001(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r" + + "\022\034\n\024complete_sequence_id\030\017 \001(\004\"\212\002\n\nServe" + + "rLoad\022\032\n\022number_of_requests\030\001 \001(\r\022 \n\030tot" + + "al_number_of_requests\030\002 \001(\r\022\024\n\014used_heap" + + "_MB\030\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022!\n\014region", + "_loads\030\005 \003(\0132\013.RegionLoad\022\"\n\014coprocessor" + + "s\030\006 \003(\0132\014.Coprocessor\022\031\n\021report_start_ti" + + "me\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001(\004\022\030\n\020inf" + + "o_server_port\030\t \001(\r\"O\n\016LiveServerInfo\022\033\n" + + "\006server\030\001 \002(\0132\013.ServerName\022 \n\013server_loa" + + "d\030\002 \002(\0132\013.ServerLoad\"\340\002\n\rClusterStatus\022/" + + "\n\rhbase_version\030\001 \001(\0132\030.HBaseVersionFile" + + "Content\022%\n\014live_servers\030\002 \003(\0132\017.LiveServ" + + "erInfo\022!\n\014dead_servers\030\003 \003(\0132\013.ServerNam" + + "e\0222\n\025regions_in_transition\030\004 \003(\0132\023.Regio", + "nInTransition\022\036\n\ncluster_id\030\005 \001(\0132\n.Clus" + + "terId\022)\n\023master_coprocessors\030\006 \003(\0132\014.Cop" + + "rocessor\022\033\n\006master\030\007 \001(\0132\013.ServerName\022#\n" + + "\016backup_masters\030\010 \003(\0132\013.ServerName\022\023\n\013ba" + + "lancer_on\030\t \001(\010BF\n*org.apache.hadoop.hba" + + "se.protobuf.generatedB\023ClusterStatusProt" + + "osH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -10394,32 +10989,38 @@ public final class ClusterStatusProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionState_descriptor, new java.lang.String[] { "RegionInfo", "State", "Stamp", }); - internal_static_RegionInTransition_descriptor = + internal_static_TableState_descriptor = 
getDescriptor().getMessageTypes().get(1); + internal_static_TableState_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TableState_descriptor, + new java.lang.String[] { "State", }); + internal_static_RegionInTransition_descriptor = + getDescriptor().getMessageTypes().get(2); internal_static_RegionInTransition_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionInTransition_descriptor, new java.lang.String[] { "Spec", "RegionState", }); internal_static_RegionLoad_descriptor = - getDescriptor().getMessageTypes().get(2); + getDescriptor().getMessageTypes().get(3); internal_static_RegionLoad_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionLoad_descriptor, new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", }); internal_static_ServerLoad_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(4); internal_static_ServerLoad_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerLoad_descriptor, new java.lang.String[] { "NumberOfRequests", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", "ReportStartTime", "ReportEndTime", "InfoServerPort", }); internal_static_LiveServerInfo_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_LiveServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_LiveServerInfo_descriptor, new java.lang.String[] { "Server", "ServerLoad", }); internal_static_ClusterStatus_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(6); internal_static_ClusterStatus_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ClusterStatus_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index ee1ab67..8d2614b 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -36655,6 +36655,1128 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:GetTableNamesResponse) } + public interface GetTableStateRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableName table_name = 1; + /** + * required .TableName table_name = 1; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + } + /** + * Protobuf type {@code GetTableStateRequest} + */ + public static final class GetTableStateRequest extends + com.google.protobuf.GeneratedMessage + implements GetTableStateRequestOrBuilder { + // Use GetTableStateRequest.newBuilder() to 
construct. + private GetTableStateRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetTableStateRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetTableStateRequest defaultInstance; + public static GetTableStateRequest getDefaultInstance() { + return defaultInstance; + } + + public GetTableStateRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetTableStateRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetTableStateRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetTableStateRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * 
required .TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetTableStateRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class); + } + + // 
Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName 
table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetTableStateRequest) + } + + static { + defaultInstance = new GetTableStateRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetTableStateRequest) + } + + public interface GetTableStateResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableState table_state = 1; + /** + * required .TableState table_state = 1; + */ + boolean hasTableState(); + /** + * required .TableState table_state = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState getTableState(); + /** + * required .TableState table_state = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableStateOrBuilder getTableStateOrBuilder(); + } + /** + * Protobuf type {@code GetTableStateResponse} + */ + public static final class GetTableStateResponse extends + com.google.protobuf.GeneratedMessage + implements GetTableStateResponseOrBuilder { + // Use GetTableStateResponse.newBuilder() to construct. 
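// A minimal usage sketch (illustrative, not generated code): build the new
// request message and round-trip it through the wire format. The namespace
// and qualifier values are hypothetical; only APIs defined in this patch
// plus protobuf's ByteString are assumed.
static GetTableStateRequest exampleRequestRoundTrip()
    throws com.google.protobuf.InvalidProtocolBufferException {
  GetTableStateRequest req = GetTableStateRequest.newBuilder()
      .setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder()
          .setNamespace(com.google.protobuf.ByteString.copyFromUtf8("default"))
          .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("web_table")))
      .build();
  byte[] wire = req.toByteArray();             // serialize through the generated writeTo()
  return GetTableStateRequest.parseFrom(wire); // reparse through the generated PARSER
}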
+ private GetTableStateResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetTableStateResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetTableStateResponse defaultInstance; + public static GetTableStateResponse getDefaultInstance() { + return defaultInstance; + } + + public GetTableStateResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetTableStateResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableState_.toBuilder(); + } + tableState_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableState_); + tableState_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetTableStateResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetTableStateResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableState table_state = 1; + public static final int TABLE_STATE_FIELD_NUMBER = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState tableState_; + /** + * required .TableState table_state = 1; + */ + public boolean hasTableState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState getTableState() { + return tableState_; + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableStateOrBuilder getTableStateOrBuilder() { + return tableState_; + } + + private void initFields() { + tableState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableState()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableState().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableState_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableState_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) obj; + + boolean result = true; + result = result && (hasTableState() == other.hasTableState()); + if (hasTableState()) { + result = result && getTableState() + .equals(other.getTableState()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableState()) { + hash = (37 * hash) + TABLE_STATE_FIELD_NUMBER; + hash = (53 * hash) + getTableState().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetTableStateResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableStateFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableStateBuilder_ == null) { + tableState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.getDefaultInstance(); + } else { + tableStateBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableStateBuilder_ == null) { + result.tableState_ = tableState_; + } else { + result.tableState_ = tableStateBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()) return this; + if (other.hasTableState()) { + mergeTableState(other.getTableState()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableState()) { + + return false; + } + if (!getTableState().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableState table_state = 1; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState tableState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableStateOrBuilder> tableStateBuilder_; + /** + * required .TableState table_state = 1; + */ + public boolean hasTableState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState getTableState() { + if (tableStateBuilder_ == null) { + return tableState_; + } else { + return tableStateBuilder_.getMessage(); + } + } + /** + * required .TableState table_state = 1; + */ + public Builder setTableState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState value) { + if (tableStateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableState_ = value; + onChanged(); + } else { + tableStateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableState table_state = 1; + */ + public Builder setTableState( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.Builder builderForValue) { + if (tableStateBuilder_ == null) { + tableState_ = builderForValue.build(); + onChanged(); + } else { + tableStateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableState table_state = 1; + */ + public Builder mergeTableState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState value) { + if (tableStateBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableState_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.getDefaultInstance()) { + tableState_ = + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.newBuilder(tableState_).mergeFrom(value).buildPartial(); + } else { + tableState_ = value; + } + onChanged(); + } else { + tableStateBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableState table_state = 1; + */ + public Builder clearTableState() { + if (tableStateBuilder_ == null) { + tableState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.getDefaultInstance(); + onChanged(); + } else { + tableStateBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.Builder getTableStateBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + 
return getTableStateFieldBuilder().getBuilder(); + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableStateOrBuilder getTableStateOrBuilder() { + if (tableStateBuilder_ != null) { + return tableStateBuilder_.getMessageOrBuilder(); + } else { + return tableState_; + } + } + /** + * required .TableState table_state = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableStateOrBuilder> + getTableStateFieldBuilder() { + if (tableStateBuilder_ == null) { + tableStateBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.TableStateOrBuilder>( + tableState_, + getParentForChildren(), + isClean()); + tableState_ = null; + } + return tableStateBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetTableStateResponse) + } + + static { + defaultInstance = new GetTableStateResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetTableStateResponse) + } + public interface GetClusterStatusRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { } @@ -41176,6 +42298,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetTableState(.GetTableStateRequest) returns (.GetTableStateResponse); + * + *
+       ** returns table state
+       * </pre>
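+       *
+       * A minimal server-side sketch (assumed wiring, not part of the
+       * generated file): an implementation resolves the state for the
+       * requested table and hands the response to the callback; the
+       * lookupState helper below is hypothetical.
+       *
+       *   public void getTableState(RpcController controller,
+       *       GetTableStateRequest request,
+       *       RpcCallback<GetTableStateResponse> done) {
+       *     GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder();
+       *     builder.setTableState(lookupState(request.getTableName()));
+       *     done.run(builder.build());  // RpcCallback.run(...) delivers the response
+       *   }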
+ */ + public abstract void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -41525,6 +42659,14 @@ public final class MasterProtos { impl.listTableNamesByNamespace(controller, request, done); } + @java.lang.Override + public void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done) { + impl.getTableState(controller, request, done); + } + }; } @@ -41633,6 +42775,8 @@ public final class MasterProtos { return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); case 42: return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); + case 43: + return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -41733,6 +42877,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -41833,6 +42979,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -42383,6 +43531,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetTableState(.GetTableStateRequest) returns (.GetTableStateResponse); + * + *
+     ** returns table state
+     * </pre>
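+     *
+     * A minimal client-side sketch (the channel, controller and request
+     * objects are assumed to exist; not part of the generated file): the
+     * non-blocking stub generated below dispatches over the RpcChannel
+     * and invokes the callback with the decoded response.
+     *
+     *   MasterService.Stub stub = MasterService.newStub(channel);
+     *   stub.getTableState(controller, request,
+     *       new RpcCallback<GetTableStateResponse>() {
+     *         public void run(GetTableStateResponse response) {
+     *           System.out.println("state=" + response.getTableState());
+     *         }
+     *       });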
+ */ + public abstract void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -42620,6 +43780,11 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 43: + this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -42720,6 +43885,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -42820,6 +43987,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -43485,6 +44654,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance())); } + + public void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(43), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -43707,6 +44891,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -44231,6 +45420,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(43), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:MasterService) @@ -44607,6 +45808,16 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetTableNamesResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetTableStateRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetTableStateRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetTableStateResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetTableStateResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_GetClusterStatusRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -44752,97 +45963,101 @@ public final class MasterProtos { "ponse\022\"\n\014table_schema\030\001 \003(\0132\014.TableSchem" + "a\"\026\n\024GetTableNamesRequest\"8\n\025GetTableNam" + "esResponse\022\037\n\013table_names\030\001 \003(\0132\n.TableN" + - "ame\"\031\n\027GetClusterStatusRequest\"B\n\030GetClu" + - "sterStatusResponse\022&\n\016cluster_status\030\001 \002" + - "(\0132\016.ClusterStatus\"\030\n\026IsMasterRunningReq", - "uest\"4\n\027IsMasterRunningResponse\022\031\n\021is_ma" + - "ster_running\030\001 \002(\010\"@\n\024ExecProcedureReque" + - "st\022(\n\tprocedure\030\001 \002(\0132\025.ProcedureDescrip" + - "tion\"F\n\025ExecProcedureResponse\022\030\n\020expecte" + - "d_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026" + - "IsProcedureDoneRequest\022(\n\tprocedure\030\001 \001(" + - "\0132\025.ProcedureDescription\"W\n\027IsProcedureD" + - "oneResponse\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010snap" + - "shot\030\002 \001(\0132\025.ProcedureDescription2\365\027\n\rMa" + - "sterService\022S\n\024GetSchemaAlterStatus\022\034.Ge", - "tSchemaAlterStatusRequest\032\035.GetSchemaAlt" + - "erStatusResponse\022P\n\023GetTableDescriptors\022" + - "\033.GetTableDescriptorsRequest\032\034.GetTableD" + - "escriptorsResponse\022>\n\rGetTableNames\022\025.Ge" + - "tTableNamesRequest\032\026.GetTableNamesRespon" + - "se\022G\n\020GetClusterStatus\022\030.GetClusterStatu" + - "sRequest\032\031.GetClusterStatusResponse\022D\n\017I" + - "sMasterRunning\022\027.IsMasterRunningRequest\032" + - "\030.IsMasterRunningResponse\0222\n\tAddColumn\022\021" + - ".AddColumnRequest\032\022.AddColumnResponse\022;\n", - "\014DeleteColumn\022\024.DeleteColumnRequest\032\025.De" + - "leteColumnResponse\022;\n\014ModifyColumn\022\024.Mod" + - "ifyColumnRequest\032\025.ModifyColumnResponse\022" + - "5\n\nMoveRegion\022\022.MoveRegionRequest\032\023.Move" + - "RegionResponse\022Y\n\026DispatchMergingRegions" + - "\022\036.DispatchMergingRegionsRequest\032\037.Dispa" + - "tchMergingRegionsResponse\022;\n\014AssignRegio" + - "n\022\024.AssignRegionRequest\032\025.AssignRegionRe" + - 
"sponse\022A\n\016UnassignRegion\022\026.UnassignRegio" + - "nRequest\032\027.UnassignRegionResponse\022>\n\rOff", - "lineRegion\022\025.OfflineRegionRequest\032\026.Offl" + - "ineRegionResponse\0228\n\013DeleteTable\022\023.Delet" + - "eTableRequest\032\024.DeleteTableResponse\022>\n\rt" + - "runcateTable\022\025.TruncateTableRequest\032\026.Tr" + - "uncateTableResponse\0228\n\013EnableTable\022\023.Ena" + - "bleTableRequest\032\024.EnableTableResponse\022;\n" + - "\014DisableTable\022\024.DisableTableRequest\032\025.Di" + - "sableTableResponse\0228\n\013ModifyTable\022\023.Modi" + - "fyTableRequest\032\024.ModifyTableResponse\0228\n\013" + - "CreateTable\022\023.CreateTableRequest\032\024.Creat", - "eTableResponse\022/\n\010Shutdown\022\020.ShutdownReq" + - "uest\032\021.ShutdownResponse\0225\n\nStopMaster\022\022." + - "StopMasterRequest\032\023.StopMasterResponse\022," + - "\n\007Balance\022\017.BalanceRequest\032\020.BalanceResp" + - "onse\022M\n\022SetBalancerRunning\022\032.SetBalancer" + - "RunningRequest\032\033.SetBalancerRunningRespo" + - "nse\022A\n\016RunCatalogScan\022\026.RunCatalogScanRe" + - "quest\032\027.RunCatalogScanResponse\022S\n\024Enable" + - "CatalogJanitor\022\034.EnableCatalogJanitorReq" + - "uest\032\035.EnableCatalogJanitorResponse\022\\\n\027I", - "sCatalogJanitorEnabled\022\037.IsCatalogJanito" + - "rEnabledRequest\032 .IsCatalogJanitorEnable" + - "dResponse\022L\n\021ExecMasterService\022\032.Coproce" + - "ssorServiceRequest\032\033.CoprocessorServiceR" + - "esponse\022/\n\010Snapshot\022\020.SnapshotRequest\032\021." + - "SnapshotResponse\022V\n\025GetCompletedSnapshot" + - "s\022\035.GetCompletedSnapshotsRequest\032\036.GetCo" + - "mpletedSnapshotsResponse\022A\n\016DeleteSnapsh" + - "ot\022\026.DeleteSnapshotRequest\032\027.DeleteSnaps" + - "hotResponse\022A\n\016IsSnapshotDone\022\026.IsSnapsh", - "otDoneRequest\032\027.IsSnapshotDoneResponse\022D" + - "\n\017RestoreSnapshot\022\027.RestoreSnapshotReque" + - "st\032\030.RestoreSnapshotResponse\022V\n\025IsRestor" + - "eSnapshotDone\022\035.IsRestoreSnapshotDoneReq" + - "uest\032\036.IsRestoreSnapshotDoneResponse\022>\n\r" + - "ExecProcedure\022\025.ExecProcedureRequest\032\026.E" + - "xecProcedureResponse\022E\n\024ExecProcedureWit" + - "hRet\022\025.ExecProcedureRequest\032\026.ExecProced" + - "ureResponse\022D\n\017IsProcedureDone\022\027.IsProce" + - "dureDoneRequest\032\030.IsProcedureDoneRespons", - "e\022D\n\017ModifyNamespace\022\027.ModifyNamespaceRe" + - "quest\032\030.ModifyNamespaceResponse\022D\n\017Creat" + - "eNamespace\022\027.CreateNamespaceRequest\032\030.Cr" + - "eateNamespaceResponse\022D\n\017DeleteNamespace" + - "\022\027.DeleteNamespaceRequest\032\030.DeleteNamesp" + - "aceResponse\022Y\n\026GetNamespaceDescriptor\022\036." 
+ - "GetNamespaceDescriptorRequest\032\037.GetNames" + - "paceDescriptorResponse\022_\n\030ListNamespaceD" + - "escriptors\022 .ListNamespaceDescriptorsReq" + - "uest\032!.ListNamespaceDescriptorsResponse\022", - "t\n\037ListTableDescriptorsByNamespace\022\'.Lis" + - "tTableDescriptorsByNamespaceRequest\032(.Li" + - "stTableDescriptorsByNamespaceResponse\022b\n" + - "\031ListTableNamesByNamespace\022!.ListTableNa" + - "mesByNamespaceRequest\032\".ListTableNamesBy" + - "NamespaceResponseBB\n*org.apache.hadoop.h" + - "base.protobuf.generatedB\014MasterProtosH\001\210" + - "\001\001\240\001\001" + "ame\"6\n\024GetTableStateRequest\022\036\n\ntable_nam" + + "e\030\001 \002(\0132\n.TableName\"9\n\025GetTableStateResp" + + "onse\022 \n\013table_state\030\001 \002(\0132\013.TableState\"\031", + "\n\027GetClusterStatusRequest\"B\n\030GetClusterS" + + "tatusResponse\022&\n\016cluster_status\030\001 \002(\0132\016." + + "ClusterStatus\"\030\n\026IsMasterRunningRequest\"" + + "4\n\027IsMasterRunningResponse\022\031\n\021is_master_" + + "running\030\001 \002(\010\"@\n\024ExecProcedureRequest\022(\n" + + "\tprocedure\030\001 \002(\0132\025.ProcedureDescription\"" + + "F\n\025ExecProcedureResponse\022\030\n\020expected_tim" + + "eout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026IsPro" + + "cedureDoneRequest\022(\n\tprocedure\030\001 \001(\0132\025.P" + + "rocedureDescription\"W\n\027IsProcedureDoneRe", + "sponse\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010snapshot\030" + + "\002 \001(\0132\025.ProcedureDescription2\265\030\n\rMasterS" + + "ervice\022S\n\024GetSchemaAlterStatus\022\034.GetSche" + + "maAlterStatusRequest\032\035.GetSchemaAlterSta" + + "tusResponse\022P\n\023GetTableDescriptors\022\033.Get" + + "TableDescriptorsRequest\032\034.GetTableDescri" + + "ptorsResponse\022>\n\rGetTableNames\022\025.GetTabl" + + "eNamesRequest\032\026.GetTableNamesResponse\022G\n" + + "\020GetClusterStatus\022\030.GetClusterStatusRequ" + + "est\032\031.GetClusterStatusResponse\022D\n\017IsMast", + "erRunning\022\027.IsMasterRunningRequest\032\030.IsM" + + "asterRunningResponse\0222\n\tAddColumn\022\021.AddC" + + "olumnRequest\032\022.AddColumnResponse\022;\n\014Dele" + + "teColumn\022\024.DeleteColumnRequest\032\025.DeleteC" + + "olumnResponse\022;\n\014ModifyColumn\022\024.ModifyCo" + + "lumnRequest\032\025.ModifyColumnResponse\0225\n\nMo" + + "veRegion\022\022.MoveRegionRequest\032\023.MoveRegio" + + "nResponse\022Y\n\026DispatchMergingRegions\022\036.Di" + + "spatchMergingRegionsRequest\032\037.DispatchMe" + + "rgingRegionsResponse\022;\n\014AssignRegion\022\024.A", + "ssignRegionRequest\032\025.AssignRegionRespons" + + "e\022A\n\016UnassignRegion\022\026.UnassignRegionRequ" + + "est\032\027.UnassignRegionResponse\022>\n\rOfflineR" + + "egion\022\025.OfflineRegionRequest\032\026.OfflineRe" + + "gionResponse\0228\n\013DeleteTable\022\023.DeleteTabl" + + "eRequest\032\024.DeleteTableResponse\022>\n\rtrunca" + + "teTable\022\025.TruncateTableRequest\032\026.Truncat" + + "eTableResponse\0228\n\013EnableTable\022\023.EnableTa" + + "bleRequest\032\024.EnableTableResponse\022;\n\014Disa" + + "bleTable\022\024.DisableTableRequest\032\025.Disable", + "TableResponse\0228\n\013ModifyTable\022\023.ModifyTab" + + "leRequest\032\024.ModifyTableResponse\0228\n\013Creat" + + "eTable\022\023.CreateTableRequest\032\024.CreateTabl" + + "eResponse\022/\n\010Shutdown\022\020.ShutdownRequest\032" + + "\021.ShutdownResponse\0225\n\nStopMaster\022\022.StopM" + + 
"asterRequest\032\023.StopMasterResponse\022,\n\007Bal" + + "ance\022\017.BalanceRequest\032\020.BalanceResponse\022" + + "M\n\022SetBalancerRunning\022\032.SetBalancerRunni" + + "ngRequest\032\033.SetBalancerRunningResponse\022A" + + "\n\016RunCatalogScan\022\026.RunCatalogScanRequest", + "\032\027.RunCatalogScanResponse\022S\n\024EnableCatal" + + "ogJanitor\022\034.EnableCatalogJanitorRequest\032" + + "\035.EnableCatalogJanitorResponse\022\\\n\027IsCata" + + "logJanitorEnabled\022\037.IsCatalogJanitorEnab" + + "ledRequest\032 .IsCatalogJanitorEnabledResp" + + "onse\022L\n\021ExecMasterService\022\032.CoprocessorS" + + "erviceRequest\032\033.CoprocessorServiceRespon" + + "se\022/\n\010Snapshot\022\020.SnapshotRequest\032\021.Snaps" + + "hotResponse\022V\n\025GetCompletedSnapshots\022\035.G" + + "etCompletedSnapshotsRequest\032\036.GetComplet", + "edSnapshotsResponse\022A\n\016DeleteSnapshot\022\026." + + "DeleteSnapshotRequest\032\027.DeleteSnapshotRe" + + "sponse\022A\n\016IsSnapshotDone\022\026.IsSnapshotDon" + + "eRequest\032\027.IsSnapshotDoneResponse\022D\n\017Res" + + "toreSnapshot\022\027.RestoreSnapshotRequest\032\030." + + "RestoreSnapshotResponse\022V\n\025IsRestoreSnap" + + "shotDone\022\035.IsRestoreSnapshotDoneRequest\032" + + "\036.IsRestoreSnapshotDoneResponse\022>\n\rExecP" + + "rocedure\022\025.ExecProcedureRequest\032\026.ExecPr" + + "ocedureResponse\022E\n\024ExecProcedureWithRet\022", + "\025.ExecProcedureRequest\032\026.ExecProcedureRe" + + "sponse\022D\n\017IsProcedureDone\022\027.IsProcedureD" + + "oneRequest\032\030.IsProcedureDoneResponse\022D\n\017" + + "ModifyNamespace\022\027.ModifyNamespaceRequest" + + "\032\030.ModifyNamespaceResponse\022D\n\017CreateName" + + "space\022\027.CreateNamespaceRequest\032\030.CreateN" + + "amespaceResponse\022D\n\017DeleteNamespace\022\027.De" + + "leteNamespaceRequest\032\030.DeleteNamespaceRe" + + "sponse\022Y\n\026GetNamespaceDescriptor\022\036.GetNa" + + "mespaceDescriptorRequest\032\037.GetNamespaceD", + "escriptorResponse\022_\n\030ListNamespaceDescri" + + "ptors\022 .ListNamespaceDescriptorsRequest\032" + + "!.ListNamespaceDescriptorsResponse\022t\n\037Li" + + "stTableDescriptorsByNamespace\022\'.ListTabl" + + "eDescriptorsByNamespaceRequest\032(.ListTab" + + "leDescriptorsByNamespaceResponse\022b\n\031List" + + "TableNamesByNamespace\022!.ListTableNamesBy" + + "NamespaceRequest\032\".ListTableNamesByNames" + + "paceResponse\022>\n\rGetTableState\022\025.GetTable" + + "StateRequest\032\026.GetTableStateResponseBB\n*", + "org.apache.hadoop.hbase.protobuf.generat" + + "edB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -45293,50 +46508,62 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetTableNamesResponse_descriptor, new java.lang.String[] { "TableNames", }); - internal_static_GetClusterStatusRequest_descriptor = + internal_static_GetTableStateRequest_descriptor = getDescriptor().getMessageTypes().get(74); + internal_static_GetTableStateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTableStateRequest_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_GetTableStateResponse_descriptor = + getDescriptor().getMessageTypes().get(75); + internal_static_GetTableStateResponse_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTableStateResponse_descriptor, + new java.lang.String[] { "TableState", }); + internal_static_GetClusterStatusRequest_descriptor = + getDescriptor().getMessageTypes().get(76); internal_static_GetClusterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetClusterStatusRequest_descriptor, new java.lang.String[] { }); internal_static_GetClusterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(75); + getDescriptor().getMessageTypes().get(77); internal_static_GetClusterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetClusterStatusResponse_descriptor, new java.lang.String[] { "ClusterStatus", }); internal_static_IsMasterRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(76); + getDescriptor().getMessageTypes().get(78); internal_static_IsMasterRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsMasterRunningRequest_descriptor, new java.lang.String[] { }); internal_static_IsMasterRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(77); + getDescriptor().getMessageTypes().get(79); internal_static_IsMasterRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsMasterRunningResponse_descriptor, new java.lang.String[] { "IsMasterRunning", }); internal_static_ExecProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(78); + getDescriptor().getMessageTypes().get(80); internal_static_ExecProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ExecProcedureRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_ExecProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(79); + getDescriptor().getMessageTypes().get(81); internal_static_ExecProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ExecProcedureResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", "ReturnData", }); internal_static_IsProcedureDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(80); + getDescriptor().getMessageTypes().get(82); internal_static_IsProcedureDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsProcedureDoneRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_IsProcedureDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(81); + getDescriptor().getMessageTypes().get(83); internal_static_IsProcedureDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsProcedureDoneResponse_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index 10274b4..6790838 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -3096,619 +3096,6 @@ public final class ZooKeeperProtos { // @@protoc_insertion_point(class_scope:SplitLogTask) } - public interface TableOrBuilder - extends com.google.protobuf.MessageOrBuilder 
{
-
-    // required .Table.State state = 1 [default = ENABLED];
-    /**
-     * <code>required .Table.State state = 1 [default = ENABLED];</code>
-     *
-     * <pre>
-     * This is the table's state.  If no znode for a table,
-     * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-     * for more.
-     * </pre>
-     */
-    boolean hasState();
-    /**
-     * <code>required .Table.State state = 1 [default = ENABLED];</code>
-     *
-     * <pre>
-     * This is the table's state.  If no znode for a table,
-     * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-     * for more.
-     * </pre>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState();
-  }
-  /**
-   * Protobuf type {@code Table}
-   *
-   * <pre>
-   **
-   * The znode that holds state of table.
-   * </pre>
- */ - public static final class Table extends - com.google.protobuf.GeneratedMessage - implements TableOrBuilder { - // Use Table.newBuilder() to construct. - private Table(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Table(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Table defaultInstance; - public static Table getDefaultInstance() { - return defaultInstance; - } - - public Table getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Table( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - state_ = value; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser
<Table>() {
-      public Table parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new Table(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<Table> getParserForType() {
-      return PARSER;
-    }
-
-    /**
-     * Protobuf enum {@code Table.State}
-     *
-     * <pre>
-     * Table's current state
-     * </pre>
- */ - public enum State - implements com.google.protobuf.ProtocolMessageEnum { - /** - * ENABLED = 0; - */ - ENABLED(0, 0), - /** - * DISABLED = 1; - */ - DISABLED(1, 1), - /** - * DISABLING = 2; - */ - DISABLING(2, 2), - /** - * ENABLING = 3; - */ - ENABLING(3, 3), - ; - - /** - * ENABLED = 0; - */ - public static final int ENABLED_VALUE = 0; - /** - * DISABLED = 1; - */ - public static final int DISABLED_VALUE = 1; - /** - * DISABLING = 2; - */ - public static final int DISABLING_VALUE = 2; - /** - * ENABLING = 3; - */ - public static final int ENABLING_VALUE = 3; - - - public final int getNumber() { return value; } - - public static State valueOf(int value) { - switch (value) { - case 0: return ENABLED; - case 1: return DISABLED; - case 2: return DISABLING; - case 3: return ENABLING; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public State findValueByNumber(int number) { - return State.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDescriptor().getEnumTypes().get(0); - } - - private static final State[] VALUES = values(); - - public static State valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private State(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:Table.State) - } - - private int bitField0_; - // required .Table.State state = 1 [default = ENABLED]; - public static final int STATE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_; - /** - * required .Table.State state = 1 [default = ENABLED]; - * - *
-     * This is the table's state.  If no znode for a table,
-     * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-     * for more.
-     * </pre>
-     */
-    public boolean hasState() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    /**
-     * <code>required .Table.State state = 1 [default = ENABLED];</code>
-     *
-     * <pre>
-     * This is the table's state.  If no znode for a table,
-     * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-     * for more.
-     * </pre>
- */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() { - return state_; - } - - private void initFields() { - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasState()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, state_.getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, state_.getNumber()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) obj; - - boolean result = true; - result = result && (hasState() == other.hasState()); - if (hasState()) { - result = result && - (getState() == other.getState()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasState()) { - hash = (37 * hash) + STATE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getState()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code Table} - * - *
-     **
-     * The znode that holds state of table.
-     * </pre>
- */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.state_ = state_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance()) return this; - if (other.hasState()) { - setState(other.getState()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasState()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - 
com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required .Table.State state = 1 [default = ENABLED]; - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; - /** - * required .Table.State state = 1 [default = ENABLED]; - * - *
-       * This is the table's state.  If no znode for a table,
-       * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-       * for more.
-       * </pre>
-       */
-      public boolean hasState() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * <code>required .Table.State state = 1 [default = ENABLED];</code>
-       *
-       * <pre>
-       * This is the table's state.  If no znode for a table,
-       * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-       * for more.
-       * </pre>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() {
-        return state_;
-      }
-      /**
-       * <code>required .Table.State state = 1 [default = ENABLED];</code>
-       *
-       * <pre>
-       * This is the table's state.  If no znode for a table,
-       * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-       * for more.
-       * </pre>
-       */
-      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value) {
-        if (value == null) {
-          throw new NullPointerException();
-        }
-        bitField0_ |= 0x00000001;
-        state_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>required .Table.State state = 1 [default = ENABLED];</code>
-       *
-       * <pre>
-       * This is the table's state.  If no znode for a table,
-       * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-       * for more.
-       * </pre>
- */ - public Builder clearState() { - bitField0_ = (bitField0_ & ~0x00000001); - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:Table) - } - - static { - defaultInstance = new Table(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:Table) - } - public interface ReplicationPeerOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -9366,11 +8753,6 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SplitLogTask_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor - internal_static_Table_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_Table_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor internal_static_ReplicationPeer_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -9425,28 +8807,25 @@ public final class ZooKeeperProtos { "de:\007UNKNOWN\"C\n\005State\022\016\n\nUNASSIGNED\020\000\022\t\n\005" + "OWNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004", "\">\n\014RecoveryMode\022\013\n\007UNKNOWN\020\000\022\021\n\rLOG_SPL" + - "ITTING\020\001\022\016\n\nLOG_REPLAY\020\002\"n\n\005Table\022$\n\005sta" + - "te\030\001 \002(\0162\014.Table.State:\007ENABLED\"?\n\005State" + - "\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING" + - "\020\002\022\014\n\010ENABLING\020\003\"\215\001\n\017ReplicationPeer\022\022\n\n" + - "clusterkey\030\001 \002(\t\022\037\n\027replicationEndpointI" + - "mpl\030\002 \001(\t\022\035\n\004data\030\003 \003(\0132\017.BytesBytesPair" + - "\022&\n\rconfiguration\030\004 \003(\0132\017.NameStringPair" + - "\"^\n\020ReplicationState\022&\n\005state\030\001 \002(\0162\027.Re" + - "plicationState.State\"\"\n\005State\022\013\n\007ENABLED", - "\020\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationHLogPosit" + - "ion\022\020\n\010position\030\001 \002(\003\"%\n\017ReplicationLock" + - "\022\022\n\nlock_owner\030\001 \002(\t\"\230\001\n\tTableLock\022\036\n\nta" + - "ble_name\030\001 \001(\0132\n.TableName\022\037\n\nlock_owner" + - "\030\002 \001(\0132\013.ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021" + - "\n\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013cr" + - "eate_time\030\006 \001(\003\";\n\017StoreSequenceId\022\023\n\013fa" + - "mily_name\030\001 \002(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026" + - "RegionStoreSequenceIds\022 \n\030last_flushed_s" + - "equence_id\030\001 \002(\004\022+\n\021store_sequence_id\030\002 ", - "\003(\0132\020.StoreSequenceIdBE\n*org.apache.hado" + - "op.hbase.protobuf.generatedB\017ZooKeeperPr" + - "otosH\001\210\001\001\240\001\001" + "ITTING\020\001\022\016\n\nLOG_REPLAY\020\002\"\215\001\n\017Replication" + + "Peer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027replicationE" + + "ndpointImpl\030\002 \001(\t\022\035\n\004data\030\003 \003(\0132\017.BytesB" + + "ytesPair\022&\n\rconfiguration\030\004 \003(\0132\017.NameSt" + + "ringPair\"^\n\020ReplicationState\022&\n\005state\030\001 " + + "\002(\0162\027.ReplicationState.State\"\"\n\005State\022\013\n" + + "\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationH" + + "LogPosition\022\020\n\010position\030\001 \002(\003\"%\n\017Replica" + + 
"tionLock\022\022\n\nlock_owner\030\001 \002(\t\"\230\001\n\tTableLo", + "ck\022\036\n\ntable_name\030\001 \001(\0132\n.TableName\022\037\n\nlo" + + "ck_owner\030\002 \001(\0132\013.ServerName\022\021\n\tthread_id" + + "\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001" + + "(\t\022\023\n\013create_time\030\006 \001(\003\";\n\017StoreSequence" + + "Id\022\023\n\013family_name\030\001 \002(\014\022\023\n\013sequence_id\030\002" + + " \002(\004\"g\n\026RegionStoreSequenceIds\022 \n\030last_f" + + "lushed_sequence_id\030\001 \002(\004\022+\n\021store_sequen" + + "ce_id\030\002 \003(\0132\020.StoreSequenceIdBE\n*org.apa" + + "che.hadoop.hbase.protobuf.generatedB\017Zoo" + + "KeeperProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -9477,50 +8856,44 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SplitLogTask_descriptor, new java.lang.String[] { "State", "ServerName", "Mode", }); - internal_static_Table_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_Table_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_Table_descriptor, - new java.lang.String[] { "State", }); internal_static_ReplicationPeer_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(4); internal_static_ReplicationPeer_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationPeer_descriptor, new java.lang.String[] { "Clusterkey", "ReplicationEndpointImpl", "Data", "Configuration", }); internal_static_ReplicationState_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(5); internal_static_ReplicationState_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationState_descriptor, new java.lang.String[] { "State", }); internal_static_ReplicationHLogPosition_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(6); internal_static_ReplicationHLogPosition_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationHLogPosition_descriptor, new java.lang.String[] { "Position", }); internal_static_ReplicationLock_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(7); internal_static_ReplicationLock_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationLock_descriptor, new java.lang.String[] { "LockOwner", }); internal_static_TableLock_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(8); internal_static_TableLock_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableLock_descriptor, new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", }); internal_static_StoreSequenceId_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(9); internal_static_StoreSequenceId_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StoreSequenceId_descriptor, new java.lang.String[] { "FamilyName", "SequenceId", }); 
internal_static_RegionStoreSequenceIds_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(10); internal_static_RegionStoreSequenceIds_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionStoreSequenceIds_descriptor, diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto index dbf00dc..f648067 100644 --- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto +++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto @@ -54,6 +54,18 @@ message RegionState { } } +message TableState { + // Table's current state + enum State { + ENABLED = 0; + DISABLED = 1; + DISABLING = 2; + ENABLING = 3; + } + // This is the table's state. + required State state = 1 [default = ENABLED]; +} + message RegionInTransition { required RegionSpecifier spec = 1; required RegionState region_state = 2; diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 94ea860..85daf43 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -326,6 +326,14 @@ message GetTableNamesResponse { repeated TableName table_names = 1; } +message GetTableStateRequest { + required TableName table_name = 1; +} + +message GetTableStateResponse { + required TableState table_state = 1; +} + message GetClusterStatusRequest { } @@ -565,4 +573,8 @@ service MasterService { /** returns a list of tables for a given namespace*/ rpc ListTableNamesByNamespace(ListTableNamesByNamespaceRequest) returns(ListTableNamesByNamespaceResponse); + + /** returns table state */ + rpc GetTableState(GetTableStateRequest) + returns(GetTableStateResponse); } diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto index 4d727c6..e06a19c 100644 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -81,23 +81,6 @@ message SplitLogTask { } /** - * The znode that holds state of table. - */ -message Table { - // Table's current state - enum State { - ENABLED = 0; - DISABLED = 1; - DISABLING = 2; - ENABLING = 3; - } - // This is the table's state. If no znode for a table, - // its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class - // for more. - required State state = 1 [default = ENABLED]; -} - -/** * Used by replication. Holds a replication peer key. */ message ReplicationPeer { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java index 2642e29..1019b2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java @@ -55,12 +55,4 @@ public interface CoordinatedStateManager { * @return instance of Server coordinated state manager runs within */ Server getServer(); - - /** - * Returns implementation of TableStateManager. 
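[Editorial note: the hunks above move the table-state definition out of ZooKeeper.proto — ClusterStatus.proto gains a TableState message, and Master.proto gains a GetTableState RPC so clients can ask the master directly instead of reading a znode. As a rough illustration only (not part of the patch), a caller could assemble the new request from the generated MasterProtos classes; ProtobufUtil.toProtoTableName is assumed to be the existing TableName-to-proto helper, the inverse of the ProtobufUtil.toTableName call this patch uses in MasterRpcServices:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest;

public class GetTableStateRequestSketch {
  // Builds the message declared above in Master.proto:
  //   required TableName table_name = 1;
  static GetTableStateRequest buildRequest(TableName tableName) {
    return GetTableStateRequest.newBuilder()
        .setTableName(ProtobufUtil.toProtoTableName(tableName))
        .build();
  }
}
]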
- * @throws InterruptedException if operation is interrupted - * @throws CoordinatedStateException if error happens in underlying coordination mechanism - */ - TableStateManager getTableStateManager() throws InterruptedException, - CoordinatedStateException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java deleted file mode 100644 index 56cd4ae..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java +++ /dev/null @@ -1,115 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; - -import java.io.InterruptedIOException; -import java.util.Set; - -/** - * Helper class for table state management for operations running inside - * RegionServer or HMaster. - * Depending on implementation, fetches information from HBase system table, - * local data store, ZooKeeper ensemble or somewhere else. - * Code running on client side (with no coordinated state context) shall instead use - * {@link org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader} - */ -@InterfaceAudience.Private -public interface TableStateManager { - - /** - * Sets the table into desired state. Fails silently if the table is already in this state. - * @param tableName table to process - * @param state new state of this table - * @throws CoordinatedStateException if error happened when trying to set table state - */ - void setTableState(TableName tableName, ZooKeeperProtos.Table.State state) - throws CoordinatedStateException; - - /** - * Sets the specified table into the newState, but only if the table is already in - * one of the possibleCurrentStates (otherwise no operation is performed). - * @param tableName table to process - * @param newState new state for the table - * @param states table should be in one of these states for the operation - * to be performed - * @throws CoordinatedStateException if error happened while performing operation - * @return true if operation succeeded, false otherwise - */ - boolean setTableStateIfInStates(TableName tableName, ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... states) - throws CoordinatedStateException; - - /** - * Sets the specified table into the newState, but only if the table is NOT in - * one of the possibleCurrentStates (otherwise no operation is performed). 
- * @param tableName table to process - * @param newState new state for the table - * @param states table should NOT be in one of these states for the operation - * to be performed - * @throws CoordinatedStateException if error happened while performing operation - * @return true if operation succeeded, false otherwise - */ - boolean setTableStateIfNotInStates(TableName tableName, ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... states) - throws CoordinatedStateException; - - /** - * @return true if the table is in any one of the listed states, false otherwise. - */ - boolean isTableState(TableName tableName, ZooKeeperProtos.Table.State... states); - - /** - * Mark table as deleted. Fails silently if the table is not currently marked as disabled. - * @param tableName table to be deleted - * @throws CoordinatedStateException if error happened while performing operation - */ - void setDeletedTable(TableName tableName) throws CoordinatedStateException; - - /** - * Checks if table is present. - * - * @param tableName table we're checking - * @return true if the table is present, false otherwise - */ - boolean isTablePresent(TableName tableName); - - /** - * @return set of tables which are in any one of the listed states, empty Set if none - */ - Set getTablesInStates(ZooKeeperProtos.Table.State... states) - throws InterruptedIOException, CoordinatedStateException; - - /** - * If the table is found in the given state the in-memory state is removed. This - * helps in cases where CreateTable is to be retried by the client in case of - * failures. If deletePermanentState is true - the flag kept permanently is - * also reset. - * - * @param tableName table we're working on - * @param states if table isn't in any one of these states, operation aborts - * @param deletePermanentState if true, reset the permanent flag - * @throws CoordinatedStateException if error happened in underlying coordination engine - */ - void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states, - boolean deletePermanentState) - throws CoordinatedStateException; -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java index 91e1f2a..89bdfa2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java @@ -194,6 +194,11 @@ class CoprocessorHConnection implements ClusterConnection { } @Override + public TableState getTableState(TableName tableName) throws IOException { + return delegate.getTableState(tableName); + } + + @Override public HTableDescriptor[] listTables() throws IOException { return delegate.listTables(); } @@ -448,4 +453,4 @@ class CoprocessorHConnection implements ClusterConnection { public AsyncProcess getAsyncProcess() { return delegate.getAsyncProcess(); } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java index 1891941..5680683 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java @@ -18,10 +18,8 @@ package org.apache.hadoop.hbase.coordination; import 
org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.TableStateManager; /** * Base class for {@link org.apache.hadoop.hbase.CoordinatedStateManager} implementations. @@ -48,8 +46,4 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan public Server getServer() { return null; } - - @Override - public abstract TableStateManager getTableStateManager() throws InterruptedException, - CoordinatedStateException; -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java index 4d62e54..e539130 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java @@ -18,12 +18,8 @@ package org.apache.hadoop.hbase.coordination; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.TableStateManager; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.zookeeper.KeeperException; /** * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}. @@ -43,14 +39,4 @@ public class ZkCoordinatedStateManager extends BaseCoordinatedStateManager { public Server getServer() { return server; } - - @Override - public TableStateManager getTableStateManager() throws InterruptedException, - CoordinatedStateException { - try { - return new ZKTableStateManager(server.getZooKeeper()); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 53f159a..8a29fc6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; -import java.io.InterruptedIOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -41,13 +40,13 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -60,23 +59,21 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import 
org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcClient.FailedServerException; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper; import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer; import org.apache.hadoop.hbase.master.handler.DisableTableHandler; import org.apache.hadoop.hbase.master.handler.EnableTableHandler; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; @@ -91,8 +88,6 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.ipc.RemoteException; import org.apache.zookeeper.KeeperException; -import com.google.common.annotations.VisibleForTesting; - /** * Manages and performs region assignment. * Related communications with regionserver are all done over RPC. @@ -217,14 +212,14 @@ public class AssignmentManager { * @param service Executor service * @param metricsMaster metrics manager * @param tableLockManager TableLock manager - * @throws CoordinatedStateException * @throws IOException */ public AssignmentManager(Server server, ServerManager serverManager, final LoadBalancer balancer, final ExecutorService service, MetricsMaster metricsMaster, - final TableLockManager tableLockManager) - throws IOException, CoordinatedStateException { + final TableLockManager tableLockManager, + final TableStateManager tableStateManager) + throws IOException { this.server = server; this.serverManager = serverManager; this.executorService = service; @@ -236,15 +231,9 @@ public class AssignmentManager { this.shouldAssignRegionsWithFavoredNodes = conf.getClass( HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals( FavoredNodeLoadBalancer.class); - try { - if (server.getCoordinatedStateManager() != null) { - this.tableStateManager = server.getCoordinatedStateManager().getTableStateManager(); - } else { - this.tableStateManager = null; - } - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } + + this.tableStateManager = tableStateManager; + // This is the max attempts, not retries, so it should be at least 1. 
this.maximumAttempts = Math.max(1, this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10)); @@ -402,10 +391,9 @@ public class AssignmentManager { * @throws IOException * @throws KeeperException * @throws InterruptedException - * @throws CoordinatedStateException */ void joinCluster() throws IOException, - KeeperException, InterruptedException, CoordinatedStateException { + KeeperException, InterruptedException { long startTime = System.currentTimeMillis(); // Concurrency note: In the below the accesses on regionsInTransition are // outside of a synchronization block where usually all accesses to RIT are @@ -440,10 +428,9 @@ public class AssignmentManager { * Map of dead servers and their regions. Can be null. * @throws IOException * @throws InterruptedException - * @throws CoordinatedStateException */ boolean processDeadServersAndRegionsInTransition(final Set deadServers) - throws IOException, InterruptedException, CoordinatedStateException { + throws IOException, InterruptedException { boolean failover = !serverManager.getDeadServers().isEmpty(); if (failover) { // This may not be a failover actually, especially if meta is on this master. @@ -512,8 +499,8 @@ public class AssignmentManager { if (!failover) { disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates( - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING, - ZooKeeperProtos.Table.State.ENABLING); + TableState.State.DISABLED, TableState.State.DISABLING, + TableState.State.ENABLING); // Clean re/start, mark all user regions closed before reassignment allRegions = regionStates.closeAllUserRegions( @@ -731,7 +718,7 @@ public class AssignmentManager { for (RegionState state: states) { HRegionInfo region = state.getRegion(); regionStates.updateRegionState( - region, State.PENDING_OPEN, destination); + region, RegionState.State.PENDING_OPEN, destination); List favoredNodes = ServerName.EMPTY_SERVER_LIST; if (this.shouldAssignRegionsWithFavoredNodes) { favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region); @@ -848,7 +835,7 @@ public class AssignmentManager { if (!serverManager.isServerOnline(server)) { LOG.debug("Offline " + region.getRegionNameAsString() + ", no need to unassign since it's on a dead server: " + server); - regionStates.updateRegionState(region, State.OFFLINE); + regionStates.updateRegionState(region, RegionState.State.OFFLINE); return; } try { @@ -872,7 +859,7 @@ public class AssignmentManager { || t instanceof ServerNotRunningYetException) { LOG.debug("Offline " + region.getRegionNameAsString() + ", it's not any more on " + server, t); - regionStates.updateRegionState(region, State.OFFLINE); + regionStates.updateRegionState(region, RegionState.State.OFFLINE); return; } else if (t instanceof FailedServerException || t instanceof RegionAlreadyInTransitionException) { @@ -906,7 +893,7 @@ public class AssignmentManager { + region.getRegionNameAsString() + " since interrupted", ie); Thread.currentThread().interrupt(); if (state != null) { - regionStates.updateRegionState(region, State.FAILED_CLOSE); + regionStates.updateRegionState(region, RegionState.State.FAILED_CLOSE); } return; } @@ -922,7 +909,7 @@ public class AssignmentManager { } // Run out of attempts if (state != null) { - regionStates.updateRegionState(region, State.FAILED_CLOSE); + regionStates.updateRegionState(region, RegionState.State.FAILED_CLOSE); } } @@ -1013,7 +1000,7 @@ public class AssignmentManager { Thread.currentThread().interrupt(); } } - 
regionStates.updateRegionState(region, State.FAILED_OPEN); + regionStates.updateRegionState(region, RegionState.State.FAILED_OPEN); return; } // In case of assignment from EnableTableHandler table state is ENABLING. Any how @@ -1024,7 +1011,7 @@ public class AssignmentManager { // will not be in ENABLING or ENABLED state. TableName tableName = region.getTable(); if (!tableStateManager.isTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED, ZooKeeperProtos.Table.State.ENABLING)) { + TableState.State.ENABLED, TableState.State.ENABLING)) { LOG.debug("Setting table " + tableName + " to ENABLED state."); setEnabledTable(tableName); } @@ -1032,7 +1019,7 @@ public class AssignmentManager { " to " + plan.getDestination().toString()); // Transition RegionState to PENDING_OPEN regionStates.updateRegionState(region, - State.PENDING_OPEN, plan.getDestination()); + RegionState.State.PENDING_OPEN, plan.getDestination()); boolean needNewPlan; final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() + @@ -1107,7 +1094,7 @@ public class AssignmentManager { } catch (InterruptedException ie) { LOG.warn("Failed to assign " + region.getRegionNameAsString() + " since interrupted", ie); - regionStates.updateRegionState(region, State.FAILED_OPEN); + regionStates.updateRegionState(region, RegionState.State.FAILED_OPEN); Thread.currentThread().interrupt(); return; } @@ -1143,7 +1130,7 @@ public class AssignmentManager { LOG.warn("Failed to get region plan", e); } if (newPlan == null) { - regionStates.updateRegionState(region, State.FAILED_OPEN); + regionStates.updateRegionState(region, RegionState.State.FAILED_OPEN); LOG.warn("Unable to find a viable location to assign region " + region.getRegionNameAsString()); return; @@ -1153,7 +1140,7 @@ public class AssignmentManager { // Clean out plan we failed execute and one that doesn't look like it'll // succeed anyways; we need a new plan! 
// Transition back to OFFLINE - regionStates.updateRegionState(region, State.OFFLINE); + regionStates.updateRegionState(region, RegionState.State.OFFLINE); plan = newPlan; } else if(plan.getDestination().equals(newPlan.getDestination()) && previousException instanceof FailedServerException) { @@ -1165,7 +1152,7 @@ public class AssignmentManager { } catch (InterruptedException ie) { LOG.warn("Failed to assign " + region.getRegionNameAsString() + " since interrupted", ie); - regionStates.updateRegionState(region, State.FAILED_OPEN); + regionStates.updateRegionState(region, RegionState.State.FAILED_OPEN); Thread.currentThread().interrupt(); return; } @@ -1173,7 +1160,7 @@ public class AssignmentManager { } } // Run out of attempts - regionStates.updateRegionState(region, State.FAILED_OPEN); + regionStates.updateRegionState(region, RegionState.State.FAILED_OPEN); } finally { metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTimeMillis() - startTime); } @@ -1181,8 +1168,8 @@ public class AssignmentManager { private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) { if (this.tableStateManager.isTableState(region.getTable(), - ZooKeeperProtos.Table.State.DISABLED, - ZooKeeperProtos.Table.State.DISABLING) || replicasToClose.contains(region)) { + TableState.State.DISABLED, + TableState.State.DISABLING) || replicasToClose.contains(region)) { LOG.info("Table " + region.getTable() + " is disabled or disabling;" + " skipping assign of " + region.getRegionNameAsString()); offlineDisabledRegion(region); @@ -1337,7 +1324,7 @@ public class AssignmentManager { } } state = regionStates.updateRegionState( - region, State.PENDING_CLOSE); + region, RegionState.State.PENDING_CLOSE); } else if (state.isFailedOpen()) { // The region is not open yet regionOffline(region); @@ -1355,7 +1342,7 @@ public class AssignmentManager { // Region is expected to be reassigned afterwards if (!replicasToClose.contains(region) - && regionStates.isRegionInState(region, State.OFFLINE)) { + && regionStates.isRegionInState(region, RegionState.State.OFFLINE)) { assign(region); } } @@ -1381,7 +1368,7 @@ public class AssignmentManager { public boolean waitForAssignment(HRegionInfo regionInfo) throws InterruptedException { while (!regionStates.isRegionOnline(regionInfo)) { - if (regionStates.isRegionInState(regionInfo, State.FAILED_OPEN) + if (regionStates.isRegionInState(regionInfo, RegionState.State.FAILED_OPEN) || this.server.isStopped()) { return false; } @@ -1526,7 +1513,7 @@ public class AssignmentManager { for (HRegionInfo hri : regionsFromMetaScan) { TableName tableName = hri.getTable(); if (!tableStateManager.isTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED)) { + TableState.State.ENABLED)) { setEnabledTable(tableName); } } @@ -1571,14 +1558,14 @@ public class AssignmentManager { * @throws IOException */ Set rebuildUserRegions() throws - IOException, KeeperException, CoordinatedStateException { + IOException, KeeperException { Set disabledOrEnablingTables = tableStateManager.getTablesInStates( - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.ENABLING); + TableState.State.DISABLED, TableState.State.ENABLING); Set disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates( - ZooKeeperProtos.Table.State.DISABLED, - ZooKeeperProtos.Table.State.DISABLING, - ZooKeeperProtos.Table.State.ENABLING); + TableState.State.DISABLED, + TableState.State.DISABLING, + TableState.State.ENABLING); // Region assignment from META List results = 
MetaTableAccessor.fullScanOfMeta(server.getShortCircuitConnection()); @@ -1613,11 +1600,11 @@ public class AssignmentManager { HRegionInfo regionInfo = hrl.getRegionInfo(); if (regionInfo == null) continue; int replicaId = regionInfo.getReplicaId(); - State state = RegionStateStore.getRegionState(result, replicaId); + RegionState.State state = RegionStateStore.getRegionState(result, replicaId); // keep a track of replicas to close. These were the replicas of the split parents // from the previous life of the master. The master should have closed them before // but it couldn't maybe because it crashed - if (replicaId == 0 && state.equals(State.SPLIT)) { + if (replicaId == 0 && state.equals(RegionState.State.SPLIT)) { for (HRegionLocation h : locations) { replicasToClose.add(h.getRegionInfo()); } @@ -1625,7 +1612,7 @@ public class AssignmentManager { ServerName lastHost = hrl.getServerName(); ServerName regionLocation = RegionStateStore.getRegionServer(result, replicaId); regionStates.createRegionState(regionInfo, state, regionLocation, lastHost); - if (!regionStates.isRegionInState(regionInfo, State.OPEN)) { + if (!regionStates.isRegionInState(regionInfo, RegionState.State.OPEN)) { // Region is not open (either offline or in transition), skip continue; } @@ -1643,7 +1630,7 @@ public class AssignmentManager { // this will be used in rolling restarts if (!disabledOrDisablingOrEnabling.contains(tableName) && !getTableStateManager().isTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED)) { + TableState.State.ENABLED)) { setEnabledTable(tableName); } } @@ -1660,9 +1647,9 @@ public class AssignmentManager { * @throws IOException */ private void recoverTableInDisablingState() - throws KeeperException, IOException, CoordinatedStateException { + throws KeeperException, IOException { Set disablingTables = - tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLING); + tableStateManager.getTablesInStates(TableState.State.DISABLING); if (disablingTables.size() != 0) { for (TableName tableName : disablingTables) { // Recover by calling DisableTableHandler @@ -1684,9 +1671,9 @@ public class AssignmentManager { * @throws IOException */ private void recoverTableInEnablingState() - throws KeeperException, IOException, CoordinatedStateException { + throws KeeperException, IOException { Set enablingTables = tableStateManager. 
- getTablesInStates(ZooKeeperProtos.Table.State.ENABLING); + getTablesInStates(TableState.State.ENABLING); if (enablingTables.size() != 0) { for (TableName tableName : enablingTables) { // Recover by calling EnableTableHandler @@ -1734,7 +1721,7 @@ public class AssignmentManager { if (!serverManager.isServerOnline(regionState.getServerName())) { continue; // SSH will handle it } - State state = regionState.getState(); + RegionState.State state = regionState.getState(); LOG.info("Processing " + regionState); switch (state) { case PENDING_OPEN: @@ -2020,13 +2007,13 @@ public class AssignmentManager { it.remove(); } else { if (tableStateManager.isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { regionStates.regionOffline(hri); it.remove(); continue; } // Mark the region offline and assign it again by SSH - regionStates.updateRegionState(hri, State.OFFLINE); + regionStates.updateRegionState(hri, RegionState.State.OFFLINE); } } finally { lock.unlock(); @@ -2042,7 +2029,7 @@ public class AssignmentManager { HRegionInfo hri = plan.getRegionInfo(); TableName tableName = hri.getTable(); if (tableStateManager.isTableState(tableName, - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { LOG.info("Ignored moving region of disabling/disabled table " + tableName); return; @@ -2076,8 +2063,8 @@ public class AssignmentManager { protected void setEnabledTable(TableName tableName) { try { this.tableStateManager.setTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED); - } catch (CoordinatedStateException e) { + TableState.State.ENABLED); + } catch (IOException e) { // here we can abort as it is the start up flow String errorMsg = "Unable to ensure that the table " + tableName + " will be" + " enabled because of a ZooKeeper issue"; @@ -2098,19 +2085,19 @@ public class AssignmentManager { failedOpenTracker.put(encodedName, failedOpenCount); } if (failedOpenCount.incrementAndGet() >= maximumAttempts) { - regionStates.updateRegionState(hri, State.FAILED_OPEN); + regionStates.updateRegionState(hri, RegionState.State.FAILED_OPEN); // remove the tracking info to save memory, also reset // the count for next open initiative failedOpenTracker.remove(encodedName); } else { // Handle this the same as if it were opened and then closed. - RegionState regionState = regionStates.updateRegionState(hri, State.CLOSED); + RegionState regionState = regionStates.updateRegionState(hri, RegionState.State.CLOSED); if (regionState != null) { // When there are more than one region server a new RS is selected as the // destination and the same is updated in the region plan. 
(HBASE-5546) if (getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) || - replicasToClose.contains(hri)) { + TableState.State.DISABLED, TableState.State.DISABLING) || + replicasToClose.contains(hri)) { offlineDisabledRegion(hri); return; } @@ -2135,15 +2122,15 @@ public class AssignmentManager { // reset the count, if any failedOpenTracker.remove(hri.getEncodedName()); if (getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { invokeUnAssign(hri); } } private void onRegionClosed(final HRegionInfo hri) { if (getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) || - replicasToClose.contains(hri)) { + TableState.State.DISABLED, TableState.State.DISABLING) || + replicasToClose.contains(hri)) { offlineDisabledRegion(hri); return; } @@ -2165,21 +2152,21 @@ public class AssignmentManager { return "Not in state good for split"; } - regionStates.updateRegionState(a, State.SPLITTING_NEW, sn); - regionStates.updateRegionState(b, State.SPLITTING_NEW, sn); - regionStates.updateRegionState(p, State.SPLITTING); + regionStates.updateRegionState(a, RegionState.State.SPLITTING_NEW, sn); + regionStates.updateRegionState(b, RegionState.State.SPLITTING_NEW, sn); + regionStates.updateRegionState(p, RegionState.State.SPLITTING); if (code == TransitionCode.SPLIT) { if (TEST_SKIP_SPLIT_HANDLING) { return "Skipping split message, TEST_SKIP_SPLIT_HANDLING is set"; } - regionOffline(p, State.SPLIT); + regionOffline(p, RegionState.State.SPLIT); regionOnline(a, sn, 1); regionOnline(b, sn, 1); // User could disable the table before master knows the new region. if (getTableStateManager().isTableState(p.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { invokeUnAssign(a); invokeUnAssign(b); } else { @@ -2205,7 +2192,7 @@ public class AssignmentManager { regionOffline(b); if (getTableStateManager().isTableState(p.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { invokeUnAssign(p); } } @@ -2222,9 +2209,9 @@ public class AssignmentManager { return "Not in state good for merge"; } - regionStates.updateRegionState(a, State.MERGING); - regionStates.updateRegionState(b, State.MERGING); - regionStates.updateRegionState(p, State.MERGING_NEW, sn); + regionStates.updateRegionState(a, RegionState.State.MERGING); + regionStates.updateRegionState(b, RegionState.State.MERGING); + regionStates.updateRegionState(p, RegionState.State.MERGING_NEW, sn); String encodedName = p.getEncodedName(); if (code == TransitionCode.READY_TO_MERGE) { @@ -2232,13 +2219,13 @@ public class AssignmentManager { new PairOfSameType(a, b)); } else if (code == TransitionCode.MERGED) { mergingRegions.remove(encodedName); - regionOffline(a, State.MERGED); - regionOffline(b, State.MERGED); + regionOffline(a, RegionState.State.MERGED); + regionOffline(b, RegionState.State.MERGED); regionOnline(p, sn, 1); // User could disable the table before master knows the new region. 
if (getTableStateManager().isTableState(p.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { invokeUnAssign(p); } else { Callable mergeReplicasCallable = new Callable() { @@ -2264,7 +2251,7 @@ public class AssignmentManager { regionOffline(p); if (getTableStateManager().isTableState(p.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { invokeUnAssign(a); invokeUnAssign(b); } @@ -2381,7 +2368,7 @@ public class AssignmentManager { * if not null. If the specified state is null, the new state is Offline. * The specified state can be Split/Merged/Offline/null only. */ - private void regionOffline(final HRegionInfo regionInfo, final State state) { + private void regionOffline(final HRegionInfo regionInfo, final RegionState.State state) { regionStates.regionOffline(regionInfo, state); removeClosedRegion(regionInfo); // remove the region plan as well just in case. @@ -2391,7 +2378,7 @@ public class AssignmentManager { // Tell our listeners that a region was closed sendRegionClosedNotification(regionInfo); // also note that all the replicas of the primary should be closed - if (state != null && state.equals(State.SPLIT)) { + if (state != null && state.equals(RegionState.State.SPLIT)) { Collection c = new ArrayList(1); c.add(regionInfo); Map> map = regionStates.getRegionAssignments(c); @@ -2400,7 +2387,7 @@ public class AssignmentManager { replicasToClose.addAll(list); } } - else if (state != null && state.equals(State.MERGED)) { + else if (state != null && state.equals(RegionState.State.MERGED)) { Collection c = new ArrayList(1); c.add(regionInfo); Map> map = regionStates.getRegionAssignments(c); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index eca5999..e4a54d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.executor.ExecutorType; @@ -77,7 +78,6 @@ import org.apache.hadoop.hbase.ipc.RequestContext; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode; -import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.balancer.BalancerChore; import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore; import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; @@ -101,7 +101,6 @@ import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; -import 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy; @@ -225,6 +224,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server { // monitor for distributed procedures MasterProcedureManagerHost mpmHost; + // handle table states + private TableStateManager tableStateManager; + /** flag used in test cases in order to simulate RS failures during master initialization */ private volatile boolean initializationBeforeMetaAssignment = false; @@ -410,7 +412,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { this.loadBalancerTracker.start(); this.assignmentManager = new AssignmentManager(this, serverManager, this.balancer, this.service, this.metricsMaster, - this.tableLockManager); + this.tableLockManager, tableStateManager); this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager); @@ -487,6 +489,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server { // Invalidate all write locks held previously this.tableLockManager.reapWriteLocks(); + this.tableStateManager = new TableStateManager(this); + this.tableStateManager.start(); + status.setStatus("Initializing ZK system trackers"); initializeZKBasedSystemTrackers(); @@ -692,7 +697,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } else { // Region already assigned. We didn't assign it. Add to in-memory state. regionStates.updateRegionState( - HRegionInfo.FIRST_META_REGIONINFO, State.OPEN, currentMetaServer); + HRegionInfo.FIRST_META_REGIONINFO, RegionState.State.OPEN, currentMetaServer); this.assignmentManager.regionOnline( HRegionInfo.FIRST_META_REGIONINFO, currentMetaServer); } @@ -761,8 +766,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } private void enableMeta(TableName metaTableName) { - if (!this.assignmentManager.getTableStateManager().isTableState(metaTableName, - ZooKeeperProtos.Table.State.ENABLED)) { + if (!this.tableStateManager.isTableState(metaTableName, + TableState.State.ENABLED)) { this.assignmentManager.setEnabledTable(metaTableName); } } @@ -801,6 +806,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return this.fileSystemManager; } + @Override + public TableStateManager getTableStateManager() { + return tableStateManager; + } + /* * Start up all services. If any of these threads gets an unhandled exception * then they just die with a logged message. This should be fine because @@ -1479,7 +1489,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { throw new TableNotFoundException(tableName); } if (!getAssignmentManager().getTableStateManager(). 
- isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) { + isTableState(tableName, TableState.State.DISABLED)) { throw new TableNotDisabledException(tableName); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index bb4a09c..490aabe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -37,8 +37,10 @@ import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.MergeRegionException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; @@ -864,6 +866,24 @@ public class MasterRpcServices extends RSRpcServices } @Override + public MasterProtos.GetTableStateResponse getTableState(RpcController controller, MasterProtos.GetTableStateRequest request) throws ServiceException { + try { + master.checkServiceStarted(); + TableName tableName = ProtobufUtil.toTableName(request.getTableName()); + TableState.State state = master.getTableStateManager() + .getTableState(tableName); + if (state == null) + throw new TableNotFoundException(tableName); + MasterProtos.GetTableStateResponse.Builder builder = + MasterProtos.GetTableStateResponse.newBuilder(); + builder.setTableState(TableState.convert(new TableState(state))); + return builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c, IsCatalogJanitorEnabledRequest req) throws ServiceException { return IsCatalogJanitorEnabledResponse.newBuilder().setValue( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index c1334f5..213f7f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -66,6 +66,11 @@ public interface MasterServices extends Server { TableLockManager getTableLockManager(); /** + * @return Master's instance of {@link TableStateManager} + */ + TableStateManager getTableStateManager(); + + /** * @return Master's instance of {@link MasterCoprocessorHost} */ MasterCoprocessorHost getMasterCoprocessorHost(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index a29d675..17d6215 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -29,6 +29,8 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.classification.InterfaceAudience; @@ -40,16 +42,11 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - /** * Region state accountant. It holds the states of all regions in the memory. * In normal scenario, it should match the meta table and the true region states. @@ -223,14 +220,14 @@ public class RegionStates { */ public synchronized boolean isRegionOffline(final HRegionInfo hri) { return getRegionState(hri) == null || (!isRegionInTransition(hri) - && isRegionInState(hri, State.OFFLINE, State.CLOSED)); + && isRegionInState(hri, RegionState.State.OFFLINE, RegionState.State.CLOSED)); } /** * @return True if specified region is in one of the specified states. */ public boolean isRegionInState( - final HRegionInfo hri, final State... states) { + final HRegionInfo hri, final RegionState.State... states) { return isRegionInState(hri.getEncodedName(), states); } @@ -238,7 +235,7 @@ public class RegionStates { * @return True if specified region is in one of the specified states. */ public boolean isRegionInState( - final String encodedName, final State... states) { + final String encodedName, final RegionState.State... states) { RegionState regionState = getRegionState(encodedName); return isOneOfStates(regionState, states); } @@ -300,12 +297,12 @@ public class RegionStates { * @return the current state */ public synchronized RegionState createRegionState(final HRegionInfo hri, - State newState, ServerName serverName, ServerName lastHost) { - if (newState == null || (newState == State.OPEN && serverName == null)) { - newState = State.OFFLINE; + RegionState.State newState, ServerName serverName, ServerName lastHost) { + if (newState == null || (newState == RegionState.State.OPEN && serverName == null)) { + newState = RegionState.State.OFFLINE; } if (hri.isOffline() && hri.isSplit()) { - newState = State.SPLIT; + newState = RegionState.State.SPLIT; serverName = null; } String encodedName = hri.getEncodedName(); @@ -316,7 +313,7 @@ public class RegionStates { } else { regionState = new RegionState(hri, newState, serverName); regionStates.put(encodedName, regionState); - if (newState == State.OPEN) { + if (newState == RegionState.State.OPEN) { if (!serverName.equals(lastHost)) { LOG.warn("Open region's last host " + lastHost + " should be the same as the current one " + serverName @@ -328,7 +325,7 @@ public class RegionStates { } else if (!regionState.isUnassignable()) { regionsInTransition.put(encodedName, regionState); } - if (lastHost != null && newState != State.SPLIT) { + if (lastHost != null && newState != RegionState.State.SPLIT) { addToServerHoldings(lastHost, hri); } } @@ -339,7 +336,7 @@ public class RegionStates { * Update a region state. It will be put in transition if not already there. 
*/ public RegionState updateRegionState( - final HRegionInfo hri, final State state) { + final HRegionInfo hri, final RegionState.State state) { RegionState regionState = getRegionState(hri.getEncodedName()); return updateRegionState(hri, state, regionState == null ? null : regionState.getServerName()); @@ -349,7 +346,7 @@ public class RegionStates { * Update a region state. It will be put in transition if not already there. */ public RegionState updateRegionState( - final HRegionInfo hri, final State state, final ServerName serverName) { + final HRegionInfo hri, final RegionState.State state, final ServerName serverName) { return updateRegionState(hri, state, serverName, HConstants.NO_SEQNUM); } @@ -374,7 +371,7 @@ public class RegionStates { + " was opened on a dead server: " + serverName); return; } - updateRegionState(hri, State.OPEN, serverName, openSeqNum); + updateRegionState(hri, RegionState.State.OPEN, serverName, openSeqNum); synchronized (this) { regionsInTransition.remove(hri.getEncodedName()); @@ -490,26 +487,26 @@ public class RegionStates { * Split/Merged/Offline/null(=Offline)/SplittingNew/MergingNew. */ public void regionOffline( - final HRegionInfo hri, final State expectedState) { + final HRegionInfo hri, final RegionState.State expectedState) { Preconditions.checkArgument(expectedState == null || RegionState.isUnassignable(expectedState), "Offlined region should not be " + expectedState); - if (isRegionInState(hri, State.SPLITTING_NEW, State.MERGING_NEW)) { + if (isRegionInState(hri, RegionState.State.SPLITTING_NEW, RegionState.State.MERGING_NEW)) { // Remove it from all region maps deleteRegion(hri); return; } - State newState = - expectedState == null ? State.OFFLINE : expectedState; + RegionState.State newState = + expectedState == null ? RegionState.State.OFFLINE : expectedState; updateRegionState(hri, newState); synchronized (this) { regionsInTransition.remove(hri.getEncodedName()); ServerName oldServerName = regionAssignments.remove(hri); if (oldServerName != null && serverHoldings.containsKey(oldServerName) - && (newState == State.MERGED || newState == State.SPLIT + && (newState == RegionState.State.MERGED || newState == RegionState.State.SPLIT || hri.isMetaRegion() || tableStateManager.isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING))) { + TableState.State.DISABLED, TableState.State.DISABLING))) { // Offline the region only if it's merged/split, or the table is disabled/disabling. // Otherwise, offline it from this server only when it is online on a different server. 
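The guard this hunk rewrites is easier to read as a single predicate; a hedged restatement (removeFromHoldings is an illustrative name, not part of the patch):

    boolean removeFromHoldings = oldServerName != null
        && serverHoldings.containsKey(oldServerName)
        && (newState == RegionState.State.MERGED
            || newState == RegionState.State.SPLIT
            || hri.isMetaRegion()
            || tableStateManager.isTableState(hri.getTable(),
                TableState.State.DISABLED, TableState.State.DISABLING));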
LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName); @@ -536,7 +533,7 @@ public class RegionStates { // Offline open regions, no need to offline if SPLIT/MERGED/OFFLINE if (isRegionOnline(region)) { regionsToOffline.add(region); - } else if (isRegionInState(region, State.SPLITTING, State.MERGING)) { + } else if (isRegionInState(region, RegionState.State.SPLITTING, RegionState.State.MERGING)) { LOG.debug("Offline splitting/merging region " + getRegionState(region)); regionsToOffline.add(region); } @@ -778,7 +775,7 @@ public class RegionStates { Map allUserRegions = new HashMap(toBeClosed.size()); for (HRegionInfo hri: toBeClosed) { - RegionState regionState = updateRegionState(hri, State.CLOSED); + RegionState regionState = updateRegionState(hri, RegionState.State.CLOSED); allUserRegions.put(hri, regionState.getServerName()); } return allUserRegions; @@ -918,9 +915,9 @@ public class RegionStates { } } - static boolean isOneOfStates(RegionState regionState, State... states) { - State s = regionState != null ? regionState.getState() : null; - for (State state: states) { + static boolean isOneOfStates(RegionState regionState, RegionState.State... states) { + RegionState.State s = regionState != null ? regionState.getState() : null; + for (RegionState.State state: states) { if (s == state) return true; } return false; @@ -930,8 +927,8 @@ public class RegionStates { * Update a region state. It will be put in transition if not already there. */ private RegionState updateRegionState(final HRegionInfo hri, - final State state, final ServerName serverName, long openSeqNum) { - if (state == State.FAILED_CLOSE || state == State.FAILED_OPEN) { + final RegionState.State state, final ServerName serverName, long openSeqNum) { + if (state == RegionState.State.FAILED_CLOSE || state == RegionState.State.FAILED_OPEN) { LOG.warn("Failed to open/close " + hri.getShortNameToLog() + " on " + serverName + ", set to " + state); } @@ -952,8 +949,8 @@ public class RegionStates { // For these states, region should be properly closed. // There should be no log splitting issue. - if ((state == State.CLOSED || state == State.MERGED - || state == State.SPLIT) && lastAssignments.containsKey(encodedName)) { + if ((state == RegionState.State.CLOSED || state == RegionState.State.MERGED + || state == RegionState.State.SPLIT) && lastAssignments.containsKey(encodedName)) { ServerName last = lastAssignments.get(encodedName); if (last.equals(serverName)) { lastAssignments.remove(encodedName); @@ -964,7 +961,7 @@ public class RegionStates { } // Once a region is opened, record its last assignment right away. 
- if (serverName != null && state == State.OPEN) { + if (serverName != null && state == RegionState.State.OPEN) { ServerName last = lastAssignments.get(encodedName); if (!serverName.equals(last)) { lastAssignments.put(encodedName, serverName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index 99d794d..59fbf91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.master.handler.CreateTableHandler; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -257,7 +258,7 @@ public class TableNamespaceManager { } // Now check if the table is assigned, if not then fail fast - if (isTableAssigned()) { + if (isTableAssigned() && isTableEnabled()) { try { nsTable = new HTable(conf, TableName.NAMESPACE_TABLE_NAME); zkNamespaceManager = new ZKNamespaceManager(masterServices.getZooKeeper()); @@ -297,6 +298,12 @@ public class TableNamespaceManager { return false; } + private boolean isTableEnabled() throws IOException { + return masterServices.getTableStateManager().getTableState( + TableName.NAMESPACE_TABLE_NAME + ).equals(TableState.State.ENABLED); + } + private boolean isTableAssigned() { return !masterServices.getAssignmentManager() .getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME).isEmpty(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java new file mode 100644 index 0000000..6776b4b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -0,0 +1,216 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import java.io.IOException; +import java.util.Map; +import java.util.Set; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.TableState; + +/** + * Helper class used to manage table states. + * States are persisted in the table descriptor (tableinfo) and cached internally. + */ +@InterfaceAudience.Private +public class TableStateManager { + private static final Log LOG = LogFactory.getLog(TableStateManager.class); + private final TableDescriptors descriptors; + private final TableLockManager lockManager; + + private final Map tableStates = Maps.newConcurrentMap(); + + public TableStateManager(MasterServices master) { + this.descriptors = master.getTableDescriptors(); + this.lockManager = master.getTableLockManager(); + } + + public void start() throws IOException { + Map all = descriptors.getAll(); + for (HTableDescriptor table : all.values()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Adding table state: " + table.getTableName() + ": " + table.getTableState()); + } + tableStates.put(table.getTableName(), table.getTableState()); + } + } + + /** + * Set the table state to the provided value. + * The caller should hold the table's write lock. + * @param tableName table to change state for + * @param state new state + * @throws IOException + */ + public void setTableState(TableName tableName, TableState.State state) throws IOException { + synchronized (tableStates) { + HTableDescriptor descriptor = readDescriptor(tableName); + if (descriptor == null) { + throw new TableNotFoundException(tableName); + } + if (descriptor.getTableState() != state) { + descriptor.setTableState(state); + writeDescriptor(descriptor); + } + } + } + + /** + * Set the table state to the provided value, but only if the table is currently in one of the specified states. + * The caller should hold the table's write lock. + * @param tableName table to change state for + * @param newState new state + * @param states states the table must currently be in for the change to apply + * @return true if the state was changed + * @throws IOException + */ + public boolean setTableStateIfInStates(TableName tableName, TableState.State newState, TableState.State... states) throws IOException { + synchronized (tableStates) { + HTableDescriptor descriptor = readDescriptor(tableName); + if (descriptor == null) { + throw new TableNotFoundException(tableName); + } + if (isInStates(descriptor.getTableState(), states)) { + descriptor.setTableState(newState); + writeDescriptor(descriptor); + return true; + } else { + return false; + } + } + } + + /** + * Set the table state to the provided value, but only if the table is not in any of the specified states. + * The caller should hold the table's write lock. + * @param tableName table to change state for + * @param newState new state + * @param states states the table must not be in for the change to apply + * @return true if the state was changed + * @throws IOException + */ + public boolean setTableStateIfNotInStates(TableName tableName, TableState.State newState, TableState.State...
states) throws IOException { + synchronized (tableStates) { + HTableDescriptor descriptor = readDescriptor(tableName); + if (descriptor == null) { + throw new TableNotFoundException(tableName); + } + if (!isInStates(descriptor.getTableState(), states)) { + descriptor.setTableState(newState); + writeDescriptor(descriptor); + return true; + } else { + return false; + } + } + } + + public boolean isTableState(TableName tableName, TableState.State... states) { + TableState.State tableState = null; + try { + tableState = getTableState(tableName); + } catch (IOException e) { + LOG.error("Unable to get state for table " + tableName + "; the table probably does not exist", e); + return false; + } + return tableState != null && isInStates(tableState, states); + } + + public void setDeletedTable(TableName tableName) throws IOException { + TableState.State remove = tableStates.remove(tableName); + if (remove == null) { + LOG.warn("Moving table " + tableName + " state to deleted but it was " + + "already deleted"); + } + } + + public boolean isTablePresent(TableName tableName) throws IOException { + return getTableState(tableName) != null; + } + + /** + * Return all tables in given states. + * + * @param states filter by states + * @return tables in given states + * @throws IOException + */ + public Set getTablesInStates(TableState.State... states) throws IOException { + Set rv = Sets.newHashSet(); + for (Map.Entry entry : tableStates.entrySet()) { + if (isInStates(entry.getValue(), states)) + rv.add(entry.getKey()); + } + return rv; + } + + public TableState.State getTableState(TableName tableName) throws IOException { + TableState.State tableState = tableStates.get(tableName); + if (tableState == null) { + HTableDescriptor descriptor = readDescriptor(tableName); + if (descriptor != null) + tableState = descriptor.getTableState(); + } + return tableState; + } + + private boolean isInStates(TableState.State state, TableState.State... target) { + for (TableState.State tableState : target) { + if (state.equals(tableState)) + return true; + } + return false; + } + + /** + * Write descriptor in place, update cache of states. + * Write lock should be held by the caller. + * + * @param descriptor what to write + */ + private void writeDescriptor(HTableDescriptor descriptor) throws IOException { + TableName tableName = descriptor.getTableName(); + TableState.State state = descriptor.getTableState(); + descriptors.add(descriptor); + LOG.debug("Table " + tableName + " written descriptor for state " + state); + tableStates.put(tableName, state); + LOG.debug("Table " + tableName + " updated state to " + state); + } + + /** + * Read the current descriptor for a table, updating the cache of states.
+ * + * @param tableName table to read the descriptor for + * @return the descriptor, or null if the table does not exist + * @throws IOException + */ + private HTableDescriptor readDescriptor(TableName tableName) throws IOException { + HTableDescriptor descriptor = descriptors.get(tableName); + if (descriptor == null) + tableStates.remove(tableName); + else + tableStates.put(tableName, descriptor.getTableState()); + return descriptor; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java index 4c20dc5..bbf16f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java @@ -30,14 +30,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoordinatedStateException; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.AssignmentManager; @@ -47,7 +48,6 @@ import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.ModifyRegionUtils; @@ -108,8 +108,6 @@ public class CreateTableHandler extends EventHandler { if (MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) { throw new TableExistsException(tableName); } - - checkAndSetEnablingTable(assignmentManager, tableName); success = true; } finally { if (!success) { @@ -119,47 +117,6 @@ return this; } - static void checkAndSetEnablingTable(final AssignmentManager assignmentManager, - final TableName tableName) throws IOException { - // If we have multiple client threads trying to create the table at the - // same time, given the async nature of the operation, the table - // could be in a state where hbase:meta table hasn't been updated yet in - // the process() function. - // Use enabling state to tell if there is already a request for the same - // table in progress. This will introduce a new zookeeper call. Given - // createTable isn't a frequent operation, that should be ok. - // TODO: now that we have table locks, re-evaluate above -- table locks are not enough. - // We could have cleared the hbase.rootdir and not zk. How can we detect this case? - // Having to clean zk AND hdfs is awkward.
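The znode handshake removed above (the removal continues just below) is replaced by keeping the under-construction state in the table descriptor itself. A condensed, hedged sketch of the create/truncate flow that the following hunks install, with underConstruction being the name the patch uses:

    HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
    underConstruction.setTableState(TableState.State.ENABLING); // persisted with tableinfo, no znode
    // ... write the descriptor to the temp dir, create regions, move the table dir
    // into place, add the regions to hbase:meta, trigger assignment ...
    assignmentManager.getTableStateManager()
        .setTableState(tableName, TableState.State.ENABLED);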
- try { - if (!assignmentManager.getTableStateManager().setTableStateIfNotInStates(tableName, - ZooKeeperProtos.Table.State.ENABLING, - ZooKeeperProtos.Table.State.ENABLING, - ZooKeeperProtos.Table.State.ENABLED)) { - throw new TableExistsException(tableName); - } - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that the table will be" + - " enabling because of a ZooKeeper issue", e); - } - } - - static void removeEnablingTable(final AssignmentManager assignmentManager, - final TableName tableName) { - // Try deleting the enabling node in case of error - // If this does not happen then if the client tries to create the table - // again with the same Active master - // It will block the creation saying TableAlreadyExists. - try { - assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName, - ZooKeeperProtos.Table.State.ENABLING, false); - } catch (CoordinatedStateException e) { - // Keeper exception should not happen here - LOG.error("Got a keeper exception while removing the ENABLING table znode " - + tableName, e); - } - } - @Override public String toString() { String name = "UnknownServerName"; @@ -199,9 +156,6 @@ releaseTableLock(); LOG.info("Table, " + this.hTableDescriptor.getTableName() + ", creation " + (exception == null ? "successful" : "failed. " + exception)); - if (exception != null) { - removeEnablingTable(this.assignmentManager, this.hTableDescriptor.getTableName()); - } } /** @@ -224,9 +178,12 @@ FileSystem fs = fileSystemManager.getFileSystem(); // 1. Create Table Descriptor + // use a copy of the descriptor: the table is first created in the ENABLING state + HTableDescriptor underConstruction = new HTableDescriptor(this.hTableDescriptor); + underConstruction.setTableState(TableState.State.ENABLING); Path tempTableDir = FSUtils.getTableDir(tempdir, tableName); new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory( - tempTableDir, this.hTableDescriptor, false); + tempTableDir, underConstruction, false); Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName); // 2. Create Regions @@ -252,20 +209,15 @@ ModifyRegionUtils.assignRegions(assignmentManager, regionInfos); } - // 8. Set table enabled flag up in zk. - try { - assignmentManager.getTableStateManager().setTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED); - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that " + tableName + " will be" + - " enabled because of a ZooKeeper issue", e); - } + // 6.
Enable table + assignmentManager.getTableStateManager().setTableState(tableName, + TableState.State.ENABLED); } /** * Create any replicas for the regions (the default replicas that was * already created is passed to the method) - * @param hTableDescriptor + * @param hTableDescriptor descriptor to use * @param regions default replicas * @return the combined list of default and non-default replicas */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java index fb7aec8..f248eaf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java @@ -25,13 +25,13 @@ import java.util.concurrent.ExecutorService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; @@ -41,9 +41,7 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.TableLockManager; -import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.htrace.Trace; /** @@ -91,16 +89,11 @@ public class DisableTableHandler extends EventHandler { // DISABLED or ENABLED. 
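Mutual exclusion between concurrent enable/disable requests is now a compare-and-set against the descriptor-backed state rather than a znode transition; a hedged sketch of the disable-side guard that follows (tsm abbreviates the TableStateManager reference):

    // only an ENABLED table may enter DISABLING
    if (!tsm.setTableStateIfInStates(tableName,
        TableState.State.DISABLING, TableState.State.ENABLED)) {
      throw new TableNotEnabledException(tableName);
    }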
//TODO: reevaluate this since we have table locks now if (!skipTableStateCheck) { - try { - if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( - this.tableName, ZooKeeperProtos.Table.State.DISABLING, - ZooKeeperProtos.Table.State.ENABLED)) { - LOG.info("Table " + tableName + " isn't enabled; skipping disable"); - throw new TableNotEnabledException(this.tableName); - } - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that the table will be" + - " disabling because of a coordination engine issue", e); + if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( + this.tableName, TableState.State.DISABLING, + TableState.State.ENABLED)) { + LOG.info("Table " + tableName + " isn't enabled; skipping disable"); + throw new TableNotEnabledException(this.tableName); } } success = true; @@ -138,8 +131,6 @@ public class DisableTableHandler extends EventHandler { } } catch (IOException e) { LOG.error("Error trying to disable table " + this.tableName, e); - } catch (CoordinatedStateException e) { - LOG.error("Error trying to disable table " + this.tableName, e); } finally { releaseTableLock(); } @@ -155,10 +146,10 @@ public class DisableTableHandler extends EventHandler { } } - private void handleDisableTable() throws IOException, CoordinatedStateException { + private void handleDisableTable() throws IOException { // Set table disabling flag up in zk. this.assignmentManager.getTableStateManager().setTableState(this.tableName, - ZooKeeperProtos.Table.State.DISABLING); + TableState.State.DISABLING); boolean done = false; while (true) { // Get list of online regions that are of this table. Regions that are @@ -187,7 +178,7 @@ public class DisableTableHandler extends EventHandler { } // Flip the table to disabled if success. 
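Seen end to end, handleDisableTable is a three-step state machine whose final flip appears just below; a hedged outline in which the region-draining helpers are hypothetical names standing in for the unassign loop of this handler:

    tsm.setTableState(tableName, TableState.State.DISABLING);
    while (onlineRegionsRemain(tableName)) { // hypothetical predicate
      bulkUnassignRegions(tableName);        // hypothetical helper
    }
    tsm.setTableState(tableName, TableState.State.DISABLED);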
if (done) this.assignmentManager.getTableStateManager().setTableState(this.tableName, - ZooKeeperProtos.Table.State.DISABLED); + TableState.State.DISABLED); LOG.info("Disabled table, " + this.tableName + ", is done=" + done); } @@ -207,7 +198,7 @@ public class DisableTableHandler extends EventHandler { RegionStates regionStates = assignmentManager.getRegionStates(); for (HRegionInfo region: regions) { if (regionStates.isRegionInTransition(region) - && !regionStates.isRegionInState(region, State.FAILED_CLOSE)) { + && !regionStates.isRegionInState(region, org.apache.hadoop.hbase.master.RegionState.State.FAILED_CLOSE)) { continue; } final HRegionInfo hri = region; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java index b8edc0b..5771202 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java @@ -27,7 +27,6 @@ import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.AssignmentManager; @@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Pair; /** @@ -95,16 +94,8 @@ public class EnableTableHandler extends EventHandler { // retainAssignment is true only during recovery. In normal case it is false if (!this.skipTableStateCheck) { throw new TableNotFoundException(tableName); - } - try { - this.assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName, - ZooKeeperProtos.Table.State.ENABLING, true); - throw new TableNotFoundException(tableName); - } catch (CoordinatedStateException e) { - // TODO : Use HBCK to clear such nodes - LOG.warn("Failed to delete the ENABLING node for the table " + tableName - + ". The table will remain unusable. Run HBCK to manually fix the problem."); } + this.assignmentManager.getTableStateManager().setDeletedTable(tableName); } // There could be multiple client requests trying to disable or enable @@ -112,16 +103,11 @@ public class EnableTableHandler extends EventHandler { // After that, no other requests can be accepted until the table reaches // DISABLED or ENABLED. 
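EnableTableHandler applies the mirror-image guard, entering ENABLING only from DISABLED; a hedged sketch of the check installed just below:

    if (!tsm.setTableStateIfInStates(tableName,
        TableState.State.ENABLING, TableState.State.DISABLED)) {
      throw new TableNotDisabledException(tableName);
    }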
if (!skipTableStateCheck) { - try { - if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( - this.tableName, ZooKeeperProtos.Table.State.ENABLING, - ZooKeeperProtos.Table.State.DISABLED)) { - LOG.info("Table " + tableName + " isn't disabled; skipping enable"); - throw new TableNotDisabledException(this.tableName); - } - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that the table will be" + - " enabling because of a coordination engine issue", e); + if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( + this.tableName, TableState.State.ENABLING, + TableState.State.DISABLED)) { + LOG.info("Table " + tableName + " isn't disabled; skipping enable"); + throw new TableNotDisabledException(this.tableName); } } success = true; @@ -156,11 +142,7 @@ public class EnableTableHandler extends EventHandler { if (cpHost != null) { cpHost.postEnableTableHandler(this.tableName); } - } catch (IOException e) { - LOG.error("Error trying to enable the table " + this.tableName, e); - } catch (CoordinatedStateException e) { - LOG.error("Error trying to enable the table " + this.tableName, e); - } catch (InterruptedException e) { + } catch (IOException | InterruptedException e) { LOG.error("Error trying to enable the table " + this.tableName, e); } finally { releaseTableLock(); @@ -177,14 +159,14 @@ public class EnableTableHandler extends EventHandler { } } - private void handleEnableTable() throws IOException, CoordinatedStateException, + private void handleEnableTable() throws IOException, InterruptedException { // I could check table is disabling and if so, not enable but require // that user first finish disabling but that might be obnoxious. // Set table enabling flag up in zk. this.assignmentManager.getTableStateManager().setTableState(this.tableName, - ZooKeeperProtos.Table.State.ENABLING); + TableState.State.ENABLING); boolean done = false; ServerManager serverManager = ((HMaster)this.server).getServerManager(); // Get the regions of this table. We're done when all listed @@ -236,7 +218,7 @@ public class EnableTableHandler extends EventHandler { if (done) { // Flip the table to enabled. this.assignmentManager.getTableStateManager().setTableState( - this.tableName, ZooKeeperProtos.Table.State.ENABLED); + this.tableName, TableState.State.ENABLED); LOG.info("Table '" + this.tableName + "' was successfully enabled. 
Status: done=" + done); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java index 48fb26f..50b8ef9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java @@ -36,12 +36,12 @@ import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Private @@ -64,7 +64,7 @@ public class ModifyTableHandler extends TableEventHandler { // Check operation is possible on the table in its current state // Also checks whether the table exists if (masterServices.getAssignmentManager().getTableStateManager() - .isTableState(this.htd.getTableName(), ZooKeeperProtos.Table.State.ENABLED) + .isTableState(this.htd.getTableName(), TableState.State.ENABLED) && this.htd.getRegionReplication() != getTableDescriptor().getRegionReplication()) { throw new IOException("REGION_REPLICATION change is not supported for enabled tables"); } @@ -79,6 +79,7 @@ public class ModifyTableHandler extends TableEventHandler { } // Update descriptor HTableDescriptor oldHtd = getTableDescriptor(); + this.htd.setTableState(oldHtd.getTableState()); // preserve table state this.masterServices.getTableDescriptors().add(this.htd); deleteFamilyFromFS(hris, oldHtd.getFamiliesKeys()); removeReplicaColumnsIfNeeded(this.htd.getRegionReplication(), oldHtd.getRegionReplication(), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java index 7898edc..f7aa0d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.AssignmentManager; @@ -39,10 +40,8 @@ import org.apache.hadoop.hbase.master.DeadServer; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.ServerManager; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; /** @@ -231,23 +230,23 @@ public class ServerShutdownHandler 
extends EventHandler { continue; } LOG.info("Reassigning region with rs = " + rit); - regionStates.updateRegionState(hri, State.OFFLINE); + regionStates.updateRegionState(hri, RegionState.State.OFFLINE); } else if (regionStates.isRegionInState( - hri, State.SPLITTING_NEW, State.MERGING_NEW)) { - regionStates.updateRegionState(hri, State.OFFLINE); + hri, RegionState.State.SPLITTING_NEW, RegionState.State.MERGING_NEW)) { + regionStates.updateRegionState(hri, RegionState.State.OFFLINE); } toAssignRegions.add(hri); } else if (rit != null) { if ((rit.isPendingCloseOrClosing() || rit.isOffline()) && am.getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) || + TableState.State.DISABLED, TableState.State.DISABLING) || am.getReplicasToClose().contains(hri)) { // If the table was partially disabled and the RS went down, we should clear the RIT // and remove the node for the region. // The rit that we use may be stale in case the table was in DISABLING state // but though we did assign we will not be clearing the znode in CLOSING state. // Doing this will have no harm. See HBASE-5927 - regionStates.updateRegionState(hri, State.OFFLINE); + regionStates.updateRegionState(hri, RegionState.State.OFFLINE); am.offlineDisabledRegion(hri); } else { LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition " @@ -323,7 +322,7 @@ public class ServerShutdownHandler extends EventHandler { } // If table is not disabled but the region is offlined, boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED); + TableState.State.DISABLED); if (disabled){ LOG.info("The table " + hri.getTable() + " was disabled. Hence not proceeding."); @@ -336,7 +335,7 @@ public class ServerShutdownHandler extends EventHandler { return false; } boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLING); + TableState.State.DISABLING); if (disabling) { LOG.info("The table " + hri.getTable() + " is disabled. Hence not assigning region" + hri.getEncodedName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java index 4f1c39d..e1a8753 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java @@ -40,12 +40,12 @@ import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.BulkReOpen; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Bytes; import com.google.common.collect.Lists; @@ -130,7 +130,7 @@ public abstract class TableEventHandler extends EventHandler { handleTableOperation(hris); if (eventType.isOnlineSchemaChangeSupported() && this.masterServices. 
getAssignmentManager().getTableStateManager().isTableState( - tableName, ZooKeeperProtos.Table.State.ENABLED)) { + tableName, TableState.State.ENABLED)) { if (reOpenAllRegions(hris)) { LOG.info("Completed table operation " + eventType + " on table " + tableName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java index 086d1d5..eecb12d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java @@ -28,15 +28,16 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.ModifyRegionUtils; @@ -93,54 +94,44 @@ public class TruncateTableHandler extends DeleteTableHandler { AssignmentManager assignmentManager = this.masterServices.getAssignmentManager(); - // 1. Set table znode - CreateTableHandler.checkAndSetEnablingTable(assignmentManager, tableName); - try { - // 1. Create Table Descriptor - new FSTableDescriptors(server.getConfiguration()) - .createTableDescriptorForTableDirectory(tempdir, this.hTableDescriptor, false); - Path tempTableDir = FSUtils.getTableDir(tempdir, this.tableName); - Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), this.tableName); - - HRegionInfo[] newRegions; - if (this.preserveSplits) { - newRegions = regions.toArray(new HRegionInfo[regions.size()]); - LOG.info("Truncate will preserve " + newRegions.length + " regions"); - } else { - newRegions = new HRegionInfo[1]; - newRegions[0] = new HRegionInfo(this.tableName, null, null); - LOG.info("Truncate will not preserve the regions"); - } - - // 2. Create Regions - List regionInfos = ModifyRegionUtils.createRegions( - masterServices.getConfiguration(), tempdir, - this.hTableDescriptor, newRegions, null); - - // 3. Move Table temp directory to the hbase root location - if (!fs.rename(tempTableDir, tableDir)) { - throw new IOException("Unable to move table from temp=" + tempTableDir + - " to hbase root=" + tableDir); - } - - // 4. Add regions to META - MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(), - regionInfos); - - // 5. Trigger immediate assignment of the regions in round-robin fashion - ModifyRegionUtils.assignRegions(assignmentManager, regionInfos); - - // 6. Set table enabled flag up in zk. 
- try { - assignmentManager.getTableStateManager().setTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED); - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that " + tableName + " will be" + - " enabled because of a ZooKeeper issue", e); - } - } catch (IOException e) { - CreateTableHandler.removeEnablingTable(assignmentManager, tableName); - throw e; + // 1. Create Table Descriptor + HTableDescriptor underConstruction = new HTableDescriptor(this.hTableDescriptor); + underConstruction.setTableState(TableState.State.ENABLING); + new FSTableDescriptors(server.getConfiguration()) + .createTableDescriptorForTableDirectory(tempdir, underConstruction, false); + Path tempTableDir = FSUtils.getTableDir(tempdir, this.tableName); + Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), this.tableName); + + HRegionInfo[] newRegions; + if (this.preserveSplits) { + newRegions = regions.toArray(new HRegionInfo[regions.size()]); + LOG.info("Truncate will preserve " + newRegions.length + " regions"); + } else { + newRegions = new HRegionInfo[1]; + newRegions[0] = new HRegionInfo(this.tableName, null, null); + LOG.info("Truncate will not preserve the regions"); } + + // 2. Create Regions + List regionInfos = ModifyRegionUtils.createRegions( + masterServices.getConfiguration(), tempdir, + this.hTableDescriptor, newRegions, null); + + // 3. Move Table temp directory to the hbase root location + if (!fs.rename(tempTableDir, tableDir)) { + throw new IOException("Unable to move table from temp=" + tempTableDir + + " to hbase root=" + tableDir); + } + + // 4. Add regions to META + MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(), + regionInfos); + + // 5. Trigger immediate assignment of the regions in round-robin fashion + ModifyRegionUtils.assignRegions(assignmentManager, regionInfos); + + // 6. Enable table
+ assignmentManager.getTableStateManager().setTableState(tableName, + TableState.State.ENABLED); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 3bf704a..f798561 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.master.AssignmentManager; @@ -62,7 +63,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; @@ -558,14 +558,14 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable TableName snapshotTable = TableName.valueOf(snapshot.getTable()); AssignmentManager assignmentMgr = master.getAssignmentManager(); if (assignmentMgr.getTableStateManager().isTableState(snapshotTable, - ZooKeeperProtos.Table.State.ENABLED)) { + TableState.State.ENABLED)) { LOG.debug("Table enabled, starting distributed snapshot."); snapshotEnabledTable(snapshot); LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot)); } // For disabled table, snapshot is created by the master else if (assignmentMgr.getTableStateManager().isTableState(snapshotTable, - ZooKeeperProtos.Table.State.DISABLED)) { + TableState.State.DISABLED)) { LOG.debug("Table is disabled, running snapshot entirely on master."); snapshotDisabledTable(snapshot); LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot)); @@ -696,7 +696,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable // Execute the restore/clone operation if (MetaTableAccessor.tableExists(master.getShortCircuitConnection(), tableName)) { if (master.getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf(fsSnapshot.getTable()), ZooKeeperProtos.Table.State.ENABLED)) { + TableName.valueOf(fsSnapshot.getTable()), TableState.State.ENABLED)) { throw new UnsupportedOperationException("Table '" + TableName.valueOf(fsSnapshot.getTable()) + "' must be disabled in order to " + "perform a restore operation" + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java index 2df9f50..1a4e1bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java @@ -46,6 +46,9 @@ import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.protobuf.ServiceException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -56,18 +59,17 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.client.ConnectionUtils; @@ -77,6 +79,7 @@ import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.RegionOpeningException; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.master.SplitLogManager; @@ -92,7 +95,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Mut import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId; @@ -113,10 +115,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.ipc.RemoteException; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.protobuf.ServiceException; - /** * This class is responsible for splitting up a bunch of regionserver commit log * files that are no longer being written to, into new files, one per region for @@ -137,8 +135,6 @@ public class HLogSplitter { OutputSink outputSink; EntryBuffers entryBuffers; - private Set disablingOrDisabledTables = - new HashSet(); private ZooKeeperWatcher watcher; private CoordinatedStateManager csm; @@ -289,15 +285,6 @@ public class HLogSplitter { LOG.warn("Nothing to split in log file " + logPath); return true; } - if(watcher != null && csm != null) { - try { - TableStateManager tsm = csm.getTableStateManager(); - disablingOrDisabledTables = tsm.getTablesInStates( - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING); - } catch (CoordinatedStateException e) { - throw new 
IOException("Can't get disabling/disabled tables", e); - } - } int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3); int numOpenedFilesLastCheck = 0; outputSink.setReporter(reporter); @@ -1433,7 +1420,9 @@ public class HLogSplitter { } // check if current region in a disabling or disabled table - if (disablingOrDisabledTables.contains(buffer.tableName)) { + TableState state = csm.getServer().getShortCircuitConnection() + .getTableState(buffer.tableName); + if (state.equals(TableState.State.DISABLED) || state.equals(TableState.State.DISABLING)) { // need fall back to old way logRecoveredEditsOutputSink.append(buffer); hasEditsInDisablingOrDisabledTables = true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index efcd7cd..95f354d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util; import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InterruptedIOException; import java.io.PrintWriter; import java.io.StringWriter; import java.net.URI; @@ -87,6 +86,7 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -105,7 +105,6 @@ import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl; import org.apache.hadoop.hbase.util.hbck.TableLockChecker; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.io.IOUtils; @@ -1380,22 +1379,15 @@ public class HBaseFsck extends Configured { * @throws IOException */ private void loadDisabledTables() - throws ZooKeeperConnectionException, IOException { + throws IOException { HConnectionManager.execute(new HConnectable(getConf()) { @Override public Void connect(HConnection connection) throws IOException { - ZooKeeperWatcher zkw = createZooKeeperWatcher(); - try { - for (TableName tableName : - ZKTableStateClientSideReader.getDisabledOrDisablingTables(zkw)) { - disabledTables.add(tableName); - } - } catch (KeeperException ke) { - throw new IOException(ke); - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } finally { - zkw.close(); + HTableDescriptor[] tables = connection.listTables(); + for (HTableDescriptor table : tables) { + TableState.State state = table.getTableState(); + if (state.equals(TableState.State.DISABLED) || state.equals(TableState.State.DISABLING)) + disabledTables.add(table.getTableName()); } return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java index f773b06..5425548 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java @@ -141,21 +141,7 @@ public class ZKDataMigrator extends Configured implements Tool { LOG.info("No table present to migrate table state to PB. returning.."); return; } - for (String table : tables) { - String znode = ZKUtil.joinZNode(zkw.tableZNode, table); - // Delete -ROOT- table state znode since its no longer present in 0.95.0 - // onwards. - if (table.equals("-ROOT-") || table.equals(".META.")) { - ZKUtil.deleteNode(zkw, znode); - continue; - } - byte[] data = ZKUtil.getData(zkw, znode); - if (ProtobufUtil.isPBMagicPrefix(data)) continue; - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - builder.setState(ZooKeeperProtos.Table.State.valueOf(Bytes.toString(data))); - data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); - ZKUtil.setData(zkw, znode, data); - } + ZKUtil.deleteNodeRecursively(zkw, zkw.tableZNode); } private void checkAndMigrateReplicationNodesToPB(ZooKeeperWatcher zkw) throws KeeperException, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java deleted file mode 100644 index 1aff12f..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java +++ /dev/null @@ -1,330 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import com.google.protobuf.InvalidProtocolBufferException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CoordinatedStateException; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableStateManager; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.zookeeper.KeeperException; - -import java.io.InterruptedIOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * Implementation of TableStateManager which reads, caches and sets state - * up in ZooKeeper. If multiple read/write clients, will make for confusion. - * Code running on client side without consensus context should use - * {@link ZKTableStateClientSideReader} instead. - * - *
<p>
To save on trips to the zookeeper ensemble, internally we cache table - * state. - */ -@InterfaceAudience.Private -public class ZKTableStateManager implements TableStateManager { - // A znode will exist under the table directory if it is in any of the - // following states: {@link TableState#ENABLING} , {@link TableState#DISABLING}, - // or {@link TableState#DISABLED}. If {@link TableState#ENABLED}, there will - // be no entry for a table in zk. Thats how it currently works. - - private static final Log LOG = LogFactory.getLog(ZKTableStateManager.class); - private final ZooKeeperWatcher watcher; - - /** - * Cache of what we found in zookeeper so we don't have to go to zk ensemble - * for every query. Synchronize access rather than use concurrent Map because - * synchronization needs to span query of zk. - */ - private final Map cache = - new HashMap(); - - public ZKTableStateManager(final ZooKeeperWatcher zkw) throws KeeperException, - InterruptedException { - super(); - this.watcher = zkw; - populateTableStates(); - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @throws KeeperException, InterruptedException - */ - private void populateTableStates() throws KeeperException, InterruptedException { - synchronized (this.cache) { - List children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode); - if (children == null) return; - for (String child: children) { - TableName tableName = TableName.valueOf(child); - ZooKeeperProtos.Table.State state = getTableState(this.watcher, tableName); - if (state != null) this.cache.put(tableName, state); - } - } - } - - /** - * Sets table state in ZK. Sets no watches. - * - * {@inheritDoc} - */ - @Override - public void setTableState(TableName tableName, ZooKeeperProtos.Table.State state) - throws CoordinatedStateException { - synchronized (this.cache) { - LOG.warn("Moving table " + tableName + " state from " + this.cache.get(tableName) - + " to " + state); - try { - setTableStateInZK(tableName, state); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - - /** - * Checks and sets table state in ZK. Sets no watches. - * {@inheritDoc} - */ - @Override - public boolean setTableStateIfInStates(TableName tableName, - ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... states) - throws CoordinatedStateException { - synchronized (this.cache) { - // Transition ENABLED->DISABLING has to be performed with a hack, because - // we treat empty state as enabled in this case because 0.92- clusters. - if ( - (newState == ZooKeeperProtos.Table.State.DISABLING) && - this.cache.get(tableName) != null && !isTableState(tableName, states) || - (newState != ZooKeeperProtos.Table.State.DISABLING && - !isTableState(tableName, states) )) { - return false; - } - try { - setTableStateInZK(tableName, newState); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - return true; - } - } - - /** - * Checks and sets table state in ZK. Sets no watches. - * {@inheritDoc} - */ - @Override - public boolean setTableStateIfNotInStates(TableName tableName, - ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... 
states) - throws CoordinatedStateException { - synchronized (this.cache) { - if (isTableState(tableName, states)) { - return false; - } - try { - setTableStateInZK(tableName, newState); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - return true; - } - } - - private void setTableStateInZK(final TableName tableName, - final ZooKeeperProtos.Table.State state) - throws KeeperException { - String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()); - if (ZKUtil.checkExists(this.watcher, znode) == -1) { - ZKUtil.createAndFailSilent(this.watcher, znode); - } - synchronized (this.cache) { - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - builder.setState(state); - byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); - ZKUtil.setData(this.watcher, znode, data); - this.cache.put(tableName, state); - } - } - - /** - * Checks if table is marked in specified state in ZK. - * - * {@inheritDoc} - */ - @Override - public boolean isTableState(final TableName tableName, - final ZooKeeperProtos.Table.State... states) { - synchronized (this.cache) { - ZooKeeperProtos.Table.State currentState = this.cache.get(tableName); - return isTableInState(Arrays.asList(states), currentState); - } - } - - /** - * Deletes the table in zookeeper. Fails silently if the - * table is not currently disabled in zookeeper. Sets no watches. - * - * {@inheritDoc} - */ - @Override - public void setDeletedTable(final TableName tableName) - throws CoordinatedStateException { - synchronized (this.cache) { - if (this.cache.remove(tableName) == null) { - LOG.warn("Moving table " + tableName + " state to deleted but was " + - "already deleted"); - } - try { - ZKUtil.deleteNodeFailSilent(this.watcher, - ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString())); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - - /** - * check if table is present. - * - * @param tableName table we're working on - * @return true if the table is present - */ - @Override - public boolean isTablePresent(final TableName tableName) { - synchronized (this.cache) { - ZooKeeperProtos.Table.State state = this.cache.get(tableName); - return !(state == null); - } - } - - /** - * Gets a list of all the tables set as disabling in zookeeper. - * @return Set of disabling tables, empty Set if none - * @throws CoordinatedStateException if error happened in underlying coordination engine - */ - @Override - public Set getTablesInStates(ZooKeeperProtos.Table.State... states) - throws InterruptedIOException, CoordinatedStateException { - try { - return getAllTables(states); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - - /** - * {@inheritDoc} - */ - @Override - public void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states, - boolean deletePermanentState) - throws CoordinatedStateException { - synchronized (this.cache) { - if (isTableState(tableName, states)) { - this.cache.remove(tableName); - if (deletePermanentState) { - try { - ZKUtil.deleteNodeFailSilent(this.watcher, - ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString())); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - } - } - - /** - * Gets a list of all the tables of specified states in zookeeper. 
- * @return Set of tables of specified states, empty Set if none - * @throws KeeperException - */ - Set getAllTables(final ZooKeeperProtos.Table.State... states) - throws KeeperException, InterruptedIOException { - - Set allTables = new HashSet(); - List children = - ZKUtil.listChildrenNoWatch(watcher, watcher.tableZNode); - if(children == null) return allTables; - for (String child: children) { - TableName tableName = TableName.valueOf(child); - ZooKeeperProtos.Table.State state; - try { - state = getTableState(watcher, tableName); - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } - for (ZooKeeperProtos.Table.State expectedState: states) { - if (state == expectedState) { - allTables.add(tableName); - break; - } - } - } - return allTables; - } - - /** - * Gets table state from ZK. - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return Null or {@link ZooKeeperProtos.Table.State} found in znode. - * @throws KeeperException - */ - private ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); - byte [] data = ZKUtil.getData(zkw, znode); - if (data == null || data.length <= 0) return null; - try { - ProtobufUtil.expectPBMagicPrefix(data); - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.Table t = builder.mergeFrom(data, magicLen, data.length - magicLen).build(); - return t.getState(); - } catch (InvalidProtocolBufferException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - throw ke; - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } - - /** - * @return true if current state isn't null and is contained - * in the list of expected states. 
- */ - private boolean isTableInState(final List expectedStates, - final ZooKeeperProtos.Table.State currentState) { - return currentState != null && expectedStates.contains(currentState); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index c24b4e1..33b349e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -59,13 +59,11 @@ import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.wal.HLogUtilsForTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.After; import org.junit.AfterClass; @@ -257,7 +255,7 @@ public class TestAdmin { this.admin.disableTable(ht.getName()); assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), ZooKeeperProtos.Table.State.DISABLED)); + ht.getName(), TableState.State.DISABLED)); // Test that table is disabled get = new Get(row); @@ -272,7 +270,7 @@ public class TestAdmin { this.admin.enableTable(table); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), ZooKeeperProtos.Table.State.ENABLED)); + ht.getName(), TableState.State.ENABLED)); // Test that table is enabled try { @@ -345,7 +343,7 @@ public class TestAdmin { assertEquals(numTables + 1, tables.length); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf("testCreateTable"), ZooKeeperProtos.Table.State.ENABLED)); + TableName.valueOf("testCreateTable"), TableState.State.ENABLED)); } @Test (timeout=300000) @@ -1126,8 +1124,7 @@ public class TestAdmin { ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL); TableName tableName = TableName.valueOf("testMasterAdmin"); TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); - while (!ZKTableStateClientSideReader.isEnabledTable(zkw, - TableName.valueOf("testMasterAdmin"))) { + while (!this.admin.isTableEnabled(TableName.valueOf("testMasterAdmin"))) { Thread.sleep(10); } this.admin.disableTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index f8e87dd..9385030 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -51,15 +51,14 @@ import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HTable; +import 
org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -603,9 +602,9 @@ public class TestAssignmentManagerOnCluster { } } am.regionOffline(hri); - am.getRegionStates().updateRegionState(hri, State.PENDING_OPEN, destServerName); + am.getRegionStates().updateRegionState(hri, RegionState.State.PENDING_OPEN, destServerName); - am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLING); + am.getTableStateManager().setTableState(table, TableState.State.DISABLING); List toAssignRegions = am.processServerShutdown(destServerName); assertTrue("Regions to be assigned should be empty.", toAssignRegions.isEmpty()); assertTrue("Regions to be assigned should be empty.", am.getRegionStates() @@ -614,7 +613,7 @@ public class TestAssignmentManagerOnCluster { if (hri != null && serverName != null) { am.regionOnline(hri, serverName); } - am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLED); + am.getTableStateManager().setTableState(table, TableState.State.DISABLED); TEST_UTIL.deleteTable(table); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 288d115..fbb46f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -407,6 +407,11 @@ public class TestCatalogJanitor { } @Override + public TableStateManager getTableStateManager() { + return null; + } + + @Override public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b, boolean forcible) throws IOException { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index dc45b26..8a8cb96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.junit.AfterClass; @@ -81,7 +81,7 @@ public class TestMaster { HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME); assertTrue(m.assignmentManager.getTableStateManager().isTableState(TABLENAME, - ZooKeeperProtos.Table.State.ENABLED)); + TableState.State.ENABLED)); 
TEST_UTIL.loadTable(ht, FAMILYNAME, false); ht.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java index 7ff8ca1..02c1028 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.junit.Test; @@ -93,8 +93,8 @@ public class TestMasterRestartAfterDisablingTable { assertTrue("The table should not be in enabled state", cluster.getMaster() .getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.DISABLED, - ZooKeeperProtos.Table.State.DISABLING)); + TableName.valueOf("tableRestart"), TableState.State.DISABLED, + TableState.State.DISABLING)); log("Enabling table\n"); // Need a new Admin, the previous one is on the old master HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); @@ -109,7 +109,7 @@ public class TestMasterRestartAfterDisablingTable { 6, regions.size()); assertTrue("The table should be in enabled state", cluster.getMaster() .getAssignmentManager().getTableStateManager() - .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED)); + .isTableState(TableName.valueOf("tableRestart"), TableState.State.ENABLED)); ht.close(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestTableStateManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestTableStateManager.java new file mode 100644 index 0000000..30cd629 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestTableStateManager.java @@ -0,0 +1,99 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.zookeeper; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.master.TableStateManager; +import org.apache.zookeeper.KeeperException; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +@Category(MediumTests.class) +public class TestTableStateManager { + private static final Log LOG = LogFactory.getLog(TestTableStateManager.class); + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testTableStates() + throws IOException, KeeperException, InterruptedException { + final TableName name = + TableName.valueOf("testDisabled"); + TEST_UTIL.getHBaseAdmin().createTable(new HTableDescriptor(name)); + TableStateManager zkt = new TableStateManager(TEST_UTIL.getMiniHBaseCluster().getMaster()); + assertFalse(zkt.isTableState(name, TableState.State.ENABLED)); + assertFalse(zkt.isTableState(name, TableState.State.DISABLING)); + assertFalse(zkt.isTableState(name, TableState.State.DISABLED)); + assertFalse(zkt.isTableState(name, TableState.State.ENABLING)); + assertFalse(zkt.isTableState(name, TableState.State.DISABLED, TableState.State.DISABLING)); + assertFalse(zkt.isTableState(name, TableState.State.DISABLED, TableState.State.ENABLING)); + assertFalse(zkt.isTablePresent(name)); + zkt.setTableState(name, TableState.State.DISABLING); + assertTrue(zkt.isTableState(name, TableState.State.DISABLING)); + assertTrue(zkt.isTableState(name, TableState.State.DISABLED, TableState.State.DISABLING)); + assertFalse(zkt.getTablesInStates(TableState.State.DISABLED).contains(name)); + assertTrue(zkt.isTablePresent(name)); + zkt.setTableState(name, TableState.State.DISABLED); + assertTrue(zkt.isTableState(name, TableState.State.DISABLED)); + assertTrue(zkt.isTableState(name, TableState.State.DISABLED, TableState.State.DISABLING)); + assertFalse(zkt.isTableState(name, TableState.State.DISABLING)); + assertTrue(zkt.getTablesInStates(TableState.State.DISABLED).contains(name)); + assertTrue(zkt.isTablePresent(name)); + zkt.setTableState(name, TableState.State.ENABLING); + assertTrue(zkt.isTableState(name, TableState.State.ENABLING)); + assertTrue(zkt.isTableState(name, TableState.State.DISABLED, TableState.State.ENABLING)); + assertFalse(zkt.isTableState(name, TableState.State.DISABLED)); + assertFalse(zkt.getTablesInStates(TableState.State.DISABLED).contains(name)); + assertTrue(zkt.isTablePresent(name)); + zkt.setTableState(name, TableState.State.ENABLED); + assertTrue(zkt.isTableState(name, TableState.State.ENABLED)); + assertFalse(zkt.isTableState(name, TableState.State.ENABLING)); + assertTrue(zkt.isTablePresent(name)); + zkt.setDeletedTable(name); + assertFalse(zkt.isTableState(name, TableState.State.ENABLED)); + assertFalse(zkt.isTableState(name, TableState.State.DISABLING)); + 
assertFalse(zkt.isTableState(name, TableState.State.DISABLED)); + assertFalse(zkt.isTableState(name, TableState.State.ENABLING)); + assertFalse(zkt.isTableState(name, TableState.State.DISABLED, TableState.State.DISABLING)); + assertFalse(zkt.isTableState(name, TableState.State.DISABLED, TableState.State.ENABLING)); + assertFalse(zkt.isTablePresent(name)); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java deleted file mode 100644 index f5210cc..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java +++ /dev/null @@ -1,114 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.CoordinatedStateException; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.MediumTests; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableStateManager; -import org.apache.zookeeper.KeeperException; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table; - -@Category(MediumTests.class) -public class TestZKTableStateManager { - private static final Log LOG = LogFactory.getLog(TestZKTableStateManager.class); - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - TEST_UTIL.startMiniZKCluster(); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniZKCluster(); - } - - @Test - public void testTableStates() - throws CoordinatedStateException, IOException, KeeperException, InterruptedException { - final TableName name = - TableName.valueOf("testDisabled"); - Abortable abortable = new Abortable() { - @Override - public void abort(String why, Throwable e) { - LOG.info(why, e); - } - - @Override - public boolean isAborted() { - return false; - } - - }; - ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), - name.getNameAsString(), abortable, true); - TableStateManager zkt = new ZKTableStateManager(zkw); - assertFalse(zkt.isTableState(name, Table.State.ENABLED)); - assertFalse(zkt.isTableState(name, Table.State.DISABLING)); - 
assertFalse(zkt.isTableState(name, Table.State.DISABLED)); - assertFalse(zkt.isTableState(name, Table.State.ENABLING)); - assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING)); - assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING)); - assertFalse(zkt.isTablePresent(name)); - zkt.setTableState(name, Table.State.DISABLING); - assertTrue(zkt.isTableState(name, Table.State.DISABLING)); - assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING)); - assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name)); - assertTrue(zkt.isTablePresent(name)); - zkt.setTableState(name, Table.State.DISABLED); - assertTrue(zkt.isTableState(name, Table.State.DISABLED)); - assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING)); - assertFalse(zkt.isTableState(name, Table.State.DISABLING)); - assertTrue(zkt.getTablesInStates(Table.State.DISABLED).contains(name)); - assertTrue(zkt.isTablePresent(name)); - zkt.setTableState(name, Table.State.ENABLING); - assertTrue(zkt.isTableState(name, Table.State.ENABLING)); - assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING)); - assertFalse(zkt.isTableState(name, Table.State.DISABLED)); - assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name)); - assertTrue(zkt.isTablePresent(name)); - zkt.setTableState(name, Table.State.ENABLED); - assertTrue(zkt.isTableState(name, Table.State.ENABLED)); - assertFalse(zkt.isTableState(name, Table.State.ENABLING)); - assertTrue(zkt.isTablePresent(name)); - zkt.setDeletedTable(name); - assertFalse(zkt.isTableState(name, Table.State.ENABLED)); - assertFalse(zkt.isTableState(name, Table.State.DISABLING)); - assertFalse(zkt.isTableState(name, Table.State.DISABLED)); - assertFalse(zkt.isTableState(name, Table.State.ENABLING)); - assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING)); - assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING)); - assertFalse(zkt.isTablePresent(name)); - } -}
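
Taken together, this patch moves table enable/disable state out of ZooKeeper and into the table descriptor, where clients read it through the connection rather than through a ZooKeeperWatcher. As a minimal illustration of the resulting client-side idiom (this sketch is not part of the patch; the class name TableStateCheck, the helper isDisabledOrDisabling, and the table name "someTable" are invented for the example, and it assumes the HConnection APIs present on this branch):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.TableState;

public class TableStateCheck {

  /**
   * Returns true if the table is disabled or in the process of being
   * disabled. After this patch the state travels in the descriptor's
   * TABLE_STATE attribute; HTableDescriptor.getTableState() falls back
   * to DEFAULT_TABLE_STATE (ENABLED) when the attribute is unset, so no
   * ZooKeeper read is involved.
   */
  static boolean isDisabledOrDisabling(HConnection connection, TableName tableName)
      throws IOException {
    HTableDescriptor htd = connection.getHTableDescriptor(tableName);
    TableState.State state = htd.getTableState();
    return state.equals(TableState.State.DISABLED)
        || state.equals(TableState.State.DISABLING);
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      TableName name = TableName.valueOf("someTable"); // hypothetical table
      System.out.println(name + " disabled/disabling: "
          + isDisabledOrDisabling(connection, name));
    } finally {
      connection.close();
    }
  }
}

This is the single-table form of what the reworked HBaseFsck.loadDisabledTables() above now does across all tables via connection.listTables(); in both cases the former ZKTableStateClientSideReader round trip to the ZooKeeper ensemble is gone.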