diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java index 9d1570d..5b4f7c7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java @@ -170,6 +170,11 @@ class ConnectionAdapter implements ClusterConnection { } @Override + public TableState getTableState(TableName tableName) throws IOException { + return wrappedConnection.getTableState(tableName); + } + + @Override public HTableDescriptor[] listTables() throws IOException { return wrappedConnection.listTables(); } @@ -435,4 +440,4 @@ class ConnectionAdapter implements ClusterConnection { public AsyncProcess getAsyncProcess() { return wrappedConnection.getAsyncProcess(); } -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java index 7c9c0b9..bbf180e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java @@ -176,6 +176,8 @@ import com.google.protobuf.BlockingRpcChannel; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; +import static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*; + /** * An internal, non-instantiable class that manages creation of {@link HConnection}s. */ @@ -893,7 +895,7 @@ class ConnectionManager { @Override public boolean isTableEnabled(TableName tableName) throws IOException { - return this.registry.isTableOnlineState(tableName, true); + return getTableState(tableName).inStates(TableState.State.ENABLED); } @Override @@ -903,7 +905,7 @@ class ConnectionManager { @Override public boolean isTableDisabled(TableName tableName) throws IOException { - return this.registry.isTableOnlineState(tableName, false); + return getTableState(tableName).inStates(TableState.State.DISABLED); } @Override @@ -1993,6 +1995,13 @@ class ConnectionManager { } @Override + public GetTableStateResponse getTableState( + RpcController controller, GetTableStateRequest request) + throws ServiceException { + return stub.getTableState(controller, request); + } + + @Override public void close() { release(this.mss); } @@ -2498,6 +2507,20 @@ class ConnectionManager { throws IOException { return getHTableDescriptor(TableName.valueOf(tableName)); } + + @Override + public TableState getTableState(TableName tableName) throws IOException { + MasterKeepAliveConnection master = getKeepAliveMasterService(); + try { + GetTableStateResponse resp = master.getTableState(null, + RequestConverter.buildGetTableStateRequest(tableName)); + return TableState.convert(resp.getTableState()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + } } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index cd11a52..918c944 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -208,6 +208,13 @@ public interface HConnection extends Abortable, Closeable { boolean isTableDisabled(byte[] tableName) throws IOException; /** 
+ * Retrieve the TableState, representing the current state of the table. + * @param tableName table to fetch state for + * @return state of the table + * @throws IOException if a remote or network exception occurs + */ + public TableState getTableState(TableName tableName) throws IOException; + + /** * @param tableName table name * @return true if all regions of the table are available, false otherwise * @throws IOException if a remote or network exception occurs @@ -576,4 +583,4 @@ public interface HConnection extends Abortable, Closeable { * @deprecated internal method, do not use thru HConnection */ @Deprecated public NonceGenerator getNonceGenerator(); -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java index aab547e..89c8cef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java @@ -44,14 +44,8 @@ interface Registry { String getClusterId(); /** - * @param enabled Return true if table is enabled - * @throws IOException - */ - boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException; - - /** * @return Count of 'running' regionservers * @throws IOException */ int getCurrentNrHRS() throws IOException; -} \ No newline at end of file +}
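With Registry.isTableOnlineState() gone, clients ask the connection for the richer TableState instead of a boolean. A minimal usage sketch of the new HConnection.getTableState() call path follows; the connection setup and table name are illustrative, not part of this patch:

// Sketch: checking table availability through the new client API.
// Assumes an existing Configuration `conf`; "exampleTable" is hypothetical.
HConnection connection = HConnectionManager.createConnection(conf);
try {
  TableState state = connection.getTableState(TableName.valueOf("exampleTable"));
  if (state.inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
    // Table is offline or going offline; isTableDisabled(tableName) above
    // reduces to the single-state form of this check.
  }
} finally {
  connection.close();
}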
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java new file mode 100644 index 0000000..8738b81 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; + +/** + * Represents table state. + */ +@InterfaceAudience.Private +public class TableState { + + @InterfaceAudience.Public + @InterfaceStability.Evolving + public static enum State { + ENABLED, + DISABLED, + DISABLING, + ENABLING; + + /** + * Convert from PB version of State + * + * @param state PB state to convert from + * @return POJO + */ + public static State convert(HBaseProtos.TableState.State state) { + State ret; + switch (state) { + case ENABLED: + ret = State.ENABLED; + break; + case DISABLED: + ret = State.DISABLED; + break; + case DISABLING: + ret = State.DISABLING; + break; + case ENABLING: + ret = State.ENABLING; + break; + default: + throw new IllegalStateException(state.toString()); + } + return ret; + } + + /** + * Convert to PB version of State + * + * @return PB + */ + public HBaseProtos.TableState.State convert() { + HBaseProtos.TableState.State state; + switch (this) { + case ENABLED: + state = HBaseProtos.TableState.State.ENABLED; + break; + case DISABLED: + state = HBaseProtos.TableState.State.DISABLED; + break; + case DISABLING: + state = HBaseProtos.TableState.State.DISABLING; + break; + case ENABLING: + state = HBaseProtos.TableState.State.ENABLING; + break; + default: + throw new IllegalStateException(this.toString()); + } + return state; + } + + } + + private final long timestamp; + private final TableName tableName; + private final State state; + + /** + * Create instance of TableState. + * @param tableName table for which state is created + * @param state table state + * @param timestamp timestamp of the state, in milliseconds + */ + public TableState(TableName tableName, State state, long timestamp) { + this.tableName = tableName; + this.state = state; + this.timestamp = timestamp; + } + + /** + * Create instance of TableState with current timestamp + * + * @param tableName table for which state is created + * @param state state of the table + */ + public TableState(TableName tableName, State state) { + this(tableName, state, System.currentTimeMillis()); + } + + /** + * @return table state + */ + public State getState() { + return state; + } + + /** + * Timestamp of table state + * + * @return milliseconds + */ + public long getTimestamp() { + return timestamp; + } + + /** + * Table name for state + * + * @return table name + */ + public TableName getTableName() { + return tableName; + } + + /** + * Check that table is in the given state + * @param state state to check + * @return true if the table is in the given state + */ + public boolean inStates(State state) { + return this.state.equals(state); + } + + /** + * Check that table is in one of the given states + * @param states states to check against + * @return true if the table is in one of the given states + */ + public boolean inStates(State... states) { + for (State s : states) { + if (s.equals(this.state)) + return true; + } + return false; + } + + + /** + * Convert to PB version of TableState + * @return PB + */ + public HBaseProtos.TableState convert() { + return HBaseProtos.TableState.newBuilder() + .setState(this.state.convert()) + .setTable(ProtobufUtil.toProtoTableName(this.tableName)) + .setTimestamp(this.timestamp) + .build(); + } + + /** + * Convert from PB version of TableState + * @param tableState PB table state to convert from + * @return POJO + */ + public static TableState convert(HBaseProtos.TableState tableState) { + TableState.State state = State.convert(tableState.getState()); + return new TableState(ProtobufUtil.toTableName(tableState.getTable()), + state, tableState.getTimestamp()); + } + + /** + * Static version of state checker + * @param state state to check + * @param target states to check against + * @return true if the state equals any of the targets + */ + public static boolean isInStates(State state, State... target) { + for (State tableState : target) { + if (state.equals(tableState)) + return true; + } + return false; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java index 9123d50..4d3cc3e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java @@ -18,18 +18,17 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import java.io.InterruptedIOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.zookeeper.KeeperException; @@ -98,24 +97,6 @@ } @Override - public boolean isTableOnlineState(TableName tableName, boolean enabled) - throws IOException { - ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher(); - try { - if (enabled) { - return ZKTableStateClientSideReader.isEnabledTable(zkw, tableName); - } - return ZKTableStateClientSideReader.isDisabledTable(zkw, tableName); - } catch (KeeperException e) { - throw new IOException("Enable/Disable failed", e); - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } finally { - zkw.close(); - } - } - - @Override public int getCurrentNrHRS() throws IOException { ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher(); try { @@ -128,4 +109,4 @@ zkw.close(); } } -} \ No newline at end of file +}
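A TableState round-trips through its protobuf form via the two convert() methods added above; a short sketch of the round-trip, with an illustrative table name:

// Sketch: POJO <-> protobuf round-trip for TableState.
TableState state = new TableState(TableName.valueOf("exampleTable"),
    TableState.State.ENABLED);                    // timestamp defaults to current time
HBaseProtos.TableState pb = state.convert();      // POJO -> PB, the on-the-wire form
TableState roundTripped = TableState.convert(pb); // PB -> POJO, as in ConnectionManager
assert roundTripped.inStates(TableState.State.ENABLED);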
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index d6bcb29..ab764a3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.protobuf; import java.io.IOException; import java.util.List; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.classification.InterfaceAudience; @@ -106,6 +107,8 @@ import org.apache.hadoop.hbase.util.Pair; import com.google.protobuf.ByteString; +import static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*; + /** * Helper utility to build protocol buffer requests, * or build components for protocol buffer requests. @@ -1177,6 +1180,19 @@ public final class RequestConverter { } /** + * Creates a protocol buffer GetTableStateRequest + * + * @param tableName table to build the state request for + * @return a GetTableStateRequest + */ + public static GetTableStateRequest buildGetTableStateRequest( + final TableName tableName) { + return GetTableStateRequest.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .build(); + } + + /** * Creates a protocol buffer GetTableDescriptorsRequest for a single table * * @param tableName the table name
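This helper is the request half of the ConnectionManager.getTableState() implementation earlier in this patch; exercised directly it looks like this (table name illustrative):

// Sketch: building the master RPC request for a table's state.
GetTableStateRequest request =
    RequestConverter.buildGetTableStateRequest(TableName.valueOf("exampleTable"));
// ConnectionManager passes this to master.getTableState(null, request) and
// converts resp.getTableState() back into the client-side TableState POJO.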
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java deleted file mode 100644 index 94bd31e..0000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java +++ /dev/null @@ -1,168 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import com.google.protobuf.InvalidProtocolBufferException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.zookeeper.KeeperException; - -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * Non-instantiable class that provides helper functions to learn - * about HBase table state for code running on client side (hence, not having - * access to consensus context). - * - * Doesn't cache any table state, just goes directly to ZooKeeper. - * TODO: decouple this class from ZooKeeper. - */ -@InterfaceAudience.Private -public class ZKTableStateClientSideReader { - - private ZKTableStateClientSideReader() {} - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLED}. - * This method does not use cache. - * This method is for clients other than AssignmentManager - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isDisabledTable(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - return isTableState(ZooKeeperProtos.Table.State.DISABLED, state); - } - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#ENABLED}. - * This method does not use cache. - * This method is for clients other than AssignmentManager - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isEnabledTable(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED; - } - - /** - * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLING} - * of {@code ZooKeeperProtos.Table.State#DISABLED}. - * This method does not use cache. - * This method is for clients other than AssignmentManager. - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return True if table is enabled. - * @throws KeeperException - */ - public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) || - isTableState(ZooKeeperProtos.Table.State.DISABLED, state); - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @return Set of disabled tables, empty Set if none - * @throws KeeperException - */ - public static Set getDisabledTables(ZooKeeperWatcher zkw) - throws KeeperException, InterruptedException { - Set disabledTables = new HashSet(); - List children = - ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); - for (String child: children) { - TableName tableName = - TableName.valueOf(child); - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(tableName); - } - return disabledTables; - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. 
- * @return Set of disabled tables, empty Set if none - * @throws KeeperException - */ - public static Set getDisabledOrDisablingTables(ZooKeeperWatcher zkw) - throws KeeperException, InterruptedException { - Set disabledTables = new HashSet(); - List children = - ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); - for (String child: children) { - TableName tableName = - TableName.valueOf(child); - ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); - if (state == ZooKeeperProtos.Table.State.DISABLED || - state == ZooKeeperProtos.Table.State.DISABLING) - disabledTables.add(tableName); - } - return disabledTables; - } - - static boolean isTableState(final ZooKeeperProtos.Table.State expectedState, - final ZooKeeperProtos.Table.State currentState) { - return currentState != null && currentState.equals(expectedState); - } - - /** - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return Null or {@link ZooKeeperProtos.Table.State} found in znode. - * @throws KeeperException - */ - static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); - byte [] data = ZKUtil.getData(zkw, znode); - if (data == null || data.length <= 0) return null; - try { - ProtobufUtil.expectPBMagicPrefix(data); - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.Table t = builder.mergeFrom(data, magicLen, data.length - magicLen).build(); - return t.getState(); - } catch (InvalidProtocolBufferException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - throw ke; - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } -} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index df5e693..398ab9e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -129,12 +129,6 @@ public class TestClientNoCluster extends Configured implements Tool { } @Override - public boolean isTableOnlineState(TableName tableName, boolean enabled) - throws IOException { - return enabled; - } - - @Override public int getCurrentNrHRS() throws IOException { return 1; } @@ -813,4 +807,4 @@ public class TestClientNoCluster extends Configured implements Tool { public static void main(String[] args) throws Exception { System.exit(ToolRunner.run(HBaseConfiguration.create(), new TestClientNoCluster(), args)); } -} \ No newline at end of file +} diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index dd55599..2f5322d 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -2404,6 +2404,1576 @@ public final class HBaseProtos { // @@protoc_insertion_point(class_scope:TableSchema) } + public interface TableStateOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableState.State state = 1; + /** + * required 
.TableState.State state = 1; + * + *
+     * This is the table's state.
+     * 
+ */ + boolean hasState(); + /** + * required .TableState.State state = 1; + * + *
+     * This is the table's state.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); + + // required .TableName table = 2; + /** + * required .TableName table = 2; + */ + boolean hasTable(); + /** + * required .TableName table = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); + /** + * required .TableName table = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); + + // optional uint64 timestamp = 3; + /** + * optional uint64 timestamp = 3; + */ + boolean hasTimestamp(); + /** + * optional uint64 timestamp = 3; + */ + long getTimestamp(); + } + /** + * Protobuf type {@code TableState} + * + *
+   ** Denotes state of the table 
+   * 
+ */ + public static final class TableState extends + com.google.protobuf.GeneratedMessage + implements TableStateOrBuilder { + // Use TableState.newBuilder() to construct. + private TableState(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableState defaultInstance; + public static TableState getDefaultInstance() { + return defaultInstance; + } + + public TableState getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableState( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = table_.toBuilder(); + } + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + timestamp_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableState parsePartialFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableState(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code TableState.State} + * + *
+     * Table's current state
+     * 
+ */ + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + * ENABLED = 0; + */ + ENABLED(0, 0), + /** + * DISABLED = 1; + */ + DISABLED(1, 1), + /** + * DISABLING = 2; + */ + DISABLING(2, 2), + /** + * ENABLING = 3; + */ + ENABLING(3, 3), + ; + + /** + * ENABLED = 0; + */ + public static final int ENABLED_VALUE = 0; + /** + * DISABLED = 1; + */ + public static final int DISABLED_VALUE = 1; + /** + * DISABLING = 2; + */ + public static final int DISABLING_VALUE = 2; + /** + * ENABLING = 3; + */ + public static final int ENABLING_VALUE = 3; + + + public final int getNumber() { return value; } + + public static State valueOf(int value) { + switch (value) { + case 0: return ENABLED; + case 1: return DISABLED; + case 2: return DISABLING; + case 3: return ENABLING; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private State(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:TableState.State) + } + + private int bitField0_; + // required .TableState.State state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_; + /** + * required .TableState.State state = 1; + * + *
+     * This is the table's state.
+     * 
+ */ + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState.State state = 1; + * + *
+     * This is the table's state.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; + } + + // required .TableName table = 2; + public static final int TABLE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; + /** + * required .TableName table = 2; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + return table_; + } + /** + * required .TableName table = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; + } + + // optional uint64 timestamp = 3; + public static final int TIMESTAMP_FIELD_NUMBER = 3; + private long timestamp_; + /** + * optional uint64 timestamp = 3; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 timestamp = 3; + */ + public long getTimestamp() { + return timestamp_; + } + + private void initFields() { + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + timestamp_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTable()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, table_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, timestamp_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, table_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, timestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) obj; + + boolean result = 
true; + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && (hasTimestamp() == other.hasTimestamp()); + if (hasTimestamp()) { + result = result && (getTimestamp() + == other.getTimestamp()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } + if (hasTimestamp()) { + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimestamp()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code TableState} + * + *
+     ** Denotes state of the table 
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + bitField0_ = (bitField0_ & ~0x00000001); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + timestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableState_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.state_ = state_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableBuilder_ == null) { + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.timestamp_ = timestamp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) { + return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); + } + if (other.hasTable()) { + mergeTable(other.getTable()); + } + if (other.hasTimestamp()) { + setTimestamp(other.getTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasState()) { + + return false; + } + if (!hasTable()) { + + return false; + } + if (!getTable().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableState.State state = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + /** + * required .TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
+ */ + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; + } + /** + * required .TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
+ */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + state_ = value; + onChanged(); + return this; + } + /** + * required .TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
+ */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + onChanged(); + return this; + } + + // required .TableName table = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; + /** + * required .TableName table = 2; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; + } else { + return tableBuilder_.getMessage(); + } + } + /** + * required .TableName table = 2; + */ + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); + } else { + tableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table = 2; + */ + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table = 2; + */ + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table = 2; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableName table = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableFieldBuilder().getBuilder(); + } + /** + * required .TableName table = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; + } + } + /** + * required .TableName table = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + 
getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; + } + return tableBuilder_; + } + + // optional uint64 timestamp = 3; + private long timestamp_ ; + /** + * optional uint64 timestamp = 3; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 timestamp = 3; + */ + public long getTimestamp() { + return timestamp_; + } + /** + * optional uint64 timestamp = 3; + */ + public Builder setTimestamp(long value) { + bitField0_ |= 0x00000004; + timestamp_ = value; + onChanged(); + return this; + } + /** + * optional uint64 timestamp = 3; + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000004); + timestamp_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:TableState) + } + + static { + defaultInstance = new TableState(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:TableState) + } + + public interface TableDescriptorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableSchema schema = 1; + /** + * required .TableSchema schema = 1; + */ + boolean hasSchema(); + /** + * required .TableSchema schema = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema(); + /** + * required .TableSchema schema = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder(); + + // optional .TableState.State state = 2 [default = ENABLED]; + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + boolean hasState(); + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); + } + /** + * Protobuf type {@code TableDescriptor} + * + *
+   ** On HDFS representation of table state. 
+   * 
+ */ + public static final class TableDescriptor extends + com.google.protobuf.GeneratedMessage + implements TableDescriptorOrBuilder { + // Use TableDescriptor.newBuilder() to construct. + private TableDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableDescriptor defaultInstance; + public static TableDescriptor getDefaultInstance() { + return defaultInstance; + } + + public TableDescriptor getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableDescriptor( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = schema_.toBuilder(); + } + schema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(schema_); + schema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + state_ = value; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableDescriptor(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableSchema schema = 1; + public static final int SCHEMA_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_; + /** + * required .TableSchema schema = 1; + */ + public boolean hasSchema() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableSchema schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() { + return schema_; + } + /** + * required .TableSchema schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() { + return schema_; + } + + // optional .TableState.State state = 2 [default = ENABLED]; + public static final int STATE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_; + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; + } + + private void initFields() { + schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasSchema()) { + memoizedIsInitialized = 0; + return false; + } + if (!getSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, schema_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, state_.getNumber()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, schema_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, state_.getNumber()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other = 
(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) obj; + + boolean result = true; + result = result && (hasSchema() == other.hasSchema()); + if (hasSchema()) { + result = result && getSchema() + .equals(other.getSchema()); + } + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasSchema()) { + hash = (37 * hash) + SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSchema().hashCode(); + } + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, 
extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code TableDescriptor} + * + *
+     * <pre>
+     ** On HDFS representation of table state.
+     * </pre>
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSchemaFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (schemaBuilder_ == null) { + schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + schemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableDescriptor_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (schemaBuilder_ == null) { + result.schema_ = schema_; + } else { + result.schema_ = schemaBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.state_ = state_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)other); + } else { + 
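/* Editor's note (illustrative sketch; "tableSchema" is a hypothetical
 * pre-built HBaseProtos.TableSchema): constructing a descriptor with the
 * generated Builder. build() throws if the required schema field is unset,
 * while buildPartial() skips that check:
 *
 *   HBaseProtos.TableDescriptor td = HBaseProtos.TableDescriptor.newBuilder()
 *       .setSchema(tableSchema)                          // required field
 *       .setState(HBaseProtos.TableState.State.DISABLED) // optional
 *       .build();
 */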
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance()) return this; + if (other.hasSchema()) { + mergeSchema(other.getSchema()); + } + if (other.hasState()) { + setState(other.getState()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasSchema()) { + + return false; + } + if (!getSchema().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableSchema schema = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> schemaBuilder_; + /** + * required .TableSchema schema = 1; + */ + public boolean hasSchema() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableSchema schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() { + if (schemaBuilder_ == null) { + return schema_; + } else { + return schemaBuilder_.getMessage(); + } + } + /** + * required .TableSchema schema = 1; + */ + public Builder setSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (schemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + schemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableSchema schema = 1; + */ + public Builder setSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (schemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + schemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableSchema schema = 1; + */ + public Builder mergeSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (schemaBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + schema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + schema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(schema_).mergeFrom(value).buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + schemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return 
this; + } + /** + * required .TableSchema schema = 1; + */ + public Builder clearSchema() { + if (schemaBuilder_ == null) { + schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + schemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableSchema schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getSchemaBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getSchemaFieldBuilder().getBuilder(); + } + /** + * required .TableSchema schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() { + if (schemaBuilder_ != null) { + return schemaBuilder_.getMessageOrBuilder(); + } else { + return schema_; + } + } + /** + * required .TableSchema schema = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getSchemaFieldBuilder() { + if (schemaBuilder_ == null) { + schemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + return schemaBuilder_; + } + + // optional .TableState.State state = 2 [default = ENABLED]; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; + } + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + state_ = value; + onChanged(); + return this; + } + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000002); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:TableDescriptor) + } + + static { + defaultInstance = new TableDescriptor(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:TableDescriptor) + } + public interface ColumnFamilySchemaOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -16372,6 +17942,16 @@ public final class HBaseProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_TableSchema_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_TableState_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_TableState_fieldAccessorTable; + private static 
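/* Editor's note (illustrative; "tableSchema" again hypothetical): because
 * state is optional with a default, clearState() flips hasState() back to
 * false while getState() still reports ENABLED; callers that must tell
 * "unset" apart from "explicitly ENABLED" should consult hasState() first:
 *
 *   HBaseProtos.TableDescriptor.Builder b =
 *       HBaseProtos.TableDescriptor.newBuilder().setSchema(tableSchema);
 *   b.clearState();
 *   assert !b.hasState()
 *       && b.getState() == HBaseProtos.TableState.State.ENABLED;
 */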
com.google.protobuf.Descriptors.Descriptor + internal_static_TableDescriptor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_TableDescriptor_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_ColumnFamilySchema_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -16486,47 +18066,53 @@ public final class HBaseProtos { "Name\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPai" + "r\022,\n\017column_families\030\003 \003(\0132\023.ColumnFamil" + "ySchema\022&\n\rconfiguration\030\004 \003(\0132\017.NameStr" + - "ingPair\"o\n\022ColumnFamilySchema\022\014\n\004name\030\001 " + - "\002(\014\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPair" + - "\022&\n\rconfiguration\030\003 \003(\0132\017.NameStringPair" + - "\"\232\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022\036\n\nta", - "ble_name\030\002 \002(\0132\n.TableName\022\021\n\tstart_key\030" + - "\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022" + - "\r\n\005split\030\006 \001(\010\022\025\n\nreplica_id\030\007 \001(\005:\0010\"1\n" + - "\014FavoredNodes\022!\n\014favored_node\030\001 \003(\0132\013.Se" + - "rverName\"\225\001\n\017RegionSpecifier\0222\n\004type\030\001 \002" + - "(\0162$.RegionSpecifier.RegionSpecifierType" + - "\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017" + - "\n\013REGION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002" + - "\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"" + - "A\n\nServerName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004port", - "\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033\n\013Coprocesso" + - "r\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004nam" + - "e\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair\022" + - "\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesByt" + - "esPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\",\n" + - "\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 " + - "\001(\003\"\314\001\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" + - "\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:" + - "\0010\022.\n\004type\030\004 \001(\0162\031.SnapshotDescription.T" + - "ype:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n\005owner\030\006 \001", - "(\t\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\022\r\n\tS" + - "KIPFLUSH\020\002\"}\n\024ProcedureDescription\022\021\n\tsi" + - "gnature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcreat" + - "ion_time\030\003 \001(\003:\0010\022&\n\rconfiguration\030\004 \003(\013" + - "2\017.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007LongMsg" + - "\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022\n\ndoubl" + - "e_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016bigdecim" + - "al_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits\030\001" + - " \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"K\n\023Namespace" + - "Descriptor\022\014\n\004name\030\001 \002(\014\022&\n\rconfiguratio", - "n\030\002 \003(\0132\017.NameStringPair\"$\n\020RegionServer" + - "Info\022\020\n\010infoPort\030\001 \001(\005*r\n\013CompareType\022\010\n" + - 
"\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n" + - "\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GR" + - "EATER\020\005\022\t\n\005NO_OP\020\006B>\n*org.apache.hadoop." + - "hbase.protobuf.generatedB\013HBaseProtosH\001\240" + - "\001\001" + "ingPair\"\235\001\n\nTableState\022 \n\005state\030\001 \002(\0162\021." + + "TableState.State\022\031\n\005table\030\002 \002(\0132\n.TableN" + + "ame\022\021\n\ttimestamp\030\003 \001(\004\"?\n\005State\022\013\n\007ENABL" + + "ED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENA", + "BLING\020\003\"Z\n\017TableDescriptor\022\034\n\006schema\030\001 \002" + + "(\0132\014.TableSchema\022)\n\005state\030\002 \001(\0162\021.TableS" + + "tate.State:\007ENABLED\"o\n\022ColumnFamilySchem" + + "a\022\014\n\004name\030\001 \002(\014\022#\n\nattributes\030\002 \003(\0132\017.By" + + "tesBytesPair\022&\n\rconfiguration\030\003 \003(\0132\017.Na" + + "meStringPair\"\232\001\n\nRegionInfo\022\021\n\tregion_id" + + "\030\001 \002(\004\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\021" + + "\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007of" + + "fline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nreplica_id" + + "\030\007 \001(\005:\0010\"1\n\014FavoredNodes\022!\n\014favored_nod", + "e\030\001 \003(\0132\013.ServerName\"\225\001\n\017RegionSpecifier" + + "\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.RegionS" + + "pecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpe" + + "cifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_R" + + "EGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022" + + "\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_name\030\001" + + " \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033" + + "\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStrin" + + "gPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNa" + + "meBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014", + "\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006sec" + + "ond\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t" + + "\022\r\n\005value\030\002 \001(\003\"\314\001\n\023SnapshotDescription\022" + + "\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation" + + "_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.SnapshotD" + + "escription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022" + + "\r\n\005owner\030\006 \001(\t\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005" + + "FLUSH\020\001\022\r\n\tSKIPFLUSH\020\002\"}\n\024ProcedureDescr" + + "iption\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 " + + "\001(\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\022&\n\rconfigu", + "ration\030\004 \003(\0132\017.NameStringPair\"\n\n\010EmptyMs" + + "g\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDouble" + + "Msg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg" + + "\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016leas" + + "t_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"" + + "K\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(\014\022&\n\r" + + "configuration\030\002 \003(\0132\017.NameStringPair\"$\n\020" + + 
"RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005*r\n\013Co" + + "mpareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t" + + "\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_E", + "QUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006B>\n*org.ap" + + "ache.hadoop.hbase.protobuf.generatedB\013HB" + + "aseProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -16545,122 +18131,134 @@ public final class HBaseProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableSchema_descriptor, new java.lang.String[] { "TableName", "Attributes", "ColumnFamilies", "Configuration", }); - internal_static_ColumnFamilySchema_descriptor = + internal_static_TableState_descriptor = getDescriptor().getMessageTypes().get(2); + internal_static_TableState_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TableState_descriptor, + new java.lang.String[] { "State", "Table", "Timestamp", }); + internal_static_TableDescriptor_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_TableDescriptor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TableDescriptor_descriptor, + new java.lang.String[] { "Schema", "State", }); + internal_static_ColumnFamilySchema_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_ColumnFamilySchema_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ColumnFamilySchema_descriptor, new java.lang.String[] { "Name", "Attributes", "Configuration", }); internal_static_RegionInfo_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(5); internal_static_RegionInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionInfo_descriptor, new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", "ReplicaId", }); internal_static_FavoredNodes_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(6); internal_static_FavoredNodes_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_FavoredNodes_descriptor, new java.lang.String[] { "FavoredNode", }); internal_static_RegionSpecifier_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(7); internal_static_RegionSpecifier_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionSpecifier_descriptor, new java.lang.String[] { "Type", "Value", }); internal_static_TimeRange_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(8); internal_static_TimeRange_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TimeRange_descriptor, new java.lang.String[] { "From", "To", }); internal_static_ServerName_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(9); internal_static_ServerName_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerName_descriptor, new java.lang.String[] { "HostName", "Port", "StartCode", }); internal_static_Coprocessor_descriptor = - 
getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(10); internal_static_Coprocessor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Coprocessor_descriptor, new java.lang.String[] { "Name", }); internal_static_NameStringPair_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(11); internal_static_NameStringPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameStringPair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_NameBytesPair_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(12); internal_static_NameBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameBytesPair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_BytesBytesPair_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(13); internal_static_BytesBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BytesBytesPair_descriptor, new java.lang.String[] { "First", "Second", }); internal_static_NameInt64Pair_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(14); internal_static_NameInt64Pair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameInt64Pair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_SnapshotDescription_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(15); internal_static_SnapshotDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SnapshotDescription_descriptor, new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", }); internal_static_ProcedureDescription_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(16); internal_static_ProcedureDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ProcedureDescription_descriptor, new java.lang.String[] { "Signature", "Instance", "CreationTime", "Configuration", }); internal_static_EmptyMsg_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(17); internal_static_EmptyMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EmptyMsg_descriptor, new java.lang.String[] { }); internal_static_LongMsg_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(18); internal_static_LongMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_LongMsg_descriptor, new java.lang.String[] { "LongMsg", }); internal_static_DoubleMsg_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_DoubleMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DoubleMsg_descriptor, new java.lang.String[] { "DoubleMsg", }); internal_static_BigDecimalMsg_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_BigDecimalMsg_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BigDecimalMsg_descriptor, new java.lang.String[] { "BigdecimalMsg", }); internal_static_UUID_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_UUID_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UUID_descriptor, new java.lang.String[] { "LeastSigBits", "MostSigBits", }); internal_static_NamespaceDescriptor_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); internal_static_NamespaceDescriptor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NamespaceDescriptor_descriptor, new java.lang.String[] { "Name", "Configuration", }); internal_static_RegionServerInfo_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(23); internal_static_RegionServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionServerInfo_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index ee1ab67..3189bd4 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -36655,6 +36655,1128 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:GetTableNamesResponse) } + public interface GetTableStateRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableName table_name = 1; + /** + * required .TableName table_name = 1; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + } + /** + * Protobuf type {@code GetTableStateRequest} + */ + public static final class GetTableStateRequest extends + com.google.protobuf.GeneratedMessage + implements GetTableStateRequestOrBuilder { + // Use GetTableStateRequest.newBuilder() to construct. 
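/* Editor's note (illustrative; the namespace/qualifier values are made up):
 * the request wraps a required TableName submessage, so a caller builds it
 * with the generated builders:
 *
 *   GetTableStateRequest req = GetTableStateRequest.newBuilder()
 *       .setTableName(HBaseProtos.TableName.newBuilder()
 *           .setNamespace(ByteString.copyFromUtf8("default"))
 *           .setQualifier(ByteString.copyFromUtf8("t1"))
 *           .build())
 *       .build();
 */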
+ private GetTableStateRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetTableStateRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetTableStateRequest defaultInstance; + public static GetTableStateRequest getDefaultInstance() { + return defaultInstance; + } + + public GetTableStateRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetTableStateRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetTableStateRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetTableStateRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required 
.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetTableStateRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class); + } + + // Construct 
using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch 
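/* Editor's note: parsePartialFrom() above may fail after consuming some
 * fields; the finally block still merges whatever was read, so a failed
 * mergeFrom(CodedInputStream) can leave the builder partially populated.
 * A caller needing all-or-nothing behavior can merge into a scratch builder
 * first ("codedIn"/"registry"/"target" are hypothetical):
 *
 *   GetTableStateRequest.Builder scratch = GetTableStateRequest.newBuilder();
 *   scratch.mergeFrom(codedIn, registry);
 *   target.mergeFrom(scratch.buildPartial());  // copy only on success
 */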
(com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName 
table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetTableStateRequest) + } + + static { + defaultInstance = new GetTableStateRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetTableStateRequest) + } + + public interface GetTableStateResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableState table_state = 1; + /** + * required .TableState table_state = 1; + */ + boolean hasTableState(); + /** + * required .TableState table_state = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState(); + /** + * required .TableState table_state = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder(); + } + /** + * Protobuf type {@code GetTableStateResponse} + */ + public static final class GetTableStateResponse extends + com.google.protobuf.GeneratedMessage + implements GetTableStateResponseOrBuilder { + // Use GetTableStateResponse.newBuilder() to construct. + private GetTableStateResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetTableStateResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetTableStateResponse defaultInstance; + public static GetTableStateResponse getDefaultInstance() { + return defaultInstance; + } + + public GetTableStateResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetTableStateResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableState_.toBuilder(); + } + tableState_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableState_); + tableState_ = 
subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetTableStateResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetTableStateResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableState table_state = 1; + public static final int TABLE_STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_; + /** + * required .TableState table_state = 1; + */ + public boolean hasTableState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() { + return tableState_; + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() { + return tableState_; + } + + private void initFields() { + tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableState()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableState().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableState_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableState_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; 
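/* Editor's note (illustrative sketch; "wire" is a hypothetical byte[]):
 * table_state is a required field, so parseFrom() rejects a response
 * without it, and a client can read the enum state directly:
 *
 *   GetTableStateResponse resp = GetTableStateResponse.parseFrom(wire);
 *   HBaseProtos.TableState.State s = resp.getTableState().getState();
 *   boolean enabled = (s == HBaseProtos.TableState.State.ENABLED);
 */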
+ @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) obj; + + boolean result = true; + result = result && (hasTableState() == other.hasTableState()); + if (hasTableState()) { + result = result && getTableState() + .equals(other.getTableState()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableState()) { + hash = (37 * hash) + TABLE_STATE_FIELD_NUMBER; + hash = (53 * hash) + getTableState().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + com.google.protobuf.CodedInputStream input) 
+ throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetTableStateResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableStateFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableStateBuilder_ == null) { + tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + } else { + tableStateBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableStateBuilder_ == null) { + result.tableState_ = tableState_; + } else { + result.tableState_ = tableStateBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()) return this; + if (other.hasTableState()) { + mergeTableState(other.getTableState()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableState()) { + + return false; + } + if (!getTableState().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableState table_state = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder> tableStateBuilder_; + /** + * required .TableState table_state = 1; + */ + public boolean hasTableState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() { + if (tableStateBuilder_ == null) { + return tableState_; + } else { + return tableStateBuilder_.getMessage(); + } + } + /** + * required .TableState table_state = 1; + */ + public Builder setTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) { + if (tableStateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableState_ = value; + onChanged(); + } else { + tableStateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required 
.TableState table_state = 1; + */ + public Builder setTableState( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder builderForValue) { + if (tableStateBuilder_ == null) { + tableState_ = builderForValue.build(); + onChanged(); + } else { + tableStateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableState table_state = 1; + */ + public Builder mergeTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) { + if (tableStateBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableState_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) { + tableState_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder(tableState_).mergeFrom(value).buildPartial(); + } else { + tableState_ = value; + } + onChanged(); + } else { + tableStateBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableState table_state = 1; + */ + public Builder clearTableState() { + if (tableStateBuilder_ == null) { + tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + onChanged(); + } else { + tableStateBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder getTableStateBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableStateFieldBuilder().getBuilder(); + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() { + if (tableStateBuilder_ != null) { + return tableStateBuilder_.getMessageOrBuilder(); + } else { + return tableState_; + } + } + /** + * required .TableState table_state = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder> + getTableStateFieldBuilder() { + if (tableStateBuilder_ == null) { + tableStateBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder>( + tableState_, + getParentForChildren(), + isClean()); + tableState_ = null; + } + return tableStateBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetTableStateResponse) + } + + static { + defaultInstance = new GetTableStateResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetTableStateResponse) + } + public interface GetClusterStatusRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { } @@ -41176,6 +42298,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetTableState(.GetTableStateRequest) returns (.GetTableStateResponse); + * + *
+       ** returns table state 
+       * </pre>
+ */ + public abstract void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -41525,6 +42659,14 @@ public final class MasterProtos { impl.listTableNamesByNamespace(controller, request, done); } + @java.lang.Override + public void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done) { + impl.getTableState(controller, request, done); + } + }; } @@ -41633,6 +42775,8 @@ public final class MasterProtos { return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); case 42: return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); + case 43: + return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -41733,6 +42877,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -41833,6 +42979,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -42383,6 +43531,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetTableState(.GetTableStateRequest) returns (.GetTableStateResponse); + * + *
+     ** returns table state 
+     * </pre>
+ */ + public abstract void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -42620,6 +43780,11 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 43: + this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -42720,6 +43885,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -42820,6 +43987,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -43485,6 +44654,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance())); } + + public void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(43), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -43707,6 +44891,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -44231,6 +45420,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(43), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:MasterService) @@ -44607,6 +45808,16 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetTableNamesResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetTableStateRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetTableStateRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetTableStateResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetTableStateResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_GetClusterStatusRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -44752,97 +45963,101 @@ public final class MasterProtos { "ponse\022\"\n\014table_schema\030\001 \003(\0132\014.TableSchem" + "a\"\026\n\024GetTableNamesRequest\"8\n\025GetTableNam" + "esResponse\022\037\n\013table_names\030\001 \003(\0132\n.TableN" + - "ame\"\031\n\027GetClusterStatusRequest\"B\n\030GetClu" + - "sterStatusResponse\022&\n\016cluster_status\030\001 \002" + - "(\0132\016.ClusterStatus\"\030\n\026IsMasterRunningReq", - "uest\"4\n\027IsMasterRunningResponse\022\031\n\021is_ma" + - "ster_running\030\001 \002(\010\"@\n\024ExecProcedureReque" + - "st\022(\n\tprocedure\030\001 \002(\0132\025.ProcedureDescrip" + - "tion\"F\n\025ExecProcedureResponse\022\030\n\020expecte" + - "d_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026" + - "IsProcedureDoneRequest\022(\n\tprocedure\030\001 \001(" + - "\0132\025.ProcedureDescription\"W\n\027IsProcedureD" + - "oneResponse\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010snap" + - "shot\030\002 \001(\0132\025.ProcedureDescription2\365\027\n\rMa" + - "sterService\022S\n\024GetSchemaAlterStatus\022\034.Ge", - "tSchemaAlterStatusRequest\032\035.GetSchemaAlt" + - "erStatusResponse\022P\n\023GetTableDescriptors\022" + - "\033.GetTableDescriptorsRequest\032\034.GetTableD" + - "escriptorsResponse\022>\n\rGetTableNames\022\025.Ge" + - "tTableNamesRequest\032\026.GetTableNamesRespon" + - "se\022G\n\020GetClusterStatus\022\030.GetClusterStatu" + - "sRequest\032\031.GetClusterStatusResponse\022D\n\017I" + - "sMasterRunning\022\027.IsMasterRunningRequest\032" + - "\030.IsMasterRunningResponse\0222\n\tAddColumn\022\021" + - ".AddColumnRequest\032\022.AddColumnResponse\022;\n", - "\014DeleteColumn\022\024.DeleteColumnRequest\032\025.De" + - "leteColumnResponse\022;\n\014ModifyColumn\022\024.Mod" + - "ifyColumnRequest\032\025.ModifyColumnResponse\022" + - "5\n\nMoveRegion\022\022.MoveRegionRequest\032\023.Move" + - "RegionResponse\022Y\n\026DispatchMergingRegions" + - "\022\036.DispatchMergingRegionsRequest\032\037.Dispa" + - "tchMergingRegionsResponse\022;\n\014AssignRegio" + - "n\022\024.AssignRegionRequest\032\025.AssignRegionRe" + - 
"sponse\022A\n\016UnassignRegion\022\026.UnassignRegio" + - "nRequest\032\027.UnassignRegionResponse\022>\n\rOff", - "lineRegion\022\025.OfflineRegionRequest\032\026.Offl" + - "ineRegionResponse\0228\n\013DeleteTable\022\023.Delet" + - "eTableRequest\032\024.DeleteTableResponse\022>\n\rt" + - "runcateTable\022\025.TruncateTableRequest\032\026.Tr" + - "uncateTableResponse\0228\n\013EnableTable\022\023.Ena" + - "bleTableRequest\032\024.EnableTableResponse\022;\n" + - "\014DisableTable\022\024.DisableTableRequest\032\025.Di" + - "sableTableResponse\0228\n\013ModifyTable\022\023.Modi" + - "fyTableRequest\032\024.ModifyTableResponse\0228\n\013" + - "CreateTable\022\023.CreateTableRequest\032\024.Creat", - "eTableResponse\022/\n\010Shutdown\022\020.ShutdownReq" + - "uest\032\021.ShutdownResponse\0225\n\nStopMaster\022\022." + - "StopMasterRequest\032\023.StopMasterResponse\022," + - "\n\007Balance\022\017.BalanceRequest\032\020.BalanceResp" + - "onse\022M\n\022SetBalancerRunning\022\032.SetBalancer" + - "RunningRequest\032\033.SetBalancerRunningRespo" + - "nse\022A\n\016RunCatalogScan\022\026.RunCatalogScanRe" + - "quest\032\027.RunCatalogScanResponse\022S\n\024Enable" + - "CatalogJanitor\022\034.EnableCatalogJanitorReq" + - "uest\032\035.EnableCatalogJanitorResponse\022\\\n\027I", - "sCatalogJanitorEnabled\022\037.IsCatalogJanito" + - "rEnabledRequest\032 .IsCatalogJanitorEnable" + - "dResponse\022L\n\021ExecMasterService\022\032.Coproce" + - "ssorServiceRequest\032\033.CoprocessorServiceR" + - "esponse\022/\n\010Snapshot\022\020.SnapshotRequest\032\021." + - "SnapshotResponse\022V\n\025GetCompletedSnapshot" + - "s\022\035.GetCompletedSnapshotsRequest\032\036.GetCo" + - "mpletedSnapshotsResponse\022A\n\016DeleteSnapsh" + - "ot\022\026.DeleteSnapshotRequest\032\027.DeleteSnaps" + - "hotResponse\022A\n\016IsSnapshotDone\022\026.IsSnapsh", - "otDoneRequest\032\027.IsSnapshotDoneResponse\022D" + - "\n\017RestoreSnapshot\022\027.RestoreSnapshotReque" + - "st\032\030.RestoreSnapshotResponse\022V\n\025IsRestor" + - "eSnapshotDone\022\035.IsRestoreSnapshotDoneReq" + - "uest\032\036.IsRestoreSnapshotDoneResponse\022>\n\r" + - "ExecProcedure\022\025.ExecProcedureRequest\032\026.E" + - "xecProcedureResponse\022E\n\024ExecProcedureWit" + - "hRet\022\025.ExecProcedureRequest\032\026.ExecProced" + - "ureResponse\022D\n\017IsProcedureDone\022\027.IsProce" + - "dureDoneRequest\032\030.IsProcedureDoneRespons", - "e\022D\n\017ModifyNamespace\022\027.ModifyNamespaceRe" + - "quest\032\030.ModifyNamespaceResponse\022D\n\017Creat" + - "eNamespace\022\027.CreateNamespaceRequest\032\030.Cr" + - "eateNamespaceResponse\022D\n\017DeleteNamespace" + - "\022\027.DeleteNamespaceRequest\032\030.DeleteNamesp" + - "aceResponse\022Y\n\026GetNamespaceDescriptor\022\036." 
+ - "GetNamespaceDescriptorRequest\032\037.GetNames" + - "paceDescriptorResponse\022_\n\030ListNamespaceD" + - "escriptors\022 .ListNamespaceDescriptorsReq" + - "uest\032!.ListNamespaceDescriptorsResponse\022", - "t\n\037ListTableDescriptorsByNamespace\022\'.Lis" + - "tTableDescriptorsByNamespaceRequest\032(.Li" + - "stTableDescriptorsByNamespaceResponse\022b\n" + - "\031ListTableNamesByNamespace\022!.ListTableNa" + - "mesByNamespaceRequest\032\".ListTableNamesBy" + - "NamespaceResponseBB\n*org.apache.hadoop.h" + - "base.protobuf.generatedB\014MasterProtosH\001\210" + - "\001\001\240\001\001" + "ame\"6\n\024GetTableStateRequest\022\036\n\ntable_nam" + + "e\030\001 \002(\0132\n.TableName\"9\n\025GetTableStateResp" + + "onse\022 \n\013table_state\030\001 \002(\0132\013.TableState\"\031", + "\n\027GetClusterStatusRequest\"B\n\030GetClusterS" + + "tatusResponse\022&\n\016cluster_status\030\001 \002(\0132\016." + + "ClusterStatus\"\030\n\026IsMasterRunningRequest\"" + + "4\n\027IsMasterRunningResponse\022\031\n\021is_master_" + + "running\030\001 \002(\010\"@\n\024ExecProcedureRequest\022(\n" + + "\tprocedure\030\001 \002(\0132\025.ProcedureDescription\"" + + "F\n\025ExecProcedureResponse\022\030\n\020expected_tim" + + "eout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026IsPro" + + "cedureDoneRequest\022(\n\tprocedure\030\001 \001(\0132\025.P" + + "rocedureDescription\"W\n\027IsProcedureDoneRe", + "sponse\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010snapshot\030" + + "\002 \001(\0132\025.ProcedureDescription2\265\030\n\rMasterS" + + "ervice\022S\n\024GetSchemaAlterStatus\022\034.GetSche" + + "maAlterStatusRequest\032\035.GetSchemaAlterSta" + + "tusResponse\022P\n\023GetTableDescriptors\022\033.Get" + + "TableDescriptorsRequest\032\034.GetTableDescri" + + "ptorsResponse\022>\n\rGetTableNames\022\025.GetTabl" + + "eNamesRequest\032\026.GetTableNamesResponse\022G\n" + + "\020GetClusterStatus\022\030.GetClusterStatusRequ" + + "est\032\031.GetClusterStatusResponse\022D\n\017IsMast", + "erRunning\022\027.IsMasterRunningRequest\032\030.IsM" + + "asterRunningResponse\0222\n\tAddColumn\022\021.AddC" + + "olumnRequest\032\022.AddColumnResponse\022;\n\014Dele" + + "teColumn\022\024.DeleteColumnRequest\032\025.DeleteC" + + "olumnResponse\022;\n\014ModifyColumn\022\024.ModifyCo" + + "lumnRequest\032\025.ModifyColumnResponse\0225\n\nMo" + + "veRegion\022\022.MoveRegionRequest\032\023.MoveRegio" + + "nResponse\022Y\n\026DispatchMergingRegions\022\036.Di" + + "spatchMergingRegionsRequest\032\037.DispatchMe" + + "rgingRegionsResponse\022;\n\014AssignRegion\022\024.A", + "ssignRegionRequest\032\025.AssignRegionRespons" + + "e\022A\n\016UnassignRegion\022\026.UnassignRegionRequ" + + "est\032\027.UnassignRegionResponse\022>\n\rOfflineR" + + "egion\022\025.OfflineRegionRequest\032\026.OfflineRe" + + "gionResponse\0228\n\013DeleteTable\022\023.DeleteTabl" + + "eRequest\032\024.DeleteTableResponse\022>\n\rtrunca" + + "teTable\022\025.TruncateTableRequest\032\026.Truncat" + + "eTableResponse\0228\n\013EnableTable\022\023.EnableTa" + + "bleRequest\032\024.EnableTableResponse\022;\n\014Disa" + + "bleTable\022\024.DisableTableRequest\032\025.Disable", + "TableResponse\0228\n\013ModifyTable\022\023.ModifyTab" + + "leRequest\032\024.ModifyTableResponse\0228\n\013Creat" + + "eTable\022\023.CreateTableRequest\032\024.CreateTabl" + + "eResponse\022/\n\010Shutdown\022\020.ShutdownRequest\032" + + "\021.ShutdownResponse\0225\n\nStopMaster\022\022.StopM" + + 
"asterRequest\032\023.StopMasterResponse\022,\n\007Bal" + + "ance\022\017.BalanceRequest\032\020.BalanceResponse\022" + + "M\n\022SetBalancerRunning\022\032.SetBalancerRunni" + + "ngRequest\032\033.SetBalancerRunningResponse\022A" + + "\n\016RunCatalogScan\022\026.RunCatalogScanRequest", + "\032\027.RunCatalogScanResponse\022S\n\024EnableCatal" + + "ogJanitor\022\034.EnableCatalogJanitorRequest\032" + + "\035.EnableCatalogJanitorResponse\022\\\n\027IsCata" + + "logJanitorEnabled\022\037.IsCatalogJanitorEnab" + + "ledRequest\032 .IsCatalogJanitorEnabledResp" + + "onse\022L\n\021ExecMasterService\022\032.CoprocessorS" + + "erviceRequest\032\033.CoprocessorServiceRespon" + + "se\022/\n\010Snapshot\022\020.SnapshotRequest\032\021.Snaps" + + "hotResponse\022V\n\025GetCompletedSnapshots\022\035.G" + + "etCompletedSnapshotsRequest\032\036.GetComplet", + "edSnapshotsResponse\022A\n\016DeleteSnapshot\022\026." + + "DeleteSnapshotRequest\032\027.DeleteSnapshotRe" + + "sponse\022A\n\016IsSnapshotDone\022\026.IsSnapshotDon" + + "eRequest\032\027.IsSnapshotDoneResponse\022D\n\017Res" + + "toreSnapshot\022\027.RestoreSnapshotRequest\032\030." + + "RestoreSnapshotResponse\022V\n\025IsRestoreSnap" + + "shotDone\022\035.IsRestoreSnapshotDoneRequest\032" + + "\036.IsRestoreSnapshotDoneResponse\022>\n\rExecP" + + "rocedure\022\025.ExecProcedureRequest\032\026.ExecPr" + + "ocedureResponse\022E\n\024ExecProcedureWithRet\022", + "\025.ExecProcedureRequest\032\026.ExecProcedureRe" + + "sponse\022D\n\017IsProcedureDone\022\027.IsProcedureD" + + "oneRequest\032\030.IsProcedureDoneResponse\022D\n\017" + + "ModifyNamespace\022\027.ModifyNamespaceRequest" + + "\032\030.ModifyNamespaceResponse\022D\n\017CreateName" + + "space\022\027.CreateNamespaceRequest\032\030.CreateN" + + "amespaceResponse\022D\n\017DeleteNamespace\022\027.De" + + "leteNamespaceRequest\032\030.DeleteNamespaceRe" + + "sponse\022Y\n\026GetNamespaceDescriptor\022\036.GetNa" + + "mespaceDescriptorRequest\032\037.GetNamespaceD", + "escriptorResponse\022_\n\030ListNamespaceDescri" + + "ptors\022 .ListNamespaceDescriptorsRequest\032" + + "!.ListNamespaceDescriptorsResponse\022t\n\037Li" + + "stTableDescriptorsByNamespace\022\'.ListTabl" + + "eDescriptorsByNamespaceRequest\032(.ListTab" + + "leDescriptorsByNamespaceResponse\022b\n\031List" + + "TableNamesByNamespace\022!.ListTableNamesBy" + + "NamespaceRequest\032\".ListTableNamesByNames" + + "paceResponse\022>\n\rGetTableState\022\025.GetTable" + + "StateRequest\032\026.GetTableStateResponseBB\n*", + "org.apache.hadoop.hbase.protobuf.generat" + + "edB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -45293,50 +46508,62 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetTableNamesResponse_descriptor, new java.lang.String[] { "TableNames", }); - internal_static_GetClusterStatusRequest_descriptor = + internal_static_GetTableStateRequest_descriptor = getDescriptor().getMessageTypes().get(74); + internal_static_GetTableStateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTableStateRequest_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_GetTableStateResponse_descriptor = + getDescriptor().getMessageTypes().get(75); + internal_static_GetTableStateResponse_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTableStateResponse_descriptor, + new java.lang.String[] { "TableState", }); + internal_static_GetClusterStatusRequest_descriptor = + getDescriptor().getMessageTypes().get(76); internal_static_GetClusterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetClusterStatusRequest_descriptor, new java.lang.String[] { }); internal_static_GetClusterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(75); + getDescriptor().getMessageTypes().get(77); internal_static_GetClusterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetClusterStatusResponse_descriptor, new java.lang.String[] { "ClusterStatus", }); internal_static_IsMasterRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(76); + getDescriptor().getMessageTypes().get(78); internal_static_IsMasterRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsMasterRunningRequest_descriptor, new java.lang.String[] { }); internal_static_IsMasterRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(77); + getDescriptor().getMessageTypes().get(79); internal_static_IsMasterRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsMasterRunningResponse_descriptor, new java.lang.String[] { "IsMasterRunning", }); internal_static_ExecProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(78); + getDescriptor().getMessageTypes().get(80); internal_static_ExecProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ExecProcedureRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_ExecProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(79); + getDescriptor().getMessageTypes().get(81); internal_static_ExecProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ExecProcedureResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", "ReturnData", }); internal_static_IsProcedureDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(80); + getDescriptor().getMessageTypes().get(82); internal_static_IsProcedureDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsProcedureDoneRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_IsProcedureDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(81); + getDescriptor().getMessageTypes().get(83); internal_static_IsProcedureDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsProcedureDoneResponse_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index 6da497e..e116f73 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -3242,619 +3242,6 @@ public final class ZooKeeperProtos { // @@protoc_insertion_point(class_scope:SplitLogTask) } - public interface TableOrBuilder - extends com.google.protobuf.MessageOrBuilder 
{ - - // required .Table.State state = 1 [default = ENABLED]; - /** - * required .Table.State state = 1 [default = ENABLED]; - * - *
-     * This is the table's state.  If no znode for a table,
-     * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-     * for more.
-     * </pre>
- */ - boolean hasState(); - /** - * required .Table.State state = 1 [default = ENABLED]; - * - *
-     * This is the table's state.  If no znode for a table,
-     * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-     * for more.
-     * </pre>
- */ - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState(); - } - /** - * Protobuf type {@code Table} - * - *
-   **
-   * The znode that holds state of table.
-   * </pre>
- */ - public static final class Table extends - com.google.protobuf.GeneratedMessage - implements TableOrBuilder { - // Use Table.newBuilder() to construct. - private Table(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Table(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Table defaultInstance; - public static Table getDefaultInstance() { - return defaultInstance; - } - - public Table getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Table( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - state_ = value; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser
() { - public Table parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Table(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser
getParserForType() { - return PARSER; - } - - /** - * Protobuf enum {@code Table.State} - * - *
-     * Table's current state
-     * </pre>
- */ - public enum State - implements com.google.protobuf.ProtocolMessageEnum { - /** - * ENABLED = 0; - */ - ENABLED(0, 0), - /** - * DISABLED = 1; - */ - DISABLED(1, 1), - /** - * DISABLING = 2; - */ - DISABLING(2, 2), - /** - * ENABLING = 3; - */ - ENABLING(3, 3), - ; - - /** - * ENABLED = 0; - */ - public static final int ENABLED_VALUE = 0; - /** - * DISABLED = 1; - */ - public static final int DISABLED_VALUE = 1; - /** - * DISABLING = 2; - */ - public static final int DISABLING_VALUE = 2; - /** - * ENABLING = 3; - */ - public static final int ENABLING_VALUE = 3; - - - public final int getNumber() { return value; } - - public static State valueOf(int value) { - switch (value) { - case 0: return ENABLED; - case 1: return DISABLED; - case 2: return DISABLING; - case 3: return ENABLING; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public State findValueByNumber(int number) { - return State.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDescriptor().getEnumTypes().get(0); - } - - private static final State[] VALUES = values(); - - public static State valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private State(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:Table.State) - } - - private int bitField0_; - // required .Table.State state = 1 [default = ENABLED]; - public static final int STATE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_; - /** - * required .Table.State state = 1 [default = ENABLED]; - * - *
-     * This is the table's state.  If no znode for a table,
-     * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-     * for more.
-     * </pre>
- */ - public boolean hasState() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .Table.State state = 1 [default = ENABLED]; - * - *
-     * This is the table's state.  If no znode for a table,
-     * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-     * for more.
-     * </pre>
- */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() { - return state_; - } - - private void initFields() { - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasState()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, state_.getNumber()); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, state_.getNumber()); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) obj; - - boolean result = true; - result = result && (hasState() == other.hasState()); - if (hasState()) { - result = result && - (getState() == other.getState()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasState()) { - hash = (37 * hash) + STATE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getState()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code Table} - * - *
-     **
-     * The znode that holds state of table.
-     * </pre>
- */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.state_ = state_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance()) return this; - if (other.hasState()) { - setState(other.getState()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasState()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - 
com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required .Table.State state = 1 [default = ENABLED]; - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; - /** - * required .Table.State state = 1 [default = ENABLED]; - * - *
-       * This is the table's state.  If no znode for a table,
-       * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-       * for more.
-       * </pre>
- */ - public boolean hasState() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required .Table.State state = 1 [default = ENABLED]; - * - *
-       * This is the table's state.  If no znode for a table,
-       * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-       * for more.
-       * </pre>
- */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() { - return state_; - } - /** - * required .Table.State state = 1 [default = ENABLED]; - * - *
-       * This is the table's state.  If no znode for a table,
-       * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-       * for more.
-       * </pre>
- */ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - state_ = value; - onChanged(); - return this; - } - /** - * required .Table.State state = 1 [default = ENABLED]; - * - *
-       * This is the table's state.  If no znode for a table,
-       * its state is presumed enabled.  See o.a.h.h.zookeeper.ZKTable class
-       * for more.
-       * </pre>
- */ - public Builder clearState() { - bitField0_ = (bitField0_ & ~0x00000001); - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:Table) - } - - static { - defaultInstance = new Table(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:Table) - } - public interface ReplicationPeerOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -9512,11 +8899,6 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SplitLogTask_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor - internal_static_Table_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_Table_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor internal_static_ReplicationPeer_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -9573,27 +8955,24 @@ public final class ZooKeeperProtos { "UNASSIGNED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n" + "\004DONE\020\003\022\007\n\003ERR\020\004\">\n\014RecoveryMode\022\013\n\007UNKN" + "OWN\020\000\022\021\n\rLOG_SPLITTING\020\001\022\016\n\nLOG_REPLAY\020\002" + - "\"n\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table.State:\007" + - "ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLE" + - "D\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"\215\001\n\017Rep" + - "licationPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027repl" + - "icationEndpointImpl\030\002 \001(\t\022\035\n\004data\030\003 \003(\0132" + - "\017.BytesBytesPair\022&\n\rconfiguration\030\004 \003(\0132" + - "\017.NameStringPair\"^\n\020ReplicationState\022&\n\005", - "state\030\001 \002(\0162\027.ReplicationState.State\"\"\n\005" + - "State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027Repl" + - "icationHLogPosition\022\020\n\010position\030\001 \002(\003\"%\n" + - "\017ReplicationLock\022\022\n\nlock_owner\030\001 \002(\t\"\230\001\n" + - "\tTableLock\022\036\n\ntable_name\030\001 \001(\0132\n.TableNa" + - "me\022\037\n\nlock_owner\030\002 \001(\0132\013.ServerName\022\021\n\tt" + - "hread_id\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(\010\022\017\n\007pur" + - "pose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\";\n\017Store" + - "SequenceId\022\023\n\013family_name\030\001 \002(\014\022\023\n\013seque" + - "nce_id\030\002 \002(\004\"g\n\026RegionStoreSequenceIds\022 ", - "\n\030last_flushed_sequence_id\030\001 \002(\004\022+\n\021stor" + - "e_sequence_id\030\002 \003(\0132\020.StoreSequenceIdBE\n" + - "*org.apache.hadoop.hbase.protobuf.genera" + - "tedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" + "\"\215\001\n\017ReplicationPeer\022\022\n\nclusterkey\030\001 \002(\t" + + "\022\037\n\027replicationEndpointImpl\030\002 \001(\t\022\035\n\004dat" + + "a\030\003 \003(\0132\017.BytesBytesPair\022&\n\rconfiguratio" + + "n\030\004 \003(\0132\017.NameStringPair\"^\n\020ReplicationS" + + "tate\022&\n\005state\030\001 \002(\0162\027.ReplicationState.S" + + "tate\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001" + + "\"+\n\027ReplicationHLogPosition\022\020\n\010position\030", + "\001 \002(\003\"%\n\017ReplicationLock\022\022\n\nlock_owner\030\001" + + " \002(\t\"\230\001\n\tTableLock\022\036\n\ntable_name\030\001 \001(\0132\n" + + 
".TableName\022\037\n\nlock_owner\030\002 \001(\0132\013.ServerN" + + "ame\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(" + + "\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\"" + + ";\n\017StoreSequenceId\022\023\n\013family_name\030\001 \002(\014\022" + + "\023\n\013sequence_id\030\002 \002(\004\"g\n\026RegionStoreSeque" + + "nceIds\022 \n\030last_flushed_sequence_id\030\001 \002(\004" + + "\022+\n\021store_sequence_id\030\002 \003(\0132\020.StoreSeque" + + "nceIdBE\n*org.apache.hadoop.hbase.protobu", + "f.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -9624,50 +9003,44 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SplitLogTask_descriptor, new java.lang.String[] { "State", "ServerName", "Mode", }); - internal_static_Table_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_Table_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_Table_descriptor, - new java.lang.String[] { "State", }); internal_static_ReplicationPeer_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(4); internal_static_ReplicationPeer_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationPeer_descriptor, new java.lang.String[] { "Clusterkey", "ReplicationEndpointImpl", "Data", "Configuration", }); internal_static_ReplicationState_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(5); internal_static_ReplicationState_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationState_descriptor, new java.lang.String[] { "State", }); internal_static_ReplicationHLogPosition_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(6); internal_static_ReplicationHLogPosition_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationHLogPosition_descriptor, new java.lang.String[] { "Position", }); internal_static_ReplicationLock_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(7); internal_static_ReplicationLock_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationLock_descriptor, new java.lang.String[] { "LockOwner", }); internal_static_TableLock_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(8); internal_static_TableLock_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableLock_descriptor, new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", }); internal_static_StoreSequenceId_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(9); internal_static_StoreSequenceId_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StoreSequenceId_descriptor, new java.lang.String[] { "FamilyName", "SequenceId", }); internal_static_RegionStoreSequenceIds_descriptor = - getDescriptor().getMessageTypes().get(11); + 
getDescriptor().getMessageTypes().get(10); internal_static_RegionStoreSequenceIds_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionStoreSequenceIds_descriptor, diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto index ca09777..252f532 100644 --- a/hbase-protocol/src/main/protobuf/HBase.proto +++ b/hbase-protocol/src/main/protobuf/HBase.proto @@ -44,6 +44,27 @@ message TableSchema { repeated NameStringPair configuration = 4; } +/** Denotes state of the table */ +message TableState { + // Table's current state + enum State { + ENABLED = 0; + DISABLED = 1; + DISABLING = 2; + ENABLING = 3; + } + // This is the table's state. + required State state = 1; + required TableName table = 2; + optional uint64 timestamp = 3; +} + +/** On HDFS representation of table state. */ +message TableDescriptor { + required TableSchema schema = 1; + optional TableState.State state = 2 [ default = ENABLED ]; +} + /** * Column Family Schema * Inspired by the rest ColumSchemaMessage diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 94ea860..85daf43 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -326,6 +326,14 @@ message GetTableNamesResponse { repeated TableName table_names = 1; } +message GetTableStateRequest { + required TableName table_name = 1; +} + +message GetTableStateResponse { + required TableState table_state = 1; +} + message GetClusterStatusRequest { } @@ -565,4 +573,8 @@ service MasterService { /** returns a list of tables for a given namespace*/ rpc ListTableNamesByNamespace(ListTableNamesByNamespaceRequest) returns(ListTableNamesByNamespaceResponse); + + /** returns table state */ + rpc GetTableState(GetTableStateRequest) + returns(GetTableStateResponse); } diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto index 8acd778..efa2296 100644 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -86,23 +86,6 @@ message SplitLogTask { } /** - * The znode that holds state of table. - */ -message Table { - // Table's current state - enum State { - ENABLED = 0; - DISABLED = 1; - DISABLING = 2; - ENABLING = 3; - } - // This is the table's state. If no znode for a table, - // its state is presumed enabled. See o.a.h.h.zookeeper.ZKTable class - // for more. - required State state = 1 [default = ENABLED]; -} - -/** * Used by replication. Holds a replication peer key. */ message ReplicationPeer { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java index 2642e29..1019b2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java @@ -55,12 +55,4 @@ public interface CoordinatedStateManager { * @return instance of Server coordinated state manager runs within */ Server getServer(); - - /** - * Returns implementation of TableStateManager. 
- * @throws InterruptedException if operation is interrupted - * @throws CoordinatedStateException if error happens in underlying coordination mechanism - */ - TableStateManager getTableStateManager() throws InterruptedException, - CoordinatedStateException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java new file mode 100644 index 0000000..bf38ee5 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; + +/** + * Class represents table state on HDFS. + */ +@InterfaceAudience.Private +public class TableDescriptor { + private HTableDescriptor hTableDescriptor; + private TableState.State tableState; + + /** + * Creates TableDescriptor with all fields. + * @param hTableDescriptor HTableDescriptor to use + * @param tableState table state + */ + public TableDescriptor(HTableDescriptor hTableDescriptor, + TableState.State tableState) { + this.hTableDescriptor = hTableDescriptor; + this.tableState = tableState; + } + + /** + * Creates TableDescriptor with Enabled table. + * @param hTableDescriptor HTableDescriptor to use + */ + @VisibleForTesting + public TableDescriptor(HTableDescriptor hTableDescriptor) { + this(hTableDescriptor, TableState.State.ENABLED); + } + + /** + * Associated HTableDescriptor + * @return instance of HTableDescriptor + */ + public HTableDescriptor getHTableDescriptor() { + return hTableDescriptor; + } + + public void setHTableDescriptor(HTableDescriptor hTableDescriptor) { + this.hTableDescriptor = hTableDescriptor; + } + + public TableState.State getTableState() { + return tableState; + } + + public void setTableState(TableState.State tableState) { + this.tableState = tableState; + } + + /** + * Convert to PB. 
+ */ + public HBaseProtos.TableDescriptor convert() { + return HBaseProtos.TableDescriptor.newBuilder() + .setSchema(hTableDescriptor.convert()) + .setState(tableState.convert()) + .build(); + } + + /** + * Convert from PB + */ + public static TableDescriptor convert(HBaseProtos.TableDescriptor proto) { + HTableDescriptor hTableDescriptor = HTableDescriptor.convert(proto.getSchema()); + TableState.State state = TableState.State.convert(proto.getState()); + return new TableDescriptor(hTableDescriptor, state); + } + + /** + * @return This instance serialized with pb with pb magic prefix + * @see #parseFrom(byte[]) + */ + public byte [] toByteArray() { + return ProtobufUtil.prependPBMagic(convert().toByteArray()); + } + + /** + * @param bytes A pb serialized {@link TableDescriptor} instance with pb magic prefix + * @see #toByteArray() + */ + public static TableDescriptor parseFrom(final byte [] bytes) + throws DeserializationException, IOException { + if (!ProtobufUtil.isPBMagicPrefix(bytes)) { + throw new DeserializationException("Expected PB encoded TableDescriptor"); + } + int pblen = ProtobufUtil.lengthOfPBMagic(); + HBaseProtos.TableDescriptor.Builder builder = HBaseProtos.TableDescriptor.newBuilder(); + HBaseProtos.TableDescriptor ts; + try { + ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build(); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return convert(ts); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + TableDescriptor that = (TableDescriptor) o; + + if (hTableDescriptor != null ? + !hTableDescriptor.equals(that.hTableDescriptor) : + that.hTableDescriptor != null) return false; + if (tableState != that.tableState) return false; + + return true; + } + + @Override + public int hashCode() { + int result = hTableDescriptor != null ? hTableDescriptor.hashCode() : 0; + result = 31 * result + (tableState != null ? tableState.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "TableDescriptor{" + + "hTableDescriptor=" + hTableDescriptor + + ", tableState=" + tableState + + '}'; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java index a0c246b..ff5f0b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; /** * Get, remove and modify table descriptors. @@ -38,6 +37,14 @@ public interface TableDescriptors { throws IOException; /** + * @param tableName + * @return TableDescriptor for tablename + * @throws IOException + */ + TableDescriptor getDescriptor(final TableName tableName) + throws IOException; + + /** * Get Map of all NamespaceDescriptors for a given namespace. * @return Map of all descriptors. * @throws IOException @@ -55,6 +62,15 @@ public interface TableDescriptors { throws IOException; /** + * Get Map of all TableDescriptors. Populates the descriptor cache as a + * side effect. + * @return Map of all descriptors. 
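// ---------------------------------------------------------------------------
// Minimal round-trip sketch (not part of the patch) for the TableDescriptor
// class above; `htd` stands in for any existing HTableDescriptor instance.
TableDescriptor td = new TableDescriptor(htd, TableState.State.DISABLED);
byte[] bytes = td.toByteArray();                 // pb magic prefix + serialized proto
TableDescriptor restored = TableDescriptor.parseFrom(bytes);
assert restored.equals(td);                      // schema and state both survive the trip
// parseFrom() throws DeserializationException if the pb magic prefix is missing.
// ---------------------------------------------------------------------------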
+ * @throws IOException + */ + Map getAllDescriptors() + throws IOException; + + /** * Add or update descriptor * @param htd Descriptor to set into TableDescriptors * @throws IOException @@ -63,6 +79,14 @@ public interface TableDescriptors { throws IOException; /** + * Add or update descriptor + * @param htd Descriptor to set into TableDescriptors + * @throws IOException + */ + void add(final TableDescriptor htd) + throws IOException; + + /** * @param tablename * @return Instance of table descriptor or null if none found. * @throws IOException diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java deleted file mode 100644 index 56cd4ae..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java +++ /dev/null @@ -1,115 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; - -import java.io.InterruptedIOException; -import java.util.Set; - -/** - * Helper class for table state management for operations running inside - * RegionServer or HMaster. - * Depending on implementation, fetches information from HBase system table, - * local data store, ZooKeeper ensemble or somewhere else. - * Code running on client side (with no coordinated state context) shall instead use - * {@link org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader} - */ -@InterfaceAudience.Private -public interface TableStateManager { - - /** - * Sets the table into desired state. Fails silently if the table is already in this state. - * @param tableName table to process - * @param state new state of this table - * @throws CoordinatedStateException if error happened when trying to set table state - */ - void setTableState(TableName tableName, ZooKeeperProtos.Table.State state) - throws CoordinatedStateException; - - /** - * Sets the specified table into the newState, but only if the table is already in - * one of the possibleCurrentStates (otherwise no operation is performed). - * @param tableName table to process - * @param newState new state for the table - * @param states table should be in one of these states for the operation - * to be performed - * @throws CoordinatedStateException if error happened while performing operation - * @return true if operation succeeded, false otherwise - */ - boolean setTableStateIfInStates(TableName tableName, ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... 
states) - throws CoordinatedStateException; - - /** - * Sets the specified table into the newState, but only if the table is NOT in - * one of the possibleCurrentStates (otherwise no operation is performed). - * @param tableName table to process - * @param newState new state for the table - * @param states table should NOT be in one of these states for the operation - * to be performed - * @throws CoordinatedStateException if error happened while performing operation - * @return true if operation succeeded, false otherwise - */ - boolean setTableStateIfNotInStates(TableName tableName, ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... states) - throws CoordinatedStateException; - - /** - * @return true if the table is in any one of the listed states, false otherwise. - */ - boolean isTableState(TableName tableName, ZooKeeperProtos.Table.State... states); - - /** - * Mark table as deleted. Fails silently if the table is not currently marked as disabled. - * @param tableName table to be deleted - * @throws CoordinatedStateException if error happened while performing operation - */ - void setDeletedTable(TableName tableName) throws CoordinatedStateException; - - /** - * Checks if table is present. - * - * @param tableName table we're checking - * @return true if the table is present, false otherwise - */ - boolean isTablePresent(TableName tableName); - - /** - * @return set of tables which are in any one of the listed states, empty Set if none - */ - Set getTablesInStates(ZooKeeperProtos.Table.State... states) - throws InterruptedIOException, CoordinatedStateException; - - /** - * If the table is found in the given state the in-memory state is removed. This - * helps in cases where CreateTable is to be retried by the client in case of - * failures. If deletePermanentState is true - the flag kept permanently is - * also reset. 
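// ---------------------------------------------------------------------------
// Note (illustrative, not part of the patch): the ZooKeeper-backed interface
// removed here is superseded by org.apache.hadoop.hbase.master.TableStateManager,
// introduced later in this patch. A hedged sketch of the equivalent conditional
// transition against the new API, with `tsm` standing in for a started manager:
boolean nowDisabling = tsm.setTableStateIfInStates(
    tableName, TableState.State.DISABLING, TableState.State.ENABLED);
// Returns false (and leaves the tableinfo file untouched) when the table was not
// ENABLED; throws TableNotFoundException if no descriptor exists for the table.
// ---------------------------------------------------------------------------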
- * - * @param tableName table we're working on - * @param states if table isn't in any one of these states, operation aborts - * @param deletePermanentState if true, reset the permanent flag - * @throws CoordinatedStateException if error happened in underlying coordination engine - */ - void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states, - boolean deletePermanentState) - throws CoordinatedStateException; -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java index db853ed..6654032 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java @@ -118,4 +118,4 @@ public class CoprocessorHConnection extends HConnectionImplementation { public NonceGenerator getNonceGenerator() { return NO_NONCE_GEN; // don't use nonces for coprocessor connection } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java index 295cefe..cb59dff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java @@ -18,10 +18,8 @@ package org.apache.hadoop.hbase.coordination; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.TableStateManager; /** * Base class for {@link org.apache.hadoop.hbase.CoordinatedStateManager} implementations. 
@@ -49,10 +47,6 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan return null; } - @Override - public abstract TableStateManager getTableStateManager() throws InterruptedException, - CoordinatedStateException; - /** * Method to retrieve coordination for split log worker */ @@ -61,4 +55,4 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan * Method to retrieve coordination for split log manager */ public abstract SplitLogManagerCoordination getSplitLogManagerCoordination(); -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java index 98500d3..cce6091 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java @@ -18,14 +18,9 @@ package org.apache.hadoop.hbase.coordination; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.TableStateManager; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.zookeeper.KeeperException; /** * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}. @@ -52,16 +47,6 @@ public class ZkCoordinatedStateManager extends BaseCoordinatedStateManager { } @Override - public TableStateManager getTableStateManager() throws InterruptedException, - CoordinatedStateException { - try { - return new ZKTableStateManager(server.getZooKeeper()); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - - @Override public SplitLogWorkerCoordination getSplitLogWorkerCoordination() { return splitLogWorkerCoordination; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 5ecbe98..eeb5997 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; -import java.io.InterruptedIOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -41,13 +40,13 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -60,23 +59,21 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; 
import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcClient.FailedServerException; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper; import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer; import org.apache.hadoop.hbase.master.handler.DisableTableHandler; import org.apache.hadoop.hbase.master.handler.EnableTableHandler; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; @@ -91,8 +88,6 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.ipc.RemoteException; import org.apache.zookeeper.KeeperException; -import com.google.common.annotations.VisibleForTesting; - /** * Manages and performs region assignment. * Related communications with regionserver are all done over RPC. @@ -217,14 +212,14 @@ public class AssignmentManager { * @param service Executor service * @param metricsMaster metrics manager * @param tableLockManager TableLock manager - * @throws CoordinatedStateException * @throws IOException */ public AssignmentManager(Server server, ServerManager serverManager, final LoadBalancer balancer, final ExecutorService service, MetricsMaster metricsMaster, - final TableLockManager tableLockManager) - throws IOException, CoordinatedStateException { + final TableLockManager tableLockManager, + final TableStateManager tableStateManager) + throws IOException { this.server = server; this.serverManager = serverManager; this.executorService = service; @@ -236,15 +231,9 @@ public class AssignmentManager { this.shouldAssignRegionsWithFavoredNodes = conf.getClass( HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals( FavoredNodeLoadBalancer.class); - try { - if (server.getCoordinatedStateManager() != null) { - this.tableStateManager = server.getCoordinatedStateManager().getTableStateManager(); - } else { - this.tableStateManager = null; - } - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } + + this.tableStateManager = tableStateManager; + // This is the max attempts, not retries, so it should be at least 1. 
this.maximumAttempts = Math.max(1, this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10)); @@ -402,10 +391,9 @@ public class AssignmentManager { * @throws IOException * @throws KeeperException * @throws InterruptedException - * @throws CoordinatedStateException */ void joinCluster() throws IOException, - KeeperException, InterruptedException, CoordinatedStateException { + KeeperException, InterruptedException { long startTime = System.currentTimeMillis(); // Concurrency note: In the below the accesses on regionsInTransition are // outside of a synchronization block where usually all accesses to RIT are @@ -440,10 +428,9 @@ public class AssignmentManager { * Map of dead servers and their regions. Can be null. * @throws IOException * @throws InterruptedException - * @throws CoordinatedStateException */ boolean processDeadServersAndRegionsInTransition(final Set deadServers) - throws IOException, InterruptedException, CoordinatedStateException { + throws IOException, InterruptedException { boolean failover = !serverManager.getDeadServers().isEmpty(); if (failover) { // This may not be a failover actually, especially if meta is on this master. @@ -512,8 +499,8 @@ public class AssignmentManager { if (!failover) { disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates( - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING, - ZooKeeperProtos.Table.State.ENABLING); + TableState.State.DISABLED, TableState.State.DISABLING, + TableState.State.ENABLING); // Clean re/start, mark all user regions closed before reassignment allRegions = regionStates.closeAllUserRegions( @@ -738,7 +725,7 @@ public class AssignmentManager { for (RegionState state: states) { HRegionInfo region = state.getRegion(); regionStates.updateRegionState( - region, State.PENDING_OPEN, destination); + region, RegionState.State.PENDING_OPEN, destination); List favoredNodes = ServerName.EMPTY_SERVER_LIST; if (this.shouldAssignRegionsWithFavoredNodes) { favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region); @@ -855,7 +842,7 @@ public class AssignmentManager { if (!serverManager.isServerOnline(server)) { LOG.debug("Offline " + region.getRegionNameAsString() + ", no need to unassign since it's on a dead server: " + server); - regionStates.updateRegionState(region, State.OFFLINE); + regionStates.updateRegionState(region, RegionState.State.OFFLINE); return; } try { @@ -879,7 +866,7 @@ public class AssignmentManager { || t instanceof ServerNotRunningYetException) { LOG.debug("Offline " + region.getRegionNameAsString() + ", it's not any more on " + server, t); - regionStates.updateRegionState(region, State.OFFLINE); + regionStates.updateRegionState(region, RegionState.State.OFFLINE); return; } else if (t instanceof FailedServerException || t instanceof RegionAlreadyInTransitionException) { @@ -913,7 +900,7 @@ public class AssignmentManager { + region.getRegionNameAsString() + " since interrupted", ie); Thread.currentThread().interrupt(); if (state != null) { - regionStates.updateRegionState(region, State.FAILED_CLOSE); + regionStates.updateRegionState(region, RegionState.State.FAILED_CLOSE); } return; } @@ -929,7 +916,7 @@ public class AssignmentManager { } // Run out of attempts if (state != null) { - regionStates.updateRegionState(region, State.FAILED_CLOSE); + regionStates.updateRegionState(region, RegionState.State.FAILED_CLOSE); } } @@ -1020,7 +1007,7 @@ public class AssignmentManager { Thread.currentThread().interrupt(); } } - 
regionStates.updateRegionState(region, State.FAILED_OPEN); + regionStates.updateRegionState(region, RegionState.State.FAILED_OPEN); return; } // In case of assignment from EnableTableHandler table state is ENABLING. Any how @@ -1031,7 +1018,7 @@ public class AssignmentManager { // will not be in ENABLING or ENABLED state. TableName tableName = region.getTable(); if (!tableStateManager.isTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED, ZooKeeperProtos.Table.State.ENABLING)) { + TableState.State.ENABLED, TableState.State.ENABLING)) { LOG.debug("Setting table " + tableName + " to ENABLED state."); setEnabledTable(tableName); } @@ -1039,7 +1026,7 @@ public class AssignmentManager { " to " + plan.getDestination().toString()); // Transition RegionState to PENDING_OPEN regionStates.updateRegionState(region, - State.PENDING_OPEN, plan.getDestination()); + RegionState.State.PENDING_OPEN, plan.getDestination()); boolean needNewPlan; final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() + @@ -1114,7 +1101,7 @@ public class AssignmentManager { } catch (InterruptedException ie) { LOG.warn("Failed to assign " + region.getRegionNameAsString() + " since interrupted", ie); - regionStates.updateRegionState(region, State.FAILED_OPEN); + regionStates.updateRegionState(region, RegionState.State.FAILED_OPEN); Thread.currentThread().interrupt(); return; } @@ -1150,7 +1137,7 @@ public class AssignmentManager { LOG.warn("Failed to get region plan", e); } if (newPlan == null) { - regionStates.updateRegionState(region, State.FAILED_OPEN); + regionStates.updateRegionState(region, RegionState.State.FAILED_OPEN); LOG.warn("Unable to find a viable location to assign region " + region.getRegionNameAsString()); return; @@ -1160,7 +1147,7 @@ public class AssignmentManager { // Clean out plan we failed execute and one that doesn't look like it'll // succeed anyways; we need a new plan! 
// Transition back to OFFLINE - regionStates.updateRegionState(region, State.OFFLINE); + regionStates.updateRegionState(region, RegionState.State.OFFLINE); plan = newPlan; } else if(plan.getDestination().equals(newPlan.getDestination()) && previousException instanceof FailedServerException) { @@ -1172,7 +1159,7 @@ public class AssignmentManager { } catch (InterruptedException ie) { LOG.warn("Failed to assign " + region.getRegionNameAsString() + " since interrupted", ie); - regionStates.updateRegionState(region, State.FAILED_OPEN); + regionStates.updateRegionState(region, RegionState.State.FAILED_OPEN); Thread.currentThread().interrupt(); return; } @@ -1180,7 +1167,7 @@ public class AssignmentManager { } } // Run out of attempts - regionStates.updateRegionState(region, State.FAILED_OPEN); + regionStates.updateRegionState(region, RegionState.State.FAILED_OPEN); } finally { metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTime() - startTime); } @@ -1188,8 +1175,8 @@ public class AssignmentManager { private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) { if (this.tableStateManager.isTableState(region.getTable(), - ZooKeeperProtos.Table.State.DISABLED, - ZooKeeperProtos.Table.State.DISABLING) || replicasToClose.contains(region)) { + TableState.State.DISABLED, + TableState.State.DISABLING) || replicasToClose.contains(region)) { LOG.info("Table " + region.getTable() + " is disabled or disabling;" + " skipping assign of " + region.getRegionNameAsString()); offlineDisabledRegion(region); @@ -1344,7 +1331,7 @@ public class AssignmentManager { } } state = regionStates.updateRegionState( - region, State.PENDING_CLOSE); + region, RegionState.State.PENDING_CLOSE); } else if (state.isFailedOpen()) { // The region is not open yet regionOffline(region); @@ -1362,7 +1349,7 @@ public class AssignmentManager { // Region is expected to be reassigned afterwards if (!replicasToClose.contains(region) - && regionStates.isRegionInState(region, State.OFFLINE)) { + && regionStates.isRegionInState(region, RegionState.State.OFFLINE)) { assign(region); } } @@ -1388,7 +1375,7 @@ public class AssignmentManager { public boolean waitForAssignment(HRegionInfo regionInfo) throws InterruptedException { while (!regionStates.isRegionOnline(regionInfo)) { - if (regionStates.isRegionInState(regionInfo, State.FAILED_OPEN) + if (regionStates.isRegionInState(regionInfo, RegionState.State.FAILED_OPEN) || this.server.isStopped()) { return false; } @@ -1408,7 +1395,7 @@ public class AssignmentManager { * any RegionServer. 
*/ public void assignMeta() throws KeeperException { - regionStates.updateRegionState(HRegionInfo.FIRST_META_REGIONINFO, State.OFFLINE); + regionStates.updateRegionState(HRegionInfo.FIRST_META_REGIONINFO, RegionState.State.OFFLINE); assign(HRegionInfo.FIRST_META_REGIONINFO); } @@ -1529,7 +1516,7 @@ public class AssignmentManager { for (HRegionInfo hri : regionsFromMetaScan) { TableName tableName = hri.getTable(); if (!tableStateManager.isTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED)) { + TableState.State.ENABLED)) { setEnabledTable(tableName); } } @@ -1574,14 +1561,14 @@ public class AssignmentManager { * @throws IOException */ Set rebuildUserRegions() throws - IOException, KeeperException, CoordinatedStateException { + IOException, KeeperException { Set disabledOrEnablingTables = tableStateManager.getTablesInStates( - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.ENABLING); + TableState.State.DISABLED, TableState.State.ENABLING); Set disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates( - ZooKeeperProtos.Table.State.DISABLED, - ZooKeeperProtos.Table.State.DISABLING, - ZooKeeperProtos.Table.State.ENABLING); + TableState.State.DISABLED, + TableState.State.DISABLING, + TableState.State.ENABLING); // Region assignment from META List results = MetaTableAccessor.fullScanOfMeta(server.getShortCircuitConnection()); @@ -1616,11 +1603,11 @@ public class AssignmentManager { HRegionInfo regionInfo = hrl.getRegionInfo(); if (regionInfo == null) continue; int replicaId = regionInfo.getReplicaId(); - State state = RegionStateStore.getRegionState(result, replicaId); + RegionState.State state = RegionStateStore.getRegionState(result, replicaId); // keep a track of replicas to close. These were the replicas of the split parents // from the previous life of the master. 
The master should have closed them before // but it couldn't maybe because it crashed - if (replicaId == 0 && state.equals(State.SPLIT)) { + if (replicaId == 0 && state.equals(RegionState.State.SPLIT)) { for (HRegionLocation h : locations) { replicasToClose.add(h.getRegionInfo()); } @@ -1628,7 +1615,7 @@ public class AssignmentManager { ServerName lastHost = hrl.getServerName(); ServerName regionLocation = RegionStateStore.getRegionServer(result, replicaId); regionStates.createRegionState(regionInfo, state, regionLocation, lastHost); - if (!regionStates.isRegionInState(regionInfo, State.OPEN)) { + if (!regionStates.isRegionInState(regionInfo, RegionState.State.OPEN)) { // Region is not open (either offline or in transition), skip continue; } @@ -1646,7 +1633,7 @@ public class AssignmentManager { // this will be used in rolling restarts if (!disabledOrDisablingOrEnabling.contains(tableName) && !getTableStateManager().isTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED)) { + TableState.State.ENABLED)) { setEnabledTable(tableName); } } @@ -1663,9 +1650,9 @@ public class AssignmentManager { * @throws IOException */ private void recoverTableInDisablingState() - throws KeeperException, IOException, CoordinatedStateException { + throws KeeperException, IOException { Set disablingTables = - tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLING); + tableStateManager.getTablesInStates(TableState.State.DISABLING); if (disablingTables.size() != 0) { for (TableName tableName : disablingTables) { // Recover by calling DisableTableHandler @@ -1687,9 +1674,9 @@ public class AssignmentManager { * @throws IOException */ private void recoverTableInEnablingState() - throws KeeperException, IOException, CoordinatedStateException { + throws KeeperException, IOException { Set enablingTables = tableStateManager. 
- getTablesInStates(ZooKeeperProtos.Table.State.ENABLING); + getTablesInStates(TableState.State.ENABLING); if (enablingTables.size() != 0) { for (TableName tableName : enablingTables) { // Recover by calling EnableTableHandler @@ -1724,7 +1711,7 @@ public class AssignmentManager { if (!serverManager.isServerOnline(regionState.getServerName())) { continue; // SSH will handle it } - State state = regionState.getState(); + RegionState.State state = regionState.getState(); LOG.info("Processing " + regionState); switch (state) { case PENDING_OPEN: @@ -2010,13 +1997,13 @@ public class AssignmentManager { it.remove(); } else { if (tableStateManager.isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { regionStates.regionOffline(hri); it.remove(); continue; } // Mark the region offline and assign it again by SSH - regionStates.updateRegionState(hri, State.OFFLINE); + regionStates.updateRegionState(hri, RegionState.State.OFFLINE); } } finally { lock.unlock(); @@ -2032,7 +2019,7 @@ public class AssignmentManager { HRegionInfo hri = plan.getRegionInfo(); TableName tableName = hri.getTable(); if (tableStateManager.isTableState(tableName, - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { LOG.info("Ignored moving region of disabling/disabled table " + tableName); return; @@ -2066,8 +2053,8 @@ public class AssignmentManager { protected void setEnabledTable(TableName tableName) { try { this.tableStateManager.setTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED); - } catch (CoordinatedStateException e) { + TableState.State.ENABLED); + } catch (IOException e) { // here we can abort as it is the start up flow String errorMsg = "Unable to ensure that the table " + tableName + " will be" + " enabled because of a ZooKeeper issue"; @@ -2088,19 +2075,19 @@ public class AssignmentManager { failedOpenTracker.put(encodedName, failedOpenCount); } if (failedOpenCount.incrementAndGet() >= maximumAttempts) { - regionStates.updateRegionState(hri, State.FAILED_OPEN); + regionStates.updateRegionState(hri, RegionState.State.FAILED_OPEN); // remove the tracking info to save memory, also reset // the count for next open initiative failedOpenTracker.remove(encodedName); } else { // Handle this the same as if it were opened and then closed. - RegionState regionState = regionStates.updateRegionState(hri, State.CLOSED); + RegionState regionState = regionStates.updateRegionState(hri, RegionState.State.CLOSED); if (regionState != null) { // When there are more than one region server a new RS is selected as the // destination and the same is updated in the region plan. 
(HBASE-5546) if (getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) || - replicasToClose.contains(hri)) { + TableState.State.DISABLED, TableState.State.DISABLING) || + replicasToClose.contains(hri)) { offlineDisabledRegion(hri); return; } @@ -2125,15 +2112,15 @@ public class AssignmentManager { // reset the count, if any failedOpenTracker.remove(hri.getEncodedName()); if (getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { invokeUnAssign(hri); } } private void onRegionClosed(final HRegionInfo hri) { if (getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) || - replicasToClose.contains(hri)) { + TableState.State.DISABLED, TableState.State.DISABLING) || + replicasToClose.contains(hri)) { offlineDisabledRegion(hri); return; } @@ -2155,21 +2142,21 @@ public class AssignmentManager { return "Not in state good for split"; } - regionStates.updateRegionState(a, State.SPLITTING_NEW, sn); - regionStates.updateRegionState(b, State.SPLITTING_NEW, sn); - regionStates.updateRegionState(p, State.SPLITTING); + regionStates.updateRegionState(a, RegionState.State.SPLITTING_NEW, sn); + regionStates.updateRegionState(b, RegionState.State.SPLITTING_NEW, sn); + regionStates.updateRegionState(p, RegionState.State.SPLITTING); if (code == TransitionCode.SPLIT) { if (TEST_SKIP_SPLIT_HANDLING) { return "Skipping split message, TEST_SKIP_SPLIT_HANDLING is set"; } - regionOffline(p, State.SPLIT); + regionOffline(p, RegionState.State.SPLIT); regionOnline(a, sn, 1); regionOnline(b, sn, 1); // User could disable the table before master knows the new region. if (getTableStateManager().isTableState(p.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { invokeUnAssign(a); invokeUnAssign(b); } else { @@ -2195,7 +2182,7 @@ public class AssignmentManager { regionOffline(b); if (getTableStateManager().isTableState(p.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { invokeUnAssign(p); } } @@ -2212,9 +2199,9 @@ public class AssignmentManager { return "Not in state good for merge"; } - regionStates.updateRegionState(a, State.MERGING); - regionStates.updateRegionState(b, State.MERGING); - regionStates.updateRegionState(p, State.MERGING_NEW, sn); + regionStates.updateRegionState(a, RegionState.State.MERGING); + regionStates.updateRegionState(b, RegionState.State.MERGING); + regionStates.updateRegionState(p, RegionState.State.MERGING_NEW, sn); String encodedName = p.getEncodedName(); if (code == TransitionCode.READY_TO_MERGE) { @@ -2222,13 +2209,13 @@ public class AssignmentManager { new PairOfSameType(a, b)); } else if (code == TransitionCode.MERGED) { mergingRegions.remove(encodedName); - regionOffline(a, State.MERGED); - regionOffline(b, State.MERGED); + regionOffline(a, RegionState.State.MERGED); + regionOffline(b, RegionState.State.MERGED); regionOnline(p, sn, 1); // User could disable the table before master knows the new region. 
if (getTableStateManager().isTableState(p.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { invokeUnAssign(p); } else { Callable mergeReplicasCallable = new Callable() { @@ -2254,7 +2241,7 @@ public class AssignmentManager { regionOffline(p); if (getTableStateManager().isTableState(p.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) { + TableState.State.DISABLED, TableState.State.DISABLING)) { invokeUnAssign(a); invokeUnAssign(b); } @@ -2371,7 +2358,7 @@ public class AssignmentManager { * if not null. If the specified state is null, the new state is Offline. * The specified state can be Split/Merged/Offline/null only. */ - private void regionOffline(final HRegionInfo regionInfo, final State state) { + private void regionOffline(final HRegionInfo regionInfo, final RegionState.State state) { regionStates.regionOffline(regionInfo, state); removeClosedRegion(regionInfo); // remove the region plan as well just in case. @@ -2381,7 +2368,7 @@ public class AssignmentManager { // Tell our listeners that a region was closed sendRegionClosedNotification(regionInfo); // also note that all the replicas of the primary should be closed - if (state != null && state.equals(State.SPLIT)) { + if (state != null && state.equals(RegionState.State.SPLIT)) { Collection c = new ArrayList(1); c.add(regionInfo); Map> map = regionStates.getRegionAssignments(c); @@ -2390,7 +2377,7 @@ public class AssignmentManager { replicasToClose.addAll(list); } } - else if (state != null && state.equals(State.MERGED)) { + else if (state != null && state.equals(RegionState.State.MERGED)) { Collection c = new ArrayList(1); c.add(regionInfo); Map> map = regionStates.getRegionAssignments(c); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 714b5a8..cb846f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -71,6 +71,8 @@ import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.executor.ExecutorType; @@ -224,6 +226,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server { // monitor for distributed procedures MasterProcedureManagerHost mpmHost; + // handle table states + private TableStateManager tableStateManager; + /** flag used in test cases in order to simulate RS failures during master initialization */ private volatile boolean initializationBeforeMetaAssignment = false; @@ -409,7 +414,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { this.loadBalancerTracker.start(); this.assignmentManager = new AssignmentManager(this, serverManager, this.balancer, this.service, this.metricsMaster, - this.tableLockManager); + this.tableLockManager, tableStateManager); this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager); @@ -490,6 +495,9 @@ 
public class HMaster extends HRegionServer implements MasterServices, Server { // Invalidate all write locks held previously this.tableLockManager.reapWriteLocks(); + this.tableStateManager = new TableStateManager(this); + this.tableStateManager.start(); + status.setStatus("Initializing ZK system trackers"); initializeZKBasedSystemTrackers(); @@ -737,8 +745,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } private void enableMeta(TableName metaTableName) { - if (!this.assignmentManager.getTableStateManager().isTableState(metaTableName, - ZooKeeperProtos.Table.State.ENABLED)) { + if (!this.tableStateManager.isTableState(metaTableName, + TableState.State.ENABLED)) { this.assignmentManager.setEnabledTable(metaTableName); } } @@ -777,6 +785,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return this.fileSystemManager; } + @Override + public TableStateManager getTableStateManager() { + return tableStateManager; + } + /* * Start up all services. If any of these threads gets an unhandled exception * then they just die with a logged message. This should be fine because @@ -1452,7 +1465,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { throw new TableNotFoundException(tableName); } if (!getAssignmentManager().getTableStateManager(). - isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) { + isTableState(tableName, TableState.State.DISABLED)) { throw new TableNotDisabledException(tableName); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index be702ab..49225c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -43,8 +43,10 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.HFileArchiver; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; @@ -454,7 +456,9 @@ public class MasterFileSystem { } // Create tableinfo-s for hbase:meta if not already there. 
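// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): with the manager owned by HMaster,
// a table-state lookup now flows master-side instead of through a ZK znode:
// RPC handler -> master.getTableStateManager() -> in-memory cache -> (on a miss)
// the tableinfo file on HDFS. `master` stands in for a running HMaster.
TableState.State state = master.getTableStateManager().getTableState(tableName);
if (state == null) {
  // Mirrors the GetTableState RPC handler added in MasterRpcServices below.
  throw new TableNotFoundException(tableName);
}
// ---------------------------------------------------------------------------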
- new FSTableDescriptors(fs, rd).createTableDescriptor(HTableDescriptor.META_TABLEDESC); + // assume, created table descriptor is for enabling table + new FSTableDescriptors(fs, rd).createTableDescriptor( + new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLING)); return rd; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index bb4a09c..1fb8dee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -37,8 +37,10 @@ import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.MergeRegionException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; @@ -864,6 +866,25 @@ public class MasterRpcServices extends RSRpcServices } @Override + public MasterProtos.GetTableStateResponse getTableState(RpcController controller, + MasterProtos.GetTableStateRequest request) throws ServiceException { + try { + master.checkServiceStarted(); + TableName tableName = ProtobufUtil.toTableName(request.getTableName()); + TableState.State state = master.getTableStateManager() + .getTableState(tableName); + if (state == null) + throw new TableNotFoundException(tableName); + MasterProtos.GetTableStateResponse.Builder builder = + MasterProtos.GetTableStateResponse.newBuilder(); + builder.setTableState(new TableState(tableName, state).convert()); + return builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c, IsCatalogJanitorEnabledRequest req) throws ServiceException { return IsCatalogJanitorEnabledResponse.newBuilder().setValue( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index c1334f5..213f7f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -66,6 +66,11 @@ public interface MasterServices extends Server { TableLockManager getTableLockManager(); /** + * @return Master's instance of {@link TableStateManager} + */ + TableStateManager getTableStateManager(); + + /** * @return Master's instance of {@link MasterCoprocessorHost} */ MasterCoprocessorHost getMasterCoprocessorHost(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index a29d675..17d6215 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -29,6 +29,8 @@ import java.util.Map; import java.util.Set; import java.util.TreeMap; +import com.google.common.annotations.VisibleForTesting; +import 
com.google.common.base.Preconditions; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -40,16 +42,11 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - /** * Region state accountant. It holds the states of all regions in the memory. * In normal scenario, it should match the meta table and the true region states. @@ -223,14 +220,14 @@ public class RegionStates { */ public synchronized boolean isRegionOffline(final HRegionInfo hri) { return getRegionState(hri) == null || (!isRegionInTransition(hri) - && isRegionInState(hri, State.OFFLINE, State.CLOSED)); + && isRegionInState(hri, RegionState.State.OFFLINE, RegionState.State.CLOSED)); } /** * @return True if specified region is in one of the specified states. */ public boolean isRegionInState( - final HRegionInfo hri, final State... states) { + final HRegionInfo hri, final RegionState.State... states) { return isRegionInState(hri.getEncodedName(), states); } @@ -238,7 +235,7 @@ public class RegionStates { * @return True if specified region is in one of the specified states. */ public boolean isRegionInState( - final String encodedName, final State... states) { + final String encodedName, final RegionState.State... states) { RegionState regionState = getRegionState(encodedName); return isOneOfStates(regionState, states); } @@ -300,12 +297,12 @@ public class RegionStates { * @return the current state */ public synchronized RegionState createRegionState(final HRegionInfo hri, - State newState, ServerName serverName, ServerName lastHost) { - if (newState == null || (newState == State.OPEN && serverName == null)) { - newState = State.OFFLINE; + RegionState.State newState, ServerName serverName, ServerName lastHost) { + if (newState == null || (newState == RegionState.State.OPEN && serverName == null)) { + newState = RegionState.State.OFFLINE; } if (hri.isOffline() && hri.isSplit()) { - newState = State.SPLIT; + newState = RegionState.State.SPLIT; serverName = null; } String encodedName = hri.getEncodedName(); @@ -316,7 +313,7 @@ public class RegionStates { } else { regionState = new RegionState(hri, newState, serverName); regionStates.put(encodedName, regionState); - if (newState == State.OPEN) { + if (newState == RegionState.State.OPEN) { if (!serverName.equals(lastHost)) { LOG.warn("Open region's last host " + lastHost + " should be the same as the current one " + serverName @@ -328,7 +325,7 @@ public class RegionStates { } else if (!regionState.isUnassignable()) { regionsInTransition.put(encodedName, regionState); } - if (lastHost != null && newState != State.SPLIT) { + if (lastHost != null && newState != RegionState.State.SPLIT) { addToServerHoldings(lastHost, hri); } } @@ -339,7 +336,7 @@ public class RegionStates { * Update a region state. It will be put in transition if not already there. 
*/ public RegionState updateRegionState( - final HRegionInfo hri, final State state) { + final HRegionInfo hri, final RegionState.State state) { RegionState regionState = getRegionState(hri.getEncodedName()); return updateRegionState(hri, state, regionState == null ? null : regionState.getServerName()); @@ -349,7 +346,7 @@ public class RegionStates { * Update a region state. It will be put in transition if not already there. */ public RegionState updateRegionState( - final HRegionInfo hri, final State state, final ServerName serverName) { + final HRegionInfo hri, final RegionState.State state, final ServerName serverName) { return updateRegionState(hri, state, serverName, HConstants.NO_SEQNUM); } @@ -374,7 +371,7 @@ public class RegionStates { + " was opened on a dead server: " + serverName); return; } - updateRegionState(hri, State.OPEN, serverName, openSeqNum); + updateRegionState(hri, RegionState.State.OPEN, serverName, openSeqNum); synchronized (this) { regionsInTransition.remove(hri.getEncodedName()); @@ -490,26 +487,26 @@ public class RegionStates { * Split/Merged/Offline/null(=Offline)/SplittingNew/MergingNew. */ public void regionOffline( - final HRegionInfo hri, final State expectedState) { + final HRegionInfo hri, final RegionState.State expectedState) { Preconditions.checkArgument(expectedState == null || RegionState.isUnassignable(expectedState), "Offlined region should not be " + expectedState); - if (isRegionInState(hri, State.SPLITTING_NEW, State.MERGING_NEW)) { + if (isRegionInState(hri, RegionState.State.SPLITTING_NEW, RegionState.State.MERGING_NEW)) { // Remove it from all region maps deleteRegion(hri); return; } - State newState = - expectedState == null ? State.OFFLINE : expectedState; + RegionState.State newState = + expectedState == null ? RegionState.State.OFFLINE : expectedState; updateRegionState(hri, newState); synchronized (this) { regionsInTransition.remove(hri.getEncodedName()); ServerName oldServerName = regionAssignments.remove(hri); if (oldServerName != null && serverHoldings.containsKey(oldServerName) - && (newState == State.MERGED || newState == State.SPLIT + && (newState == RegionState.State.MERGED || newState == RegionState.State.SPLIT || hri.isMetaRegion() || tableStateManager.isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING))) { + TableState.State.DISABLED, TableState.State.DISABLING))) { // Offline the region only if it's merged/split, or the table is disabled/disabling. // Otherwise, offline it from this server only when it is online on a different server. 
LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName); @@ -536,7 +533,7 @@ public class RegionStates { // Offline open regions, no need to offline if SPLIT/MERGED/OFFLINE if (isRegionOnline(region)) { regionsToOffline.add(region); - } else if (isRegionInState(region, State.SPLITTING, State.MERGING)) { + } else if (isRegionInState(region, RegionState.State.SPLITTING, RegionState.State.MERGING)) { LOG.debug("Offline splitting/merging region " + getRegionState(region)); regionsToOffline.add(region); } @@ -778,7 +775,7 @@ public class RegionStates { Map allUserRegions = new HashMap(toBeClosed.size()); for (HRegionInfo hri: toBeClosed) { - RegionState regionState = updateRegionState(hri, State.CLOSED); + RegionState regionState = updateRegionState(hri, RegionState.State.CLOSED); allUserRegions.put(hri, regionState.getServerName()); } return allUserRegions; @@ -918,9 +915,9 @@ public class RegionStates { } } - static boolean isOneOfStates(RegionState regionState, State... states) { - State s = regionState != null ? regionState.getState() : null; - for (State state: states) { + static boolean isOneOfStates(RegionState regionState, RegionState.State... states) { + RegionState.State s = regionState != null ? regionState.getState() : null; + for (RegionState.State state: states) { if (s == state) return true; } return false; @@ -930,8 +927,8 @@ public class RegionStates { * Update a region state. It will be put in transition if not already there. */ private RegionState updateRegionState(final HRegionInfo hri, - final State state, final ServerName serverName, long openSeqNum) { - if (state == State.FAILED_CLOSE || state == State.FAILED_OPEN) { + final RegionState.State state, final ServerName serverName, long openSeqNum) { + if (state == RegionState.State.FAILED_CLOSE || state == RegionState.State.FAILED_OPEN) { LOG.warn("Failed to open/close " + hri.getShortNameToLog() + " on " + serverName + ", set to " + state); } @@ -952,8 +949,8 @@ public class RegionStates { // For these states, region should be properly closed. // There should be no log splitting issue. - if ((state == State.CLOSED || state == State.MERGED - || state == State.SPLIT) && lastAssignments.containsKey(encodedName)) { + if ((state == RegionState.State.CLOSED || state == RegionState.State.MERGED + || state == RegionState.State.SPLIT) && lastAssignments.containsKey(encodedName)) { ServerName last = lastAssignments.get(encodedName); if (last.equals(serverName)) { lastAssignments.remove(encodedName); @@ -964,7 +961,7 @@ public class RegionStates { } // Once a region is opened, record its last assignment right away. 
- if (serverName != null && state == State.OPEN) { + if (serverName != null && state == RegionState.State.OPEN) { ServerName last = lastAssignments.get(encodedName); if (!serverName.equals(last)) { lastAssignments.put(encodedName, serverName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index a78e225..8ad0d1a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.master.handler.CreateTableHandler; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -258,7 +259,7 @@ public class TableNamespaceManager { } // Now check if the table is assigned, if not then fail fast - if (isTableAssigned()) { + if (isTableAssigned() && isTableEnabled()) { try { nsTable = new HTable(conf, TableName.NAMESPACE_TABLE_NAME); zkNamespaceManager = new ZKNamespaceManager(masterServices.getZooKeeper()); @@ -298,6 +299,12 @@ public class TableNamespaceManager { return false; } + private boolean isTableEnabled() throws IOException { + return masterServices.getTableStateManager().getTableState( + TableName.NAMESPACE_TABLE_NAME + ).equals(TableState.State.ENABLED); + } + private boolean isTableAssigned() { return !masterServices.getAssignmentManager() .getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME).isEmpty(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java new file mode 100644 index 0000000..26b1901 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -0,0 +1,217 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.TableState;
+
+/**
+ * This is a helper class used to manage table states.
+ * States are persisted in the table descriptor file (tableinfo) and cached internally.
+ */
+@InterfaceAudience.Private
+public class TableStateManager {
+  private static final Log LOG = LogFactory.getLog(TableStateManager.class);
+  private final TableDescriptors descriptors;
+
+  private final Map<TableName, TableState.State> tableStates = Maps.newConcurrentMap();
+
+  public TableStateManager(MasterServices master) {
+    this.descriptors = master.getTableDescriptors();
+  }
+
+  public void start() throws IOException {
+    Map<String, TableDescriptor> all = descriptors.getAllDescriptors();
+    for (TableDescriptor table : all.values()) {
+      TableName tableName = table.getHTableDescriptor().getTableName();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Adding table state: " + tableName
+            + ": " + table.getTableState());
+      }
+      tableStates.put(tableName, table.getTableState());
+    }
+  }
+
+  /**
+   * Set the table state to the provided value.
+   * Caller should lock the table for write.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @throws IOException
+   */
+  public void setTableState(TableName tableName, TableState.State newState) throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (descriptor.getTableState() != newState) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+      }
+    }
+  }
+
+  /**
+   * Set the table state to the provided value, but only if the table is
+   * currently in one of the specified states.
+   * Caller should lock the table for write.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   * @throws IOException
+   */
+  public boolean setTableStateIfInStates(TableName tableName,
+                                         TableState.State newState,
+                                         TableState.State... states)
+          throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (TableState.isInStates(descriptor.getTableState(), states)) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+
+
+  /**
+   * Set the table state to the provided value, but only if the table is
+   * currently not in the specified states.
+   * Caller should lock the table for write.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   * @throws IOException
+   */
+  public boolean setTableStateIfNotInStates(TableName tableName,
+                                            TableState.State newState,
+                                            TableState.State... states)
+          throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (!TableState.isInStates(descriptor.getTableState(), states)) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+
+  public boolean isTableState(TableName tableName, TableState.State... states) {
+    TableState.State tableState = null;
+    try {
+      tableState = getTableState(tableName);
+    } catch (IOException e) {
+      LOG.error("Unable to get table state; the table probably does not exist");
+      return false;
+    }
+    return tableState != null && TableState.isInStates(tableState, states);
+  }
+
+  public void setDeletedTable(TableName tableName) throws IOException {
+    TableState.State remove = tableStates.remove(tableName);
+    if (remove == null) {
+      LOG.warn("Moving table " + tableName + " state to deleted but it was "
+          + "already deleted");
+    }
+  }
+
+  public boolean isTablePresent(TableName tableName) throws IOException {
+    return getTableState(tableName) != null;
+  }
+
+  /**
+   * Return all tables in given states.
+   *
+   * @param states filter by states
+   * @return tables in given states
+   * @throws IOException
+   */
+  public Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
+    Set<TableName> rv = Sets.newHashSet();
+    for (Map.Entry<TableName, TableState.State> entry : tableStates.entrySet()) {
+      if (TableState.isInStates(entry.getValue(), states))
+        rv.add(entry.getKey());
+    }
+    return rv;
+  }
+
+  public TableState.State getTableState(TableName tableName) throws IOException {
+    TableState.State tableState = tableStates.get(tableName);
+    if (tableState == null) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor != null)
+        tableState = descriptor.getTableState();
+    }
+    return tableState;
+  }
+
+  /**
+   * Write the descriptor in place and update the cache of states.
+   * Write lock should be held by the caller.
+   *
+   * @param descriptor what to write
+   */
+  private void writeDescriptor(TableDescriptor descriptor) throws IOException {
+    TableName tableName = descriptor.getHTableDescriptor().getTableName();
+    TableState.State state = descriptor.getTableState();
+    descriptors.add(descriptor);
+    LOG.debug("Table " + tableName + " written descriptor for state " + state);
+    tableStates.put(tableName, state);
+    LOG.debug("Table " + tableName + " updated state to " + state);
+  }
+
+  /**
+   * Read the current descriptor for a table and update the cache of states.
+   *
+   * @param tableName table for which to read the descriptor
+   * @return descriptor
+   * @throws IOException
+   */
+  private TableDescriptor readDescriptor(TableName tableName) throws IOException {
+    TableDescriptor descriptor = descriptors.getDescriptor(tableName);
+    if (descriptor == null)
+      tableStates.remove(tableName);
+    else
+      tableStates.put(tableName, descriptor.getTableState());
+    return descriptor;
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index 3a86128..e584008 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -31,14 +31,16 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.ipc.RequestContext;
@@ -49,7 +51,6 @@ import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -121,8 +122,6 @@ public class CreateTableHandler extends EventHandler {
       if (MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) {
         throw new TableExistsException(tableName);
       }
-
-      checkAndSetEnablingTable(assignmentManager, tableName);
       success = true;
     } finally {
       if (!success) {
@@ -132,47 +131,6 @@ public class CreateTableHandler extends EventHandler {
     return this;
   }
-
-  static void checkAndSetEnablingTable(final AssignmentManager assignmentManager,
-      final TableName tableName) throws IOException {
-    // If we have multiple client threads trying to create the table at the
-    // same time, given the async nature of the operation, the table
-    // could be in a state where hbase:meta table hasn't been updated yet in
-    // the process() function.
-    // Use enabling state to tell if there is already a request for the same
-    // table in progress. This will introduce a new zookeeper call. Given
-    // createTable isn't a frequent operation, that should be ok.
-    // TODO: now that we have table locks, re-evaluate above -- table locks are not enough.
-    // We could have cleared the hbase.rootdir and not zk. How can we detect this case?
-    // Having to clean zk AND hdfs is awkward.
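The TableStateManager class introduced above is effectively a write-through, read-through cache: every state change is persisted via the descriptor store before the in-memory map is updated, and a cache miss falls back to re-reading the descriptor from the filesystem. A minimal standalone sketch of the read-through half of that pattern follows; DescriptorStore and all names in it are invented for illustration, where the real class delegates to TableDescriptors.

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class ReadThroughStateCache {
  enum State { ENABLED, DISABLED, ENABLING, DISABLING }

  // Invented stand-in for the persistent store (TableDescriptors in the patch).
  interface DescriptorStore {
    State readState(String table) throws IOException; // null if the table is gone
  }

  private final Map<String, State> cache = new ConcurrentHashMap<>();
  private final DescriptorStore store;

  ReadThroughStateCache(DescriptorStore store) { this.store = store; }

  // Mirrors TableStateManager.getTableState: consult the cache first, then
  // fall back to the persisted descriptor, keeping the cache in sync.
  State getState(String table) throws IOException {
    State s = cache.get(table);
    if (s == null) {
      s = store.readState(table);
      if (s == null) cache.remove(table);   // table was deleted under us
      else cache.put(table, s);
    }
    return s;
  }
}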
-    try {
-      if (!assignmentManager.getTableStateManager().setTableStateIfNotInStates(tableName,
-          ZooKeeperProtos.Table.State.ENABLING,
-          ZooKeeperProtos.Table.State.ENABLING,
-          ZooKeeperProtos.Table.State.ENABLED)) {
-        throw new TableExistsException(tableName);
-      }
-    } catch (CoordinatedStateException e) {
-      throw new IOException("Unable to ensure that the table will be" +
-        " enabling because of a ZooKeeper issue", e);
-    }
-  }
-
-  static void removeEnablingTable(final AssignmentManager assignmentManager,
-      final TableName tableName) {
-    // Try deleting the enabling node in case of error
-    // If this does not happen then if the client tries to create the table
-    // again with the same Active master
-    // It will block the creation saying TableAlreadyExists.
-    try {
-      assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLING, false);
-    } catch (CoordinatedStateException e) {
-      // Keeper exception should not happen here
-      LOG.error("Got a keeper exception while removing the ENABLING table znode "
-          + tableName, e);
-    }
-  }
-
   @Override
   public String toString() {
     String name = "UnknownServerName";
@@ -218,9 +176,6 @@ public class CreateTableHandler extends EventHandler {
     releaseTableLock();
     LOG.info("Table, " + this.hTableDescriptor.getTableName() + ", creation " +
         (exception == null ? "successful" : "failed. " + exception));
-    if (exception != null) {
-      removeEnablingTable(this.assignmentManager, this.hTableDescriptor.getTableName());
-    }
   }

   /**
@@ -243,9 +198,12 @@ public class CreateTableHandler extends EventHandler {
     FileSystem fs = fileSystemManager.getFileSystem();

     // 1. Create Table Descriptor
+    // Use a copy of the descriptor; the table is created in ENABLING state first
+    TableDescriptor underConstruction = new TableDescriptor(
+        this.hTableDescriptor, TableState.State.ENABLING);
     Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
     new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
-      tempTableDir, this.hTableDescriptor, false);
+      tempTableDir, underConstruction, false);
     Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName);

     // 2. Create Regions
@@ -271,20 +229,15 @@ public class CreateTableHandler extends EventHandler {
       ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
     }

-    // 8. Set table enabled flag up in zk.
-    try {
-      assignmentManager.getTableStateManager().setTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLED);
-    } catch (CoordinatedStateException e) {
-      throw new IOException("Unable to ensure that " + tableName + " will be" +
-        " enabled because of a ZooKeeper issue", e);
-    }
+    // 6. Enable table
+    assignmentManager.getTableStateManager().setTableState(tableName,
+        TableState.State.ENABLED);
   }

   /**
    * Create any replicas for the regions (the default replicas that was
    * already created is passed to the method)
-   * @param hTableDescriptor
+   * @param hTableDescriptor descriptor to use
    * @param regions default replicas
    * @return the combined list of default and non-default replicas
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
index 730da73..eed73e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -58,7 +59,7 @@ public class DeleteTableHandler extends TableEventHandler {
   @Override
   protected void prepareWithTableLock() throws IOException {
     // The next call fails if no such table.
-    hTableDescriptor = getTableDescriptor();
+    hTableDescriptor = getTableDescriptor().getHTableDescriptor();
   }

   protected void waitRegionInTransition(final List<HRegionInfo> regions)
@@ -116,7 +117,7 @@ public class DeleteTableHandler extends TableEventHandler {
     LOG.debug("Removing '" + tableName + "' from region states.");
     am.getRegionStates().tableDeleted(tableName);

-    // 5. If entry for this table in zk, and up in AssignmentManager, remove it.
+    // 5. If there is an entry for this table in table states, remove it.
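With the znode bookkeeping gone, the CreateTableHandler hunks above record the lifecycle in the descriptor itself: the table is written out as ENABLING before regions are created and assigned, and only flipped to ENABLED once assignment succeeds, so a crashed or racing create is detectable from persisted state alone. A compact sketch of that two-phase sequence, with all names invented for illustration:

class CreateFlowSketch {
  enum State { ENABLING, ENABLED }

  // Phase 1: durably mark the create as in progress; phase 2: only after the
  // regions exist and are assigned does the table become ENABLED.
  void createTable(String table) throws Exception {
    persistDescriptor(table, State.ENABLING); // visible to a restarting master
    createAndAssignRegions(table);            // slow, may fail midway
    persistDescriptor(table, State.ENABLED);  // table is now usable
  }

  void persistDescriptor(String table, State s) { /* write tableinfo */ }
  void createAndAssignRegions(String table) { /* create dirs, update meta, assign */ }
}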
LOG.debug("Marking '" + tableName + "' as deleted."); am.getTableStateManager().setDeletedTable(tableName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java index fb7aec8..07843a7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java @@ -25,13 +25,13 @@ import java.util.concurrent.ExecutorService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; @@ -39,11 +39,10 @@ import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.BulkAssigner; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.TableLockManager; -import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.htrace.Trace; /** @@ -91,16 +90,11 @@ public class DisableTableHandler extends EventHandler { // DISABLED or ENABLED. //TODO: reevaluate this since we have table locks now if (!skipTableStateCheck) { - try { - if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( - this.tableName, ZooKeeperProtos.Table.State.DISABLING, - ZooKeeperProtos.Table.State.ENABLED)) { - LOG.info("Table " + tableName + " isn't enabled; skipping disable"); - throw new TableNotEnabledException(this.tableName); - } - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that the table will be" + - " disabling because of a coordination engine issue", e); + if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( + this.tableName, TableState.State.DISABLING, + TableState.State.ENABLED)) { + LOG.info("Table " + tableName + " isn't enabled; skipping disable"); + throw new TableNotEnabledException(this.tableName); } } success = true; @@ -138,8 +132,6 @@ public class DisableTableHandler extends EventHandler { } } catch (IOException e) { LOG.error("Error trying to disable table " + this.tableName, e); - } catch (CoordinatedStateException e) { - LOG.error("Error trying to disable table " + this.tableName, e); } finally { releaseTableLock(); } @@ -155,10 +147,10 @@ public class DisableTableHandler extends EventHandler { } } - private void handleDisableTable() throws IOException, CoordinatedStateException { + private void handleDisableTable() throws IOException { // Set table disabling flag up in zk. 
this.assignmentManager.getTableStateManager().setTableState(this.tableName, - ZooKeeperProtos.Table.State.DISABLING); + TableState.State.DISABLING); boolean done = false; while (true) { // Get list of online regions that are of this table. Regions that are @@ -187,7 +179,7 @@ public class DisableTableHandler extends EventHandler { } // Flip the table to disabled if success. if (done) this.assignmentManager.getTableStateManager().setTableState(this.tableName, - ZooKeeperProtos.Table.State.DISABLED); + TableState.State.DISABLED); LOG.info("Disabled table, " + this.tableName + ", is done=" + done); } @@ -207,7 +199,7 @@ public class DisableTableHandler extends EventHandler { RegionStates regionStates = assignmentManager.getRegionStates(); for (HRegionInfo region: regions) { if (regionStates.isRegionInTransition(region) - && !regionStates.isRegionInState(region, State.FAILED_CLOSE)) { + && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) { continue; } final HRegionInfo hri = region; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java index b8edc0b..5771202 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java @@ -27,7 +27,6 @@ import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.AssignmentManager; @@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Pair; /** @@ -95,16 +94,8 @@ public class EnableTableHandler extends EventHandler { // retainAssignment is true only during recovery. In normal case it is false if (!this.skipTableStateCheck) { throw new TableNotFoundException(tableName); - } - try { - this.assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName, - ZooKeeperProtos.Table.State.ENABLING, true); - throw new TableNotFoundException(tableName); - } catch (CoordinatedStateException e) { - // TODO : Use HBCK to clear such nodes - LOG.warn("Failed to delete the ENABLING node for the table " + tableName - + ". The table will remain unusable. 
Run HBCK to manually fix the problem."); } + this.assignmentManager.getTableStateManager().setDeletedTable(tableName); } // There could be multiple client requests trying to disable or enable @@ -112,16 +103,11 @@ public class EnableTableHandler extends EventHandler { // After that, no other requests can be accepted until the table reaches // DISABLED or ENABLED. if (!skipTableStateCheck) { - try { - if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( - this.tableName, ZooKeeperProtos.Table.State.ENABLING, - ZooKeeperProtos.Table.State.DISABLED)) { - LOG.info("Table " + tableName + " isn't disabled; skipping enable"); - throw new TableNotDisabledException(this.tableName); - } - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that the table will be" + - " enabling because of a coordination engine issue", e); + if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates( + this.tableName, TableState.State.ENABLING, + TableState.State.DISABLED)) { + LOG.info("Table " + tableName + " isn't disabled; skipping enable"); + throw new TableNotDisabledException(this.tableName); } } success = true; @@ -156,11 +142,7 @@ public class EnableTableHandler extends EventHandler { if (cpHost != null) { cpHost.postEnableTableHandler(this.tableName); } - } catch (IOException e) { - LOG.error("Error trying to enable the table " + this.tableName, e); - } catch (CoordinatedStateException e) { - LOG.error("Error trying to enable the table " + this.tableName, e); - } catch (InterruptedException e) { + } catch (IOException | InterruptedException e) { LOG.error("Error trying to enable the table " + this.tableName, e); } finally { releaseTableLock(); @@ -177,14 +159,14 @@ public class EnableTableHandler extends EventHandler { } } - private void handleEnableTable() throws IOException, CoordinatedStateException, + private void handleEnableTable() throws IOException, InterruptedException { // I could check table is disabling and if so, not enable but require // that user first finish disabling but that might be obnoxious. // Set table enabling flag up in zk. this.assignmentManager.getTableStateManager().setTableState(this.tableName, - ZooKeeperProtos.Table.State.ENABLING); + TableState.State.ENABLING); boolean done = false; ServerManager serverManager = ((HMaster)this.server).getServerManager(); // Get the regions of this table. We're done when all listed @@ -236,7 +218,7 @@ public class EnableTableHandler extends EventHandler { if (done) { // Flip the table to enabled. this.assignmentManager.getTableStateManager().setTableState( - this.tableName, ZooKeeperProtos.Table.State.ENABLED); + this.tableName, TableState.State.ENABLED); LOG.info("Table '" + this.tableName + "' was successfully enabled. 
Status: done=" + done); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java index 591a1d8..5f50235 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java @@ -27,6 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -37,6 +38,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; @@ -65,8 +67,9 @@ public class ModifyTableHandler extends TableEventHandler { // Check operation is possible on the table in its current state // Also checks whether the table exists if (masterServices.getAssignmentManager().getTableStateManager() - .isTableState(this.htd.getTableName(), ZooKeeperProtos.Table.State.ENABLED) - && this.htd.getRegionReplication() != getTableDescriptor().getRegionReplication()) { + .isTableState(this.htd.getTableName(), TableState.State.ENABLED) + && this.htd.getRegionReplication() != getTableDescriptor() + .getHTableDescriptor().getRegionReplication()) { throw new IOException("REGION_REPLICATION change is not supported for enabled tables"); } } @@ -79,10 +82,12 @@ public class ModifyTableHandler extends TableEventHandler { cpHost.preModifyTableHandler(this.tableName, this.htd); } // Update descriptor - HTableDescriptor oldHtd = getTableDescriptor(); - this.masterServices.getTableDescriptors().add(this.htd); - deleteFamilyFromFS(hris, oldHtd.getFamiliesKeys()); - removeReplicaColumnsIfNeeded(this.htd.getRegionReplication(), oldHtd.getRegionReplication(), + TableDescriptor updated = getTableDescriptor(); + updated.setHTableDescriptor(this.htd); + this.masterServices.getTableDescriptors().add(updated); + deleteFamilyFromFS(hris, updated.getHTableDescriptor().getFamiliesKeys()); + removeReplicaColumnsIfNeeded(this.htd.getRegionReplication(), + updated.getHTableDescriptor().getRegionReplication(), htd.getTableName()); if (cpHost != null) { cpHost.postModifyTableHandler(this.tableName, this.htd); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java index 7898edc..f7aa0d5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import 
org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.AssignmentManager; @@ -39,10 +40,8 @@ import org.apache.hadoop.hbase.master.DeadServer; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.ServerManager; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; /** @@ -231,23 +230,23 @@ public class ServerShutdownHandler extends EventHandler { continue; } LOG.info("Reassigning region with rs = " + rit); - regionStates.updateRegionState(hri, State.OFFLINE); + regionStates.updateRegionState(hri, RegionState.State.OFFLINE); } else if (regionStates.isRegionInState( - hri, State.SPLITTING_NEW, State.MERGING_NEW)) { - regionStates.updateRegionState(hri, State.OFFLINE); + hri, RegionState.State.SPLITTING_NEW, RegionState.State.MERGING_NEW)) { + regionStates.updateRegionState(hri, RegionState.State.OFFLINE); } toAssignRegions.add(hri); } else if (rit != null) { if ((rit.isPendingCloseOrClosing() || rit.isOffline()) && am.getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) || + TableState.State.DISABLED, TableState.State.DISABLING) || am.getReplicasToClose().contains(hri)) { // If the table was partially disabled and the RS went down, we should clear the RIT // and remove the node for the region. // The rit that we use may be stale in case the table was in DISABLING state // but though we did assign we will not be clearing the znode in CLOSING state. // Doing this will have no harm. See HBASE-5927 - regionStates.updateRegionState(hri, State.OFFLINE); + regionStates.updateRegionState(hri, RegionState.State.OFFLINE); am.offlineDisabledRegion(hri); } else { LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition " @@ -323,7 +322,7 @@ public class ServerShutdownHandler extends EventHandler { } // If table is not disabled but the region is offlined, boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLED); + TableState.State.DISABLED); if (disabled){ LOG.info("The table " + hri.getTable() + " was disabled. Hence not proceeding."); @@ -336,7 +335,7 @@ public class ServerShutdownHandler extends EventHandler { return false; } boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(), - ZooKeeperProtos.Table.State.DISABLING); + TableState.State.DISABLING); if (disabling) { LOG.info("The table " + hri.getTable() + " is disabled. 
Hence not assigning region" + hri.getEncodedName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java index cd8fe9e..ee32a32 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -50,8 +51,8 @@ public class TableAddFamilyHandler extends TableEventHandler { @Override protected void prepareWithTableLock() throws IOException { super.prepareWithTableLock(); - HTableDescriptor htd = getTableDescriptor(); - if (htd.hasFamily(familyDesc.getName())) { + TableDescriptor htd = getTableDescriptor(); + if (htd.getHTableDescriptor().hasFamily(familyDesc.getName())) { throw new InvalidFamilyOperationException("Family '" + familyDesc.getNameAsString() + "' already exists so cannot be added"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java index 330b9d8..b166be0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java @@ -50,7 +50,7 @@ public class TableDeleteFamilyHandler extends TableEventHandler { @Override protected void prepareWithTableLock() throws IOException { super.prepareWithTableLock(); - HTableDescriptor htd = getTableDescriptor(); + HTableDescriptor htd = getTableDescriptor().getHTableDescriptor(); this.familyName = hasColumnFamily(htd, familyName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java index 4f1c39d..8993840 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.CoordinatedStateException; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -40,12 +41,12 @@ import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.BulkReOpen; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import 
org.apache.hadoop.hbase.util.Bytes; import com.google.common.collect.Lists; @@ -130,7 +131,7 @@ public abstract class TableEventHandler extends EventHandler { handleTableOperation(hris); if (eventType.isOnlineSchemaChangeSupported() && this.masterServices. getAssignmentManager().getTableStateManager().isTableState( - tableName, ZooKeeperProtos.Table.State.ENABLED)) { + tableName, TableState.State.ENABLED)) { if (reOpenAllRegions(hris)) { LOG.info("Completed table operation " + eventType + " on table " + tableName); @@ -230,10 +231,10 @@ public abstract class TableEventHandler extends EventHandler { * @throws FileNotFoundException * @throws IOException */ - public HTableDescriptor getTableDescriptor() + public TableDescriptor getTableDescriptor() throws FileNotFoundException, IOException { - HTableDescriptor htd = - this.masterServices.getTableDescriptors().get(tableName); + TableDescriptor htd = + this.masterServices.getTableDescriptors().getDescriptor(tableName); if (htd == null) { throw new IOException("HTableDescriptor missing for " + tableName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java index d07d0aa..75ec79c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -49,7 +50,7 @@ public class TableModifyFamilyHandler extends TableEventHandler { @Override protected void prepareWithTableLock() throws IOException { super.prepareWithTableLock(); - HTableDescriptor htd = getTableDescriptor(); + HTableDescriptor htd = getTableDescriptor().getHTableDescriptor(); hasColumnFamily(htd, familyDesc.getName()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java index 086d1d5..6514a27 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java @@ -28,15 +28,17 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import 
org.apache.hadoop.hbase.util.ModifyRegionUtils; @@ -93,54 +95,44 @@ public class TruncateTableHandler extends DeleteTableHandler { AssignmentManager assignmentManager = this.masterServices.getAssignmentManager(); - // 1. Set table znode - CreateTableHandler.checkAndSetEnablingTable(assignmentManager, tableName); - try { - // 1. Create Table Descriptor - new FSTableDescriptors(server.getConfiguration()) - .createTableDescriptorForTableDirectory(tempdir, this.hTableDescriptor, false); - Path tempTableDir = FSUtils.getTableDir(tempdir, this.tableName); - Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), this.tableName); - - HRegionInfo[] newRegions; - if (this.preserveSplits) { - newRegions = regions.toArray(new HRegionInfo[regions.size()]); - LOG.info("Truncate will preserve " + newRegions.length + " regions"); - } else { - newRegions = new HRegionInfo[1]; - newRegions[0] = new HRegionInfo(this.tableName, null, null); - LOG.info("Truncate will not preserve the regions"); - } - - // 2. Create Regions - List regionInfos = ModifyRegionUtils.createRegions( - masterServices.getConfiguration(), tempdir, - this.hTableDescriptor, newRegions, null); - - // 3. Move Table temp directory to the hbase root location - if (!fs.rename(tempTableDir, tableDir)) { - throw new IOException("Unable to move table from temp=" + tempTableDir + - " to hbase root=" + tableDir); - } - - // 4. Add regions to META - MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(), - regionInfos); - - // 5. Trigger immediate assignment of the regions in round-robin fashion - ModifyRegionUtils.assignRegions(assignmentManager, regionInfos); - - // 6. Set table enabled flag up in zk. - try { - assignmentManager.getTableStateManager().setTableState(tableName, - ZooKeeperProtos.Table.State.ENABLED); - } catch (CoordinatedStateException e) { - throw new IOException("Unable to ensure that " + tableName + " will be" + - " enabled because of a ZooKeeper issue", e); - } - } catch (IOException e) { - CreateTableHandler.removeEnablingTable(assignmentManager, tableName); - throw e; + // 1. Create Table Descriptor + TableDescriptor underConstruction = new TableDescriptor( + this.hTableDescriptor, TableState.State.ENABLING); + new FSTableDescriptors(server.getConfiguration()) + .createTableDescriptorForTableDirectory(tempdir, underConstruction, false); + Path tempTableDir = FSUtils.getTableDir(tempdir, this.tableName); + Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), this.tableName); + + HRegionInfo[] newRegions; + if (this.preserveSplits) { + newRegions = regions.toArray(new HRegionInfo[regions.size()]); + LOG.info("Truncate will preserve " + newRegions.length + " regions"); + } else { + newRegions = new HRegionInfo[1]; + newRegions[0] = new HRegionInfo(this.tableName, null, null); + LOG.info("Truncate will not preserve the regions"); } + + // 2. Create Regions + List regionInfos = ModifyRegionUtils.createRegions( + masterServices.getConfiguration(), tempdir, + this.hTableDescriptor, newRegions, null); + + // 3. Move Table temp directory to the hbase root location + if (!fs.rename(tempTableDir, tableDir)) { + throw new IOException("Unable to move table from temp=" + tempTableDir + + " to hbase root=" + tableDir); + } + + // 4. Add regions to META + MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(), + regionInfos); + + // 5. Trigger immediate assignment of the regions in round-robin fashion + ModifyRegionUtils.assignRegions(assignmentManager, regionInfos); + + // 6. 
Enable table
+    assignmentManager.getTableStateManager().setTableState(tableName,
+        TableState.State.ENABLED);
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index bfa5004..e5847aa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RequestContext;
@@ -566,14 +567,14 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     TableName snapshotTable = TableName.valueOf(snapshot.getTable());
     AssignmentManager assignmentMgr = master.getAssignmentManager();
     if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
-        ZooKeeperProtos.Table.State.ENABLED)) {
+        TableState.State.ENABLED)) {
       LOG.debug("Table enabled, starting distributed snapshot.");
       snapshotEnabledTable(snapshot);
       LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot));
     }
     // For disabled table, snapshot is created by the master
     else if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
-        ZooKeeperProtos.Table.State.DISABLED)) {
+        TableState.State.DISABLED)) {
       LOG.debug("Table is disabled, running snapshot entirely on master.");
       snapshotDisabledTable(snapshot);
       LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot));
@@ -705,8 +706,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     // Execute the restore/clone operation
     if (MetaTableAccessor.tableExists(master.getShortCircuitConnection(), tableName)) {
-      if (master.getAssignmentManager().getTableStateManager().isTableState(
-          TableName.valueOf(snapshot.getTable()), ZooKeeperProtos.Table.State.ENABLED)) {
+      if (master.getTableStateManager().isTableState(
+          TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) {
         throw new UnsupportedOperationException("Table '" +
           TableName.valueOf(snapshot.getTable()) + "' must be disabled in order to " +
           "perform a restore operation" +
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index b11d74c..1649c4e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -39,12 +39,14 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
 import
org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; @@ -374,7 +376,7 @@ public class NamespaceUpgrade implements Tool { HTableDescriptor newDesc = new HTableDescriptor(oldDesc); newDesc.setName(newTableName); new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory( - newTablePath, newDesc, true); + newTablePath, new TableDescriptor(newDesc, TableState.State.ENABLED), true); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index 4417bd9..7405272 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Text; @@ -112,13 +113,14 @@ public class CompactionTool extends Configured implements Tool { if (isFamilyDir(fs, path)) { Path regionDir = path.getParent(); Path tableDir = regionDir.getParent(); - HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); + TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major); + compactStoreFiles(tableDir, htd.getHTableDescriptor(), hri, + path.getName(), compactOnce, major); } else if (isRegionDir(fs, path)) { Path tableDir = path.getParent(); - HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); - compactRegion(tableDir, htd, path, compactOnce, major); + TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); + compactRegion(tableDir, htd.getHTableDescriptor(), path, compactOnce, major); } else if (isTableDir(fs, path)) { compactTable(path, compactOnce, major); } else { @@ -129,9 +131,9 @@ public class CompactionTool extends Configured implements Tool { private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major) throws IOException { - HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); + TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) { - compactRegion(tableDir, htd, regionDir, compactOnce, major); + compactRegion(tableDir, htd.getHTableDescriptor(), regionDir, compactOnce, major); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java index 1766d08..cf5b126 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java @@ -46,6 +46,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import com.google.common.base.Preconditions; +import 
com.google.common.collect.Lists; +import com.google.protobuf.ServiceException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -56,7 +59,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -66,7 +68,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.TableStateManager; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.client.ConnectionUtils; @@ -78,6 +79,7 @@ import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.RegionOpeningException; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.monitoring.MonitoredTask; @@ -92,7 +94,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Mut import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId; @@ -112,10 +113,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.ipc.RemoteException; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.protobuf.ServiceException; - /** * This class is responsible for splitting up a bunch of regionserver commit log * files that are no longer being written to, into new files, one per region for @@ -286,12 +283,13 @@ public class HLogSplitter { return true; } if(csm != null) { - try { - TableStateManager tsm = csm.getTableStateManager(); - disablingOrDisabledTables = tsm.getTablesInStates( - ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING); - } catch (CoordinatedStateException e) { - throw new IOException("Can't get disabling/disabled tables", e); + HConnection scc = csm.getServer().getShortCircuitConnection(); + TableName[] tables = scc.listTableNames(); + for (TableName table : tables) { + if (scc.getTableState(table) + .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) { + disablingOrDisabledTables.add(table); + } } } int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java index 
0038423..7a03427 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java @@ -89,6 +89,7 @@ public class WALCellCodec implements Codec { * Fully prepares the codec for use. * @param conf {@link Configuration} to read for the user-specified codec. If none is specified, * uses a {@link WALCellCodec}. + * @param cellCodecClsName name of codec * @param compression compression the codec should use * @return a {@link WALCellCodec} ready for use. * @throws UnsupportedOperationException if the codec cannot be instantiated diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 47c6ebf..d6d4f71 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -38,6 +38,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest; @@ -259,7 +261,8 @@ public class SnapshotManifest { private void load() throws IOException { switch (getSnapshotFormat(desc)) { case SnapshotManifestV1.DESCRIPTOR_VERSION: { - this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir); + this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir) + .getHTableDescriptor(); ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader"); try { this.regionManifests = @@ -353,7 +356,8 @@ public class SnapshotManifest { LOG.info("Using old Snapshot Format"); // write a copy of descriptor to the snapshot directory new FSTableDescriptors(fs, rootDir) - .createTableDescriptorForTableDirectory(workingDir, htd, false); + .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor( + htd, TableState.State.ENABLED), false); } else { LOG.debug("Convert to Single Snapshot Manifest"); convertToV2SingleManifest(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 09749d0..2345600 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.util; +import javax.annotation.Nullable; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Comparator; @@ -38,7 +39,9 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; @@ -92,11 +95,11 @@ public class FSTableDescriptors 
implements TableDescriptors { * Data structure to hold modification time and table descriptor. */ private static class TableDescriptorAndModtime { - private final HTableDescriptor htd; + private final TableDescriptor td; private final long modtime; - TableDescriptorAndModtime(final long modtime, final HTableDescriptor htd) { - this.htd = htd; + TableDescriptorAndModtime(final long modtime, final TableDescriptor td) { + this.td = td; this.modtime = modtime; } @@ -104,8 +107,16 @@ public class FSTableDescriptors implements TableDescriptors { return this.modtime; } - HTableDescriptor getTableDescriptor() { - return this.htd; + TableDescriptor getTableDescriptor() { + return this.td; + } + + HTableDescriptor getHTableDescriptor() { + return this.td.getHTableDescriptor(); + } + + TableState.State getTableState() { + return this.td.getTableState(); } } @@ -141,12 +152,13 @@ public class FSTableDescriptors implements TableDescriptors { * to see if a newer file has been created since the cached one was read. */ @Override - public HTableDescriptor get(final TableName tablename) + @Nullable + public TableDescriptor getDescriptor(final TableName tablename) throws IOException { invocations++; if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tablename)) { cachehits++; - return HTableDescriptor.META_TABLEDESC; + return new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED); } // hbase:meta is already handled. If some one tries to get the descriptor for // .logs, .oldlogs or .corrupt throw an exception. @@ -183,31 +195,62 @@ public class FSTableDescriptors implements TableDescriptors { } /** + * Get the current table descriptor for the given table, or null if none exists. + * + * Uses a local cache of the descriptor but still checks the filesystem on each call + * to see if a newer file has been created since the cached one was read. + */ + @Override + public HTableDescriptor get(TableName tableName) throws IOException { + if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tableName)) { + cachehits++; + return HTableDescriptor.META_TABLEDESC; + } + TableDescriptor descriptor = getDescriptor(tableName); + return descriptor == null ? null : descriptor.getHTableDescriptor(); + } + + /** * Returns a map from table name to table descriptor for all tables. */ @Override - public Map getAll() + public Map getAllDescriptors() throws IOException { - Map htds = new TreeMap(); + Map tds = new TreeMap(); List tableDirs = FSUtils.getTableDirs(fs, rootdir); for (Path d: tableDirs) { - HTableDescriptor htd = null; + TableDescriptor htd = null; try { - htd = get(FSUtils.getTableName(d)); + htd = getDescriptor(FSUtils.getTableName(d)); } catch (FileNotFoundException fnfe) { // inability of retrieving one HTD shouldn't stop getting the remaining LOG.warn("Trouble retrieving htd", fnfe); } if (htd == null) continue; - htds.put(htd.getTableName().getNameAsString(), htd); + tds.put(htd.getHTableDescriptor().getTableName().getNameAsString(), htd); } - return htds; + return tds; } - /* (non-Javadoc) - * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path) + /** + * Returns a map from table name to table descriptor for all tables. 
*/ @Override + public Map getAll() throws IOException { + Map htds = new TreeMap(); + Map allDescriptors = getAllDescriptors(); + for (Map.Entry entry : allDescriptors + .entrySet()) { + htds.put(entry.getKey(), entry.getValue().getHTableDescriptor()); + } + return htds; + } + + /** + * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors( + * org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path) + */ + @Override public Map getByNamespace(String name) throws IOException { Map htds = new TreeMap(); @@ -232,20 +275,46 @@ public class FSTableDescriptors implements TableDescriptors { * and updates the local cache with it. */ @Override - public void add(HTableDescriptor htd) throws IOException { + public void add(TableDescriptor htd) throws IOException { if (fsreadonly) { throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); } - if (TableName.META_TABLE_NAME.equals(htd.getTableName())) { + TableName tableName = htd.getHTableDescriptor().getTableName(); + if (TableName.META_TABLE_NAME.equals(tableName)) { throw new NotImplementedException(); } - if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) { + if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) { throw new NotImplementedException( - "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString()); + "Cannot add a table descriptor for a reserved subdirectory name: " + + htd.getHTableDescriptor().getNameAsString()); } updateTableDescriptor(htd); - long modtime = getTableInfoModtime(htd.getTableName()); - this.cache.put(htd.getTableName(), new TableDescriptorAndModtime(modtime, htd)); + } + + /** + * Adds (or updates) the table descriptor to the FileSystem + * and updates the local cache with it. + */ + @Override + public void add(HTableDescriptor htd) throws IOException { + if (fsreadonly) { + throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); + } + TableName tableName = htd.getTableName(); + if (TableName.META_TABLE_NAME.equals(tableName)) { + throw new NotImplementedException(); + } + if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) { + throw new NotImplementedException( + "Cannot add a table descriptor for a reserved subdirectory name: " + + htd.getNameAsString()); + } + TableDescriptor descriptor = getDescriptor(htd.getTableName()); + if (descriptor == null) + descriptor = new TableDescriptor(htd); + else + descriptor.setHTableDescriptor(htd); + updateTableDescriptor(descriptor); } /** @@ -266,7 +335,7 @@ public class FSTableDescriptors implements TableDescriptors { } } TableDescriptorAndModtime tdm = this.cache.remove(tablename); - return tdm == null ? null : tdm.getTableDescriptor(); + return tdm == null ? null : tdm.getHTableDescriptor(); } /** @@ -463,7 +532,7 @@ public class FSTableDescriptors implements TableDescriptors { * if it exists, bypassing the local cache. * Returns null if it's not found. */ - public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, + public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path hbaseRootDir, TableName tableName) throws IOException { Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName); return getTableDescriptorFromFs(fs, tableDir); @@ -474,7 +543,7 @@ public class FSTableDescriptors implements TableDescriptors { * directly from the file system if it exists. 
* @throws TableInfoMissingException if there is no descriptor */ - public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir) + public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir) throws IOException { FileStatus status = getTableInfoPath(fs, tableDir, false); if (status == null) { @@ -509,11 +578,11 @@ public class FSTableDescriptors implements TableDescriptors { if (status == null) { return null; } - HTableDescriptor htd = readTableDescriptor(fs, status, !fsreadonly); - return new TableDescriptorAndModtime(status.getModificationTime(), htd); + TableDescriptor td = readTableDescriptor(fs, status, !fsreadonly); + return new TableDescriptorAndModtime(status.getModificationTime(), td); } - private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status, + private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status, boolean rewritePb) throws IOException { int len = Ints.checkedCast(status.getLen()); byte [] content = new byte[len]; @@ -523,9 +592,9 @@ public class FSTableDescriptors implements TableDescriptors { } finally { fsDataInputStream.close(); } - HTableDescriptor htd = null; + TableDescriptor td = null; try { - htd = HTableDescriptor.parseFrom(content); + td = TableDescriptor.parseFrom(content); } catch (DeserializationException e) { throw new IOException("content=" + Bytes.toShort(content), e); } @@ -533,25 +602,28 @@ public class FSTableDescriptors implements TableDescriptors { // Convert the file over to be pb before leaving here. Path tableInfoDir = status.getPath().getParent(); Path tableDir = tableInfoDir.getParent(); - writeTableDescriptor(fs, htd, tableDir, status); + writeTableDescriptor(fs, td, tableDir, status); } - return htd; + return td; } - + /** * Update table descriptor on the file system * @throws IOException Thrown if failed update. * @throws NotImplementedException if in read only mode */ - @VisibleForTesting Path updateTableDescriptor(HTableDescriptor htd) + @VisibleForTesting Path updateTableDescriptor(TableDescriptor td) throws IOException { if (fsreadonly) { throw new NotImplementedException("Cannot update a table descriptor - in read only mode"); } - Path tableDir = getTableDir(htd.getTableName()); - Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir)); + TableName tableName = td.getHTableDescriptor().getTableName(); + Path tableDir = getTableDir(tableName); + Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir)); if (p == null) throw new IOException("Failed update"); LOG.info("Updated tableinfo=" + p); + long modtime = getTableInfoModtime(tableName); + this.cache.put(tableName, new TableDescriptorAndModtime(modtime, td)); return p; } @@ -601,7 +673,7 @@ public class FSTableDescriptors implements TableDescriptors { * @return Descriptor file or null if we failed write. */ private static Path writeTableDescriptor(final FileSystem fs, - final HTableDescriptor htd, final Path tableDir, + final TableDescriptor htd, final Path tableDir, final FileStatus currentDescriptorFile) throws IOException { // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon. 
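The hunks above change the .tableinfo payload from a serialized HTableDescriptor to a serialized TableDescriptor, so the table's state is persisted beside its schema and survives the write-to-temp-then-rename step. Below is a minimal round-trip sketch of that serialization, mirroring what the write path serializes and readTableDescriptor parses back; the method name roundTrip and the table name "demo" are illustrative, not part of the patch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.exceptions.DeserializationException;

    static void roundTrip() throws DeserializationException, IOException {
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
      // Schema and state are now carried together in one protobuf payload.
      TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
      byte[] content = td.toByteArray();             // pb-magic prefixed bytes, as written to .tableinfo
      TableDescriptor parsed = TableDescriptor.parseFrom(content);
      if (parsed.getTableState() != TableState.State.ENABLED) {
        throw new IllegalStateException("table state was lost in serialization");
      }
    }
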
@@ -632,7 +704,7 @@ public class FSTableDescriptors implements TableDescriptors { } tableInfoDirPath = new Path(tableInfoDir, filename); try { - writeHTD(fs, tempPath, htd); + writeTD(fs, tempPath, htd); fs.mkdirs(tableInfoDirPath.getParent()); if (!fs.rename(tempPath, tableInfoDirPath)) { throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath); @@ -656,7 +728,7 @@ public class FSTableDescriptors implements TableDescriptors { return tableInfoDirPath; } - private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd) + private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd) throws IOException { FSDataOutputStream out = fs.create(p, false); try { @@ -673,23 +745,40 @@ public class FSTableDescriptors implements TableDescriptors { * Used by tests. * @return True if we successfully created file. */ - public boolean createTableDescriptor(HTableDescriptor htd) throws IOException { + public boolean createTableDescriptor(TableDescriptor htd) throws IOException { return createTableDescriptor(htd, false); } /** + * Create new HTableDescriptor in HDFS. Happens when we are creating table. + * Used by tests. + * @return True if we successfully created file. + */ + public boolean createTableDescriptor(HTableDescriptor htd) throws IOException { + return createTableDescriptor(new TableDescriptor(htd), false); + } + + /** * Create new HTableDescriptor in HDFS. Happens when we are creating table. If * forceCreation is true then even if previous table descriptor is present it * will be overwritten * * @return True if we successfully created file. */ - public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation) + public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation) throws IOException { - Path tableDir = getTableDir(htd.getTableName()); + Path tableDir = getTableDir(htd.getHTableDescriptor().getTableName()); return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation); } - + + /** + * @see {@link #createTableDescriptor(org.apache.hadoop.hbase.TableDescriptor, boolean)} + */ + public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation) + throws IOException { + return createTableDescriptor(new TableDescriptor(htd), forceCreation); + } + /** * Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create * a new table or snapshot a table. 
@@ -702,7 +791,7 @@ public class FSTableDescriptors implements TableDescriptors { * @throws IOException if a filesystem error occurs */ public boolean createTableDescriptorForTableDirectory(Path tableDir, - HTableDescriptor htd, boolean forceCreation) throws IOException { + TableDescriptor htd, boolean forceCreation) throws IOException { if (fsreadonly) { throw new NotImplementedException("Cannot create a table descriptor - in read only mode"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 017153a..d5cb439 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util; import java.io.FileNotFoundException; import java.io.IOException; -import java.io.InterruptedIOException; import java.io.PrintWriter; import java.io.StringWriter; import java.net.URI; @@ -70,6 +69,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.MetaTableAccessor; @@ -89,6 +89,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -107,7 +108,6 @@ import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler; import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl; import org.apache.hadoop.hbase.util.hbck.TableLockChecker; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.io.IOUtils; @@ -953,9 +953,9 @@ public class HBaseFsck extends Configured { modTInfo = new TableInfo(tableName); tablesInfo.put(tableName, modTInfo); try { - HTableDescriptor htd = + TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName); - modTInfo.htds.add(htd); + modTInfo.htds.add(htd.getHTableDescriptor()); } catch (IOException ioe) { if (!orphanTableDirs.containsKey(tableName)) { LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe); @@ -1009,7 +1009,7 @@ public class HBaseFsck extends Configured { for (String columnfamimly : columns) { htd.addFamily(new HColumnDescriptor(columnfamimly)); } - fstd.createTableDescriptor(htd, true); + fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true); return true; } @@ -1057,7 +1057,7 @@ public class HBaseFsck extends Configured { if (tableName.equals(htds[j].getTableName())) { HTableDescriptor htd = htds[j]; LOG.info("fixing orphan table: " + tableName + " from cache"); - fstd.createTableDescriptor(htd, true); + fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true); j++; iter.remove(); } @@ -1382,22 +1382,16 @@ public class HBaseFsck 
extends Configured { * @throws IOException */ private void loadDisabledTables() - throws ZooKeeperConnectionException, IOException { + throws IOException { HConnectionManager.execute(new HConnectable(getConf()) { @Override public Void connect(HConnection connection) throws IOException { - ZooKeeperWatcher zkw = createZooKeeperWatcher(); - try { - for (TableName tableName : - ZKTableStateClientSideReader.getDisabledOrDisablingTables(zkw)) { - disabledTables.add(tableName); + TableName[] tables = connection.listTableNames(); + for (TableName table : tables) { + if (connection.getTableState(table) + .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) { + disabledTables.add(table); } - } catch (KeeperException ke) { - throw new IOException(ke); - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } finally { - zkw.close(); } return null; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java index 98eb7e2..e910be5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java @@ -156,7 +156,8 @@ class HMerge { this.rootDir = FSUtils.getRootDir(conf); Path tabledir = FSUtils.getTableDir(this.rootDir, tableName); - this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir); + this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir) + .getHTableDescriptor(); String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME; this.hlog = HLogFactory.createHLog(fs, tabledir, logname, conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java index d50005b..25aeeed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -153,9 +154,9 @@ public class Merge extends Configured implements Tool { if (info2 == null) { throw new NullPointerException("info2 is null using key " + meta); } - HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()), + TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()), this.rootdir, this.tableName); - HRegion merged = merge(htd, meta, info1, info2); + HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2); LOG.info("Adding " + merged.getRegionInfo() + " to " + meta.getRegionInfo()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java index f773b06..5425548 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java @@ -141,21 +141,7 @@ public class ZKDataMigrator extends Configured implements Tool { LOG.info("No table present to migrate table state to PB. 
returning.."); return; } - for (String table : tables) { - String znode = ZKUtil.joinZNode(zkw.tableZNode, table); - // Delete -ROOT- table state znode since its no longer present in 0.95.0 - // onwards. - if (table.equals("-ROOT-") || table.equals(".META.")) { - ZKUtil.deleteNode(zkw, znode); - continue; - } - byte[] data = ZKUtil.getData(zkw, znode); - if (ProtobufUtil.isPBMagicPrefix(data)) continue; - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - builder.setState(ZooKeeperProtos.Table.State.valueOf(Bytes.toString(data))); - data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); - ZKUtil.setData(zkw, znode, data); - } + ZKUtil.deleteNodeRecursively(zkw, zkw.tableZNode); } private void checkAndMigrateReplicationNodesToPB(ZooKeeperWatcher zkw) throws KeeperException, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java deleted file mode 100644 index 1aff12f..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java +++ /dev/null @@ -1,330 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.zookeeper; - -import com.google.protobuf.InvalidProtocolBufferException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.CoordinatedStateException; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableStateManager; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.zookeeper.KeeperException; - -import java.io.InterruptedIOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * Implementation of TableStateManager which reads, caches and sets state - * up in ZooKeeper. If multiple read/write clients, will make for confusion. - * Code running on client side without consensus context should use - * {@link ZKTableStateClientSideReader} instead. - * - *

To save on trips to the zookeeper ensemble, internally we cache table - * state. - */ -@InterfaceAudience.Private -public class ZKTableStateManager implements TableStateManager { - // A znode will exist under the table directory if it is in any of the - // following states: {@link TableState#ENABLING} , {@link TableState#DISABLING}, - // or {@link TableState#DISABLED}. If {@link TableState#ENABLED}, there will - // be no entry for a table in zk. Thats how it currently works. - - private static final Log LOG = LogFactory.getLog(ZKTableStateManager.class); - private final ZooKeeperWatcher watcher; - - /** - * Cache of what we found in zookeeper so we don't have to go to zk ensemble - * for every query. Synchronize access rather than use concurrent Map because - * synchronization needs to span query of zk. - */ - private final Map cache = - new HashMap(); - - public ZKTableStateManager(final ZooKeeperWatcher zkw) throws KeeperException, - InterruptedException { - super(); - this.watcher = zkw; - populateTableStates(); - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @throws KeeperException, InterruptedException - */ - private void populateTableStates() throws KeeperException, InterruptedException { - synchronized (this.cache) { - List children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode); - if (children == null) return; - for (String child: children) { - TableName tableName = TableName.valueOf(child); - ZooKeeperProtos.Table.State state = getTableState(this.watcher, tableName); - if (state != null) this.cache.put(tableName, state); - } - } - } - - /** - * Sets table state in ZK. Sets no watches. - * - * {@inheritDoc} - */ - @Override - public void setTableState(TableName tableName, ZooKeeperProtos.Table.State state) - throws CoordinatedStateException { - synchronized (this.cache) { - LOG.warn("Moving table " + tableName + " state from " + this.cache.get(tableName) - + " to " + state); - try { - setTableStateInZK(tableName, state); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - - /** - * Checks and sets table state in ZK. Sets no watches. - * {@inheritDoc} - */ - @Override - public boolean setTableStateIfInStates(TableName tableName, - ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... states) - throws CoordinatedStateException { - synchronized (this.cache) { - // Transition ENABLED->DISABLING has to be performed with a hack, because - // we treat empty state as enabled in this case because 0.92- clusters. - if ( - (newState == ZooKeeperProtos.Table.State.DISABLING) && - this.cache.get(tableName) != null && !isTableState(tableName, states) || - (newState != ZooKeeperProtos.Table.State.DISABLING && - !isTableState(tableName, states) )) { - return false; - } - try { - setTableStateInZK(tableName, newState); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - return true; - } - } - - /** - * Checks and sets table state in ZK. Sets no watches. - * {@inheritDoc} - */ - @Override - public boolean setTableStateIfNotInStates(TableName tableName, - ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... 
states) - throws CoordinatedStateException { - synchronized (this.cache) { - if (isTableState(tableName, states)) { - return false; - } - try { - setTableStateInZK(tableName, newState); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - return true; - } - } - - private void setTableStateInZK(final TableName tableName, - final ZooKeeperProtos.Table.State state) - throws KeeperException { - String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()); - if (ZKUtil.checkExists(this.watcher, znode) == -1) { - ZKUtil.createAndFailSilent(this.watcher, znode); - } - synchronized (this.cache) { - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - builder.setState(state); - byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); - ZKUtil.setData(this.watcher, znode, data); - this.cache.put(tableName, state); - } - } - - /** - * Checks if table is marked in specified state in ZK. - * - * {@inheritDoc} - */ - @Override - public boolean isTableState(final TableName tableName, - final ZooKeeperProtos.Table.State... states) { - synchronized (this.cache) { - ZooKeeperProtos.Table.State currentState = this.cache.get(tableName); - return isTableInState(Arrays.asList(states), currentState); - } - } - - /** - * Deletes the table in zookeeper. Fails silently if the - * table is not currently disabled in zookeeper. Sets no watches. - * - * {@inheritDoc} - */ - @Override - public void setDeletedTable(final TableName tableName) - throws CoordinatedStateException { - synchronized (this.cache) { - if (this.cache.remove(tableName) == null) { - LOG.warn("Moving table " + tableName + " state to deleted but was " + - "already deleted"); - } - try { - ZKUtil.deleteNodeFailSilent(this.watcher, - ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString())); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - - /** - * check if table is present. - * - * @param tableName table we're working on - * @return true if the table is present - */ - @Override - public boolean isTablePresent(final TableName tableName) { - synchronized (this.cache) { - ZooKeeperProtos.Table.State state = this.cache.get(tableName); - return !(state == null); - } - } - - /** - * Gets a list of all the tables set as disabling in zookeeper. - * @return Set of disabling tables, empty Set if none - * @throws CoordinatedStateException if error happened in underlying coordination engine - */ - @Override - public Set getTablesInStates(ZooKeeperProtos.Table.State... states) - throws InterruptedIOException, CoordinatedStateException { - try { - return getAllTables(states); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - - /** - * {@inheritDoc} - */ - @Override - public void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states, - boolean deletePermanentState) - throws CoordinatedStateException { - synchronized (this.cache) { - if (isTableState(tableName, states)) { - this.cache.remove(tableName); - if (deletePermanentState) { - try { - ZKUtil.deleteNodeFailSilent(this.watcher, - ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString())); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - } - } - - /** - * Gets a list of all the tables of specified states in zookeeper. 
- * @return Set of tables of specified states, empty Set if none - * @throws KeeperException - */ - Set getAllTables(final ZooKeeperProtos.Table.State... states) - throws KeeperException, InterruptedIOException { - - Set allTables = new HashSet(); - List children = - ZKUtil.listChildrenNoWatch(watcher, watcher.tableZNode); - if(children == null) return allTables; - for (String child: children) { - TableName tableName = TableName.valueOf(child); - ZooKeeperProtos.Table.State state; - try { - state = getTableState(watcher, tableName); - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } - for (ZooKeeperProtos.Table.State expectedState: states) { - if (state == expectedState) { - allTables.add(tableName); - break; - } - } - } - return allTables; - } - - /** - * Gets table state from ZK. - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return Null or {@link ZooKeeperProtos.Table.State} found in znode. - * @throws KeeperException - */ - private ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); - byte [] data = ZKUtil.getData(zkw, znode); - if (data == null || data.length <= 0) return null; - try { - ProtobufUtil.expectPBMagicPrefix(data); - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.Table t = builder.mergeFrom(data, magicLen, data.length - magicLen).build(); - return t.getState(); - } catch (InvalidProtocolBufferException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - throw ke; - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } - - /** - * @return true if current state isn't null and is contained - * in the list of expected states. - */ - private boolean isTableInState(final List expectedStates, - final ZooKeeperProtos.Table.State currentState) { - return currentState != null && expectedStates.contains(currentState); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 45bc524..dd9384d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -2818,6 +2818,48 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } /** + * Waits for a table to be 'disabled'. Disabled means that the table's state has been set to 'disabled'. + * Will time out after the default period (30 seconds). + * @param table Table to wait on. + * @throws InterruptedException + * @throws IOException + */ + public void waitTableDisabled(byte[] table) + throws InterruptedException, IOException { + waitTableDisabled(getHBaseAdmin(), table, 30000); + } + + public void waitTableDisabled(Admin admin, byte[] table) + throws InterruptedException, IOException { + waitTableDisabled(admin, table, 30000); + } + + /** + * Waits for a table to be 'disabled'. Disabled means that the table's state has been set to 'disabled'. + * @see #waitTableAvailable(byte[]) + * @param table Table to wait on. + * @param timeoutMillis Time to wait on it being marked disabled.
+ * @throws InterruptedException + * @throws IOException + */ + public void waitTableDisabled(byte[] table, long timeoutMillis) + throws InterruptedException, IOException { + waitTableDisabled(getHBaseAdmin(), table, timeoutMillis); + } + + public void waitTableDisabled(Admin admin, byte[] table, long timeoutMillis) + throws InterruptedException, IOException { + TableName tableName = TableName.valueOf(table); + long startWait = System.currentTimeMillis(); + while (!admin.isTableDisabled(tableName)) { + assertTrue("Timed out waiting for table to become disabled " + + Bytes.toStringBinary(table), + System.currentTimeMillis() - startWait < timeoutMillis); + Thread.sleep(200); + } + } + + /** * Make sure that at least the specified number of region servers * are running * @param num minimum number of region servers that should be running diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java index 83aba8e..6ca50a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java @@ -151,8 +151,8 @@ public class TestHColumnDescriptorDefaultVersions { // Verify descriptor from HDFS MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); - htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); - hcds = htd.getColumnFamilies(); + TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); + hcds = td.getHTableDescriptor().getColumnFamilies(); verifyHColumnDescriptor(expected, hcds, tableName, families); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java new file mode 100644 index 0000000..2327de6 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java @@ -0,0 +1,37 @@ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; + +/** + * Test setting values in the descriptor + */ +@Category(SmallTests.class) +public class TestTableDescriptor { + final static Log LOG = LogFactory.getLog(TestTableDescriptor.class); + + @Test + public void testPb() throws DeserializationException, IOException { + HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC); + final int v = 123; + htd.setMaxFileSize(v); + htd.setDurability(Durability.ASYNC_WAL); + htd.setReadOnly(true); + htd.setRegionReplication(2); + TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); + byte[] bytes = td.toByteArray(); + TableDescriptor deserializedTd = TableDescriptor.parseFrom(bytes); + assertEquals(td, deserializedTd); + assertEquals(td.getHTableDescriptor(), deserializedTd.getHTableDescriptor()); + assertEquals(td.getTableState(), deserializedTd.getTableState()); + } +} diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index b91c962..cbbfa32 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -59,13 +59,11 @@ import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.wal.HLogUtilsForTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.After; import org.junit.AfterClass; @@ -257,7 +255,7 @@ public class TestAdmin { this.admin.disableTable(ht.getName()); assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), ZooKeeperProtos.Table.State.DISABLED)); + ht.getName(), TableState.State.DISABLED)); // Test that table is disabled get = new Get(row); @@ -272,7 +270,7 @@ public class TestAdmin { this.admin.enableTable(table); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), ZooKeeperProtos.Table.State.ENABLED)); + ht.getName(), TableState.State.ENABLED)); // Test that table is enabled try { @@ -345,7 +343,7 @@ public class TestAdmin { assertEquals(numTables + 1, tables.length); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf("testCreateTable"), ZooKeeperProtos.Table.State.ENABLED)); + TableName.valueOf("testCreateTable"), TableState.State.ENABLED)); } @Test (timeout=300000) @@ -1127,8 +1125,7 @@ public class TestAdmin { ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL); TableName tableName = TableName.valueOf("testMasterAdmin"); TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); - while (!ZKTableStateClientSideReader.isEnabledTable(zkw, - TableName.valueOf("testMasterAdmin"))) { + while (!this.admin.isTableEnabled(TableName.valueOf("testMasterAdmin"))) { Thread.sleep(10); } this.admin.disableTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 4b5556e..8231c50 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -54,15 +54,14 @@ import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import 
org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -126,7 +125,8 @@ public class TestAssignmentManagerOnCluster { } RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); - assertEquals("Meta should be not in transition", metaState.getState(), State.OPEN); + assertEquals("Meta should be not in transition", + metaState.getState(), RegionState.State.OPEN); assertNotEquals("Meta should be moved off master", metaState.getServerName(), master.getServerName()); assertEquals("Meta should be on the meta server", @@ -152,7 +152,8 @@ public class TestAssignmentManagerOnCluster { regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO)); // Now, make sure meta is registered in zk metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); - assertEquals("Meta should be not in transition", metaState.getState(), State.OPEN); + assertEquals("Meta should be not in transition", + metaState.getState(), RegionState.State.OPEN); assertEquals("Meta should be assigned", metaState.getServerName(), regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO)); assertNotEquals("Meta should be assigned on a different server", @@ -210,7 +211,8 @@ public class TestAssignmentManagerOnCluster { String table = "testAssignRegionOnRestartedServer"; TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 20); TEST_UTIL.getMiniHBaseCluster().stopMaster(0); - TEST_UTIL.getMiniHBaseCluster().startMaster(); //restart the master so that conf take into affect + // restart the master so that the conf change takes effect + TEST_UTIL.getMiniHBaseCluster().startMaster(); ServerName deadServer = null; HMaster master = null; @@ -623,9 +625,9 @@ public class TestAssignmentManagerOnCluster { } } am.regionOffline(hri); - am.getRegionStates().updateRegionState(hri, State.PENDING_OPEN, destServerName); + am.getRegionStates().updateRegionState(hri, RegionState.State.PENDING_OPEN, destServerName); - am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLING); + am.getTableStateManager().setTableState(table, TableState.State.DISABLING); List toAssignRegions = am.processServerShutdown(destServerName); assertTrue("Regions to be assigned should be empty.", toAssignRegions.isEmpty()); assertTrue("Regions to be assigned should be empty.", am.getRegionStates() @@ -634,7 +636,7 @@ public class TestAssignmentManagerOnCluster { if (hri != null && serverName != null) { am.regionOnline(hri, serverName); } - am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLED); + am.getTableStateManager().setTableState(table, TableState.State.DISABLED); TEST_UTIL.deleteTable(table); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 288d115..9fa8278 100644 ---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -56,6 +57,7 @@ import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; @@ -298,13 +300,18 @@ public class TestCatalogJanitor { return new TableDescriptors() { @Override public HTableDescriptor remove(TableName tablename) throws IOException { - // TODO Auto-generated method stub + // noop return null; } @Override public Map getAll() throws IOException { - // TODO Auto-generated method stub + // noop + return null; + } + + @Override public Map getAllDescriptors() throws IOException { + // noop return null; } @@ -315,14 +322,24 @@ public class TestCatalogJanitor { } @Override + public TableDescriptor getDescriptor(TableName tablename) + throws IOException { + return createTableDescriptor(); + } + + @Override public Map getByNamespace(String name) throws IOException { return null; } @Override public void add(HTableDescriptor htd) throws IOException { - // TODO Auto-generated method stub + // noop + } + @Override + public void add(TableDescriptor htd) throws IOException { + // noop } }; } @@ -407,6 +424,11 @@ public class TestCatalogJanitor { } @Override + public TableStateManager getTableStateManager() { + return null; + } + + @Override public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b, boolean forcible) throws IOException { } @@ -977,6 +999,11 @@ public class TestCatalogJanitor { return htd; } + private TableDescriptor createTableDescriptor() { + TableDescriptor htd = new TableDescriptor(createHTableDescriptor(), TableState.State.ENABLED); + return htd; + } + private MultiResponse buildMultiResponse(MultiRequest req) { MultiResponse.Builder builder = MultiResponse.newBuilder(); RegionActionResult.Builder regionActionResultBuilder = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index 69e6761..8e0dce4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.junit.AfterClass; @@ -81,7 +81,7 @@ public class TestMaster { HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME); 
assertTrue(m.assignmentManager.getTableStateManager().isTableState(TABLENAME, - ZooKeeperProtos.Table.State.ENABLED)); + TableState.State.ENABLED)); TEST_UTIL.loadTable(ht, FAMILYNAME, false); ht.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java index be3fe1a..5f81691 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.junit.Test; @@ -94,8 +94,8 @@ public class TestMasterRestartAfterDisablingTable { assertTrue("The table should not be in enabled state", cluster.getMaster() .getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.DISABLED, - ZooKeeperProtos.Table.State.DISABLING)); + TableName.valueOf("tableRestart"), TableState.State.DISABLED, + TableState.State.DISABLING)); log("Enabling table\n"); // Need a new Admin, the previous one is on the old master Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); @@ -110,7 +110,7 @@ public class TestMasterRestartAfterDisablingTable { 6, regions.size()); assertTrue("The table should be in enabled state", cluster.getMaster() .getAssignmentManager().getTableStateManager() - .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED)); + .isTableState(TableName.valueOf("tableRestart"), TableState.State.ENABLED)); ht.close(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java index a04e4d0..b85b702 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -392,12 +393,14 @@ public class TestTableLockManager { alterThread.start(); splitThread.start(); + TEST_UTIL.waitTableEnabled(tableName.toBytes()); while (true) { List regions = admin.getTableRegions(tableName); LOG.info(String.format("Table #regions: %d regions: %s:", regions.size(), regions)); assertEquals(admin.getTableDescriptor(tableName), desc); for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) { - assertEquals(desc, region.getTableDesc()); + HTableDescriptor regionTableDesc = region.getTableDesc(); + assertEquals(desc, regionTableDesc); } if (regions.size() >= 5) { 
break; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java index 7fdb72a..d33fdbf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -154,8 +155,9 @@ public class TestTableDescriptorModification { // Verify descriptor from HDFS MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); - htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); - verifyTableDescriptor(htd, tableName, families); + TableDescriptor td = + FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); + verifyTableDescriptor(td.getHTableDescriptor(), tableName, families); } private void verifyTableDescriptor(final HTableDescriptor htd, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java index c147fd0..6a9c0dc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.client.Admin; @@ -479,7 +480,8 @@ public class SnapshotTestingUtils { this.tableRegions = tableRegions; this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir); new FSTableDescriptors(conf) - .createTableDescriptorForTableDirectory(snapshotDir, htd, false); + .createTableDescriptorForTableDirectory(snapshotDir, + new TableDescriptor(htd), false); } public HTableDescriptor getTableDescriptor() { @@ -574,7 +576,8 @@ public class SnapshotTestingUtils { private RegionData[] createTable(final HTableDescriptor htd, final int nregions) throws IOException { Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName()); - new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false); + new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, + new TableDescriptor(htd), false); assertTrue(nregions % 2 == 0); RegionData[] regions = new RegionData[nregions]; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index aa16177..5fb947f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.client.TableState; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -69,14 +71,15 @@ public class TestFSTableDescriptors { public void testCreateAndUpdate() throws IOException { Path testdir = UTIL.getDataTestDir("testCreateAndUpdate"); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate")); + TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir); - assertTrue(fstd.createTableDescriptor(htd)); - assertFalse(fstd.createTableDescriptor(htd)); + assertTrue(fstd.createTableDescriptor(td)); + assertFalse(fstd.createTableDescriptor(td)); FileStatus [] statuses = fs.listStatus(testdir); assertTrue("statuses.length="+statuses.length, statuses.length == 1); for (int i = 0; i < 10; i++) { - fstd.updateTableDescriptor(htd); + fstd.updateTableDescriptor(td); } statuses = fs.listStatus(testdir); assertTrue(statuses.length == 1); @@ -90,20 +93,29 @@ public class TestFSTableDescriptors { Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo"); HTableDescriptor htd = new HTableDescriptor( TableName.valueOf("testSequenceidAdvancesOnTableInfo")); + TableDescriptor td = new TableDescriptor(htd); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir); - Path p0 = fstd.updateTableDescriptor(htd); + Path p0 = fstd.updateTableDescriptor(td); int i0 = FSTableDescriptors.getTableInfoSequenceId(p0); - Path p1 = fstd.updateTableDescriptor(htd); + Path p1 = fstd.updateTableDescriptor(td); // Assert we cleaned up the old file. assertTrue(!fs.exists(p0)); int i1 = FSTableDescriptors.getTableInfoSequenceId(p1); assertTrue(i1 == i0 + 1); - Path p2 = fstd.updateTableDescriptor(htd); + Path p2 = fstd.updateTableDescriptor(td); // Assert we cleaned up the old file. assertTrue(!fs.exists(p1)); int i2 = FSTableDescriptors.getTableInfoSequenceId(p2); assertTrue(i2 == i1 + 1); + td = new TableDescriptor(htd, TableState.State.DISABLED); + Path p3 = fstd.updateTableDescriptor(td); + // Assert we cleaned up the old file. 
+ assertTrue(!fs.exists(p2)); + int i3 = FSTableDescriptors.getTableInfoSequenceId(p3); + assertTrue(i3 == i2 + 1); + TableDescriptor descriptor = fstd.getDescriptor(htd.getTableName()); + assertEquals(descriptor, td); } @Test @@ -155,12 +167,13 @@ public class TestFSTableDescriptors { final String name = "testReadingHTDFromFS"; FileSystem fs = FileSystem.get(UTIL.getConfiguration()); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); + TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); Path rootdir = UTIL.getDataTestDir(name); FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir); - fstd.createTableDescriptor(htd); - HTableDescriptor htd2 = + fstd.createTableDescriptor(td); + TableDescriptor td2 = FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName()); - assertTrue(htd.equals(htd2)); + assertTrue(td.equals(td2)); } @Test public void testHTableDescriptors() @@ -180,7 +193,8 @@ public class TestFSTableDescriptors { final int count = 10; // Write out table infos. for (int i = 0; i < count; i++) { - HTableDescriptor htd = new HTableDescriptor(name + i); + TableDescriptor htd = new TableDescriptor(new HTableDescriptor(name + i), + TableState.State.ENABLED); htds.createTableDescriptor(htd); } @@ -194,7 +208,7 @@ public class TestFSTableDescriptors { for (int i = 0; i < count; i++) { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i)); htd.addFamily(new HColumnDescriptor("" + i)); - htds.updateTableDescriptor(htd); + htds.updateTableDescriptor(new TableDescriptor(htd)); } // Wait a while so mod time we write is for sure different. Thread.sleep(100); @@ -277,18 +291,19 @@ public class TestFSTableDescriptors { Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready"); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf( "testCreateTableDescriptorUpdatesIfThereExistsAlready")); + TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir); - assertTrue(fstd.createTableDescriptor(htd)); - assertFalse(fstd.createTableDescriptor(htd)); + assertTrue(fstd.createTableDescriptor(td)); + assertFalse(fstd.createTableDescriptor(td)); htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue")); - assertTrue(fstd.createTableDescriptor(htd)); //this will re-create + assertTrue(fstd.createTableDescriptor(td)); //this will re-create Path tableDir = fstd.getTableDir(htd.getTableName()); Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR); FileStatus[] statuses = fs.listStatus(tmpTableDir); assertTrue(statuses.length == 0); - assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir)); + assertEquals(td, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java index 563d51d..6f16f67 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.client.Get; import 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
index 563d51d..6f16f67 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -146,7 +147,8 @@ public class TestMergeTool extends HBaseTestCase {
     try {
       // Create meta region
       createMetaRegion();
-      new FSTableDescriptors(this.fs, this.testDir).createTableDescriptor(this.desc);
+      new FSTableDescriptors(this.fs, this.testDir).createTableDescriptor(
+          new TableDescriptor(this.desc));
       /*
        * Create the regions we will merge
        */
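Worth noting before the next file: the patch uses two TableDescriptor constructors. Tests that care about the state pass it explicitly, while TestMergeTool and testSequenceidAdvancesOnTableInfo use the one-argument form, which presumably fills in a default state (likely ENABLED, though the default is an assumption; this diff never states it):

    TableDescriptor explicit = new TableDescriptor(desc, TableState.State.DISABLED);
    TableDescriptor defaulted = new TableDescriptor(desc);  // default state assumed, not shown in this patch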
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestTableStateManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestTableStateManager.java
new file mode 100644
index 0000000..8a9ade6
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestTableStateManager.java
@@ -0,0 +1,108 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.master.TableStateManager;
+import org.apache.zookeeper.KeeperException;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+@Category(MediumTests.class)
+public class TestTableStateManager {
+  private static final Log LOG = LogFactory.getLog(TestTableStateManager.class);
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testTableStates()
+      throws IOException, KeeperException, InterruptedException {
+    final TableName name =
+        TableName.valueOf("testDisabled");
+    TableStateManager stm = TEST_UTIL.getMiniHBaseCluster().getMaster().getTableStateManager();
+    assertFalse(stm.isTableState(name, TableState.State.ENABLED));
+    assertFalse(stm.isTableState(name, TableState.State.DISABLING));
+    assertFalse(stm.isTableState(name, TableState.State.DISABLED));
+    assertFalse(stm.isTableState(name, TableState.State.ENABLING));
+    assertFalse(stm.isTableState(name, TableState.State.DISABLED, TableState.State.DISABLING));
+    assertFalse(stm.isTableState(name, TableState.State.DISABLED, TableState.State.ENABLING));
+    assertFalse(stm.isTablePresent(name));
+    TEST_UTIL.getHBaseAdmin().createTable(new HTableDescriptor(name));
+    TEST_UTIL.waitTableEnabled(name.toBytes());
+    assertTrue(stm.isTableState(name, TableState.State.ENABLED));
+    stm.setTableState(name, TableState.State.DISABLING);
+    assertTrue(stm.isTableState(name, TableState.State.DISABLING));
+    assertTrue(stm.isTableState(name, TableState.State.DISABLED, TableState.State.DISABLING));
+    assertFalse(stm.getTablesInStates(TableState.State.DISABLED).contains(name));
+    assertTrue(stm.isTablePresent(name));
+    stm.setTableState(name, TableState.State.DISABLED);
+    assertTrue(stm.isTableState(name, TableState.State.DISABLED));
+    assertTrue(stm.isTableState(name, TableState.State.DISABLED, TableState.State.DISABLING));
+    assertFalse(stm.isTableState(name, TableState.State.DISABLING));
+    assertTrue(stm.getTablesInStates(TableState.State.DISABLED).contains(name));
+    assertTrue(stm.isTablePresent(name));
+    stm.setTableState(name, TableState.State.ENABLING);
+    assertTrue(stm.isTableState(name, TableState.State.ENABLING));
+    assertTrue(stm.isTableState(name, TableState.State.DISABLED, TableState.State.ENABLING));
+    assertFalse(stm.isTableState(name, TableState.State.DISABLED));
+    assertFalse(stm.getTablesInStates(TableState.State.DISABLED).contains(name));
+    assertTrue(stm.isTablePresent(name));
+    stm.setTableState(name, TableState.State.ENABLED);
+    assertTrue(stm.isTableState(name, TableState.State.ENABLED));
+    assertFalse(stm.isTableState(name, TableState.State.ENABLING));
+    assertTrue(stm.isTablePresent(name));
+    TEST_UTIL.getHBaseAdmin().disableTable(name);
+    TEST_UTIL.waitTableDisabled(name.toBytes());
+    assertFalse(stm.isTableState(name, TableState.State.ENABLED));
+    assertFalse(stm.isTableState(name, TableState.State.DISABLING));
+    assertTrue(stm.isTableState(name, TableState.State.DISABLED));
+    assertTrue(stm.isTableState(name, TableState.State.DISABLED, TableState.State.DISABLING));
+    assertTrue(stm.isTableState(name, TableState.State.DISABLED, TableState.State.ENABLING));
+    TEST_UTIL.getHBaseAdmin().deleteTable(name);
+    assertFalse(stm.isTableState(name, TableState.State.ENABLED));
+    assertFalse(stm.isTableState(name, TableState.State.DISABLING));
+    assertFalse(stm.isTableState(name, TableState.State.DISABLED));
+    assertFalse(stm.isTableState(name, TableState.State.ENABLING));
+    assertFalse(stm.isTableState(name, TableState.State.DISABLED, TableState.State.DISABLING));
+    assertFalse(stm.isTableState(name, TableState.State.DISABLED, TableState.State.ENABLING));
+    assertFalse(stm.isTablePresent(name));
+  }
+}
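The new test above drives the master's TableStateManager directly. Two behaviors it pins down are easy to miss in the long assertion list; this condensed sketch of one transition uses only calls that appear in the test itself:

    TableStateManager stm =
        TEST_UTIL.getMiniHBaseCluster().getMaster().getTableStateManager();
    stm.setTableState(name, TableState.State.DISABLING);
    // Passing several states to isTableState acts as an OR over them...
    assertTrue(stm.isTableState(name, TableState.State.DISABLED, TableState.State.DISABLING));
    // ...while getTablesInStates matches only the current state exactly.
    assertFalse(stm.getTablesInStates(TableState.State.DISABLED).contains(name));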
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java
deleted file mode 100644
index f5210cc..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.zookeeper.KeeperException;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table;
-
-@Category(MediumTests.class)
-public class TestZKTableStateManager {
-  private static final Log LOG = LogFactory.getLog(TestZKTableStateManager.class);
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.startMiniZKCluster();
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniZKCluster();
-  }
-
-  @Test
-  public void testTableStates()
-      throws CoordinatedStateException, IOException, KeeperException, InterruptedException {
-    final TableName name =
-        TableName.valueOf("testDisabled");
-    Abortable abortable = new Abortable() {
-      @Override
-      public void abort(String why, Throwable e) {
-        LOG.info(why, e);
-      }
-
-      @Override
-      public boolean isAborted() {
-        return false;
-      }
-
-    };
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-      name.getNameAsString(), abortable, true);
-    TableStateManager zkt = new ZKTableStateManager(zkw);
-    assertFalse(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.DISABLING);
-    assertTrue(zkt.isTableState(name, Table.State.DISABLING));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.DISABLED);
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertTrue(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.ENABLING);
-    assertTrue(zkt.isTableState(name, Table.State.ENABLING));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.ENABLED);
-    assertTrue(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setDeletedTable(name);
-    assertFalse(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTablePresent(name));
-  }
-}
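Taken together, the last two files show the shape of the migration. The deleted test constructed its own ZooKeeperWatcher and ZKTableStateManager and asserted against ZooKeeperProtos.Table.State; its replacement asks the running master for its TableStateManager and asserts against TableState.State, and the explicit setDeletedTable(name) call gives way to a plain HBaseAdmin.deleteTable(name). Side by side, both lines taken from the tests above:

    // Removed: ZK-backed manager built by the test itself.
    TableStateManager zkt = new ZKTableStateManager(zkw);
    // Replacement: manager owned by the running master.
    TableStateManager stm =
        TEST_UTIL.getMiniHBaseCluster().getMaster().getTableStateManager();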