diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index f28c44b..9cdffe9 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -31,6 +31,7 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -114,6 +115,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
+import org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos;
+import org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
@@ -2629,4 +2632,30 @@ public final class ProtobufUtil {
     }
     return result;
   }
+
+  public static NamespaceStateProtos.NamespaceTableAndRegionInfo toProtoNamespaceState(
+      org.apache.hadoop.hbase.NamespaceTableAndRegionInfo namespaceState) {
+    NamespaceStateProtos.NamespaceTableAndRegionInfo.Builder b =
+        NamespaceStateProtos.NamespaceTableAndRegionInfo.newBuilder();
+    b.setName(namespaceState.getName());
+    for (String table : namespaceState.getTables()) {
+      NamespaceStateProtos.TableRegionCountPair.Builder tBuilder =
+          NamespaceStateProtos.TableRegionCountPair.newBuilder();
+      tBuilder.setName(table);
+      tBuilder.setCount(namespaceState.getTableRegionCount(table));
+      b.addTableRegionInfoMap(tBuilder.build());
+    }
+    return b.build();
+  }
+
+  public static org.apache.hadoop.hbase.NamespaceTableAndRegionInfo toNamespaceState(
+      NamespaceStateProtos.NamespaceTableAndRegionInfo namespaceState) {
+    org.apache.hadoop.hbase.NamespaceTableAndRegionInfo.Builder b =
+        org.apache.hadoop.hbase.NamespaceTableAndRegionInfo.create(namespaceState.getName());
+    for (TableRegionCountPair p : namespaceState.getTableRegionInfoMapList()) {
+      b.putTableAndRegionInfo(p.getName(), p.getCount());
+    }
+    return b.build();
+  }
 }
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 76f7c34..155e1ff 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -109,6 +109,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   public String recoveringRegionsZNode;
   // znode containing namespace descriptors
   public static String namespaceZNode = "namespace";
+  // znode containing information about tables and regions in a namespace
+  public String namespaceStateZNode;
 
   // Certain ZooKeeper nodes need to be world-readable
@@ -235,6 +237,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
         conf.get("zookeeper.znode.recovering.regions", "recovering-regions"));
     namespaceZNode = ZKUtil.joinZNode(baseZNode,
         conf.get("zookeeper.znode.namespace", "namespace"));
+    namespaceStateZNode = ZKUtil.joinZNode(baseZNode,
+        conf.get("zookeeper.znode.namespaceQuota", "namespace-state"));
   }
 
   /**
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceTableAndRegionInfo.java hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceTableAndRegionInfo.java
new file mode 100644
index 0000000..7410215
--- /dev/null
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceTableAndRegionInfo.java
@@ -0,0 +1,218 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.collect.Sets;
+
+/**
+ * NamespaceTableAndRegionInfo is a helper class that contains information
+ * about the current state of the tables and regions in a namespace.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class NamespaceTableAndRegionInfo {
+  private String name;
+  private Map<String, Integer> tableRegionInfoMap;
+
+  NamespaceTableAndRegionInfo(String name, Map<String, Integer> tableAndRegionInfo) {
+    this.name = name;
+    this.tableRegionInfoMap = tableAndRegionInfo;
+  }
+
+  /**
+   * Gets the name of the namespace.
+   *
+   * @return name of the namespace.
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Gets the set of table names belonging to the namespace.
+   *
+   * @return A set of table names.
+   */
+  public Set<String> getTables() {
+    if (!this.tableRegionInfoMap.isEmpty()) {
+      return this.tableRegionInfoMap.keySet();
+    } else {
+      return Sets.newHashSet();
+    }
+  }
+
+  /**
+   * Gets the total number of regions in the namespace.
+   *
+   * @return the region count
+   */
+  public int getRegionCount() {
+    int sum = 0;
+    if (!this.tableRegionInfoMap.isEmpty()) {
+      for (Integer count : this.tableRegionInfoMap.values()) {
+        sum = sum + count;
+      }
+    }
+    return sum;
+  }
+
+  /**
+   * Gets the region count of a table.
+   *
+   * @param tableName the table name
+   * @return the table region count, or -1 if the table does not exist in the namespace.
+   */
+  public int getTableRegionCount(String tableName) {
+    if (this.tableRegionInfoMap.containsKey(tableName)) {
+      return this.tableRegionInfoMap.get(tableName);
+    } else {
+      return -1;
+    }
+  }
+
+  @Override
+  public String toString() {
+    return "NamespaceTableAndRegionInfo [name=" + name + ", tableRegionInfoMap="
+        + tableRegionInfoMap + "]";
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((name == null) ? 0 : name.hashCode());
+    result = prime * result + ((tableRegionInfoMap == null) ? 0 : tableRegionInfoMap.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (getClass() != obj.getClass())
+      return false;
+    NamespaceTableAndRegionInfo other = (NamespaceTableAndRegionInfo) obj;
+    if (name == null) {
+      if (other.name != null)
+        return false;
+    } else if (!name.equals(other.name))
+      return false;
+    if (tableRegionInfoMap == null) {
+      if (other.tableRegionInfoMap != null)
+        return false;
+    } else if (!tableRegionInfoMap.equals(other.tableRegionInfoMap))
+      return false;
+    return true;
+  }
+
+  /**
+   * Creates a builder for building NamespaceTableAndRegionInfo.
+   *
+   * @param name The name of the namespace.
+   * @return An instance of the builder.
+   */
+  public static Builder create(String name) {
+    return new Builder(name);
+  }
+
+  /**
+   * Creates a builder for building NamespaceTableAndRegionInfo.
+   *
+   * @param info An instance of NamespaceTableAndRegionInfo.
+   * @return An instance of the builder.
+   */
+  public static Builder create(NamespaceTableAndRegionInfo info) {
+    return new Builder(info);
+  }
+
+  public static class Builder {
+    private String bName;
+    private Map<String, Integer> bMap;
+
+    private Builder(NamespaceTableAndRegionInfo info) {
+      this.bName = info.name;
+      this.bMap = info.tableRegionInfoMap;
+    }
+
+    private Builder(String name) {
+      this.bName = name;
+      this.bMap = new HashMap<String, Integer>();
+    }
+
+    /**
+     * Puts table and region count info into the existing data.
+     * This method replaces any existing region count for the table.
+     *
+     * @param name The name of the table.
+     * @param numRegions The number of regions.
+     * @return An instance of the Builder class.
+     */
+    public Builder putTableAndRegionInfo(String name, int numRegions) {
+      this.bMap.put(name, Integer.valueOf(numRegions));
+      return this;
+    }
+
+    /**
+     * Sets the table and region count information.
+     *
+     * @param info Map of table name to associated region count.
+     * @return An instance of the Builder class.
+     */
+    public Builder setTableAndRegionInfo(Map<String, Integer> info) {
+      this.bMap = info;
+      return this;
+    }
+
+    /**
+     * Removes the table and region information.
+     *
+     * @param tableName The name of the table.
+     * @return An instance of the Builder class.
+     */
+    public Builder removeTableAndRegionInfo(String tableName) {
+      this.bMap.remove(tableName);
+      return this;
+    }
+
+    /**
+     * Builds an instance of NamespaceTableAndRegionInfo.
+     *
+     * @return An instance of NamespaceTableAndRegionInfo.
+     */
+    public NamespaceTableAndRegionInfo build() {
+      if (this.bName == null) {
+        throw new IllegalArgumentException(
+            "A name has to be specified for storing namespace state information.");
+      }
+      return new NamespaceTableAndRegionInfo(this.bName, this.bMap);
+    }
+  }
+}
diff --git hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
index 911fd16..24ff1d5 100644
--- hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
+++ hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
@@ -1724,8 +1724,6 @@ public final class BulkDeleteProtos {
       }
     }
-
-    // @@protoc_insertion_point(class_scope:BulkDeleteService)
   }
 
   private static com.google.protobuf.Descriptors.Descriptor
diff --git hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/ExampleProtos.java hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/ExampleProtos.java
index a71e351..4aa9cb4 100644
--- hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/ExampleProtos.java
+++ hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/ExampleProtos.java
@@ -1087,8 +1087,6 @@ public final class ExampleProtos {
       }
     }
-
-    // @@protoc_insertion_point(class_scope:RowCountService)
   }
 
   private static com.google.protobuf.Descriptors.Descriptor
diff --git hbase-protocol/pom.xml hbase-protocol/pom.xml
index 7fd97d8..c96dd4b 100644
--- hbase-protocol/pom.xml
+++ hbase-protocol/pom.xml
@@ -174,6 +174,7 @@
                         <include>Tracing.proto</include>
                         <include>WAL.proto</include>
                         <include>ZooKeeper.proto</include>
+                        <include>NamespaceState.proto</include>
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AccessControlProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AccessControlProtos.java
index 0b70109..970c4b8 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AccessControlProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AccessControlProtos.java
@@ -10321,8 +10321,6 @@ public final class AccessControlProtos {
       }
     }
-
-    // @@protoc_insertion_point(class_scope:AccessControlService)
   }
 
   private static com.google.protobuf.Descriptors.Descriptor
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
index 54e8f32..4ffc1ad 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
@@ -20732,8 +20732,6 @@ public final class AdminProtos {
       }
     }
-
-    // @@protoc_insertion_point(class_scope:AdminService)
   }
 
   private static com.google.protobuf.Descriptors.Descriptor
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java
index 97e4b6a..e5e320b 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java
@@ -2300,8 +2300,6 @@ public final class AggregateProtos {
       }
     }
-
-    // @@protoc_insertion_point(class_scope:AggregateService)
   }
 
   private static com.google.protobuf.Descriptors.Descriptor
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AuthenticationProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AuthenticationProtos.java
index fb115fa..f00387c 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AuthenticationProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AuthenticationProtos.java
@@ -4529,8 +4529,6 @@ public final class AuthenticationProtos {
       }
     }
-
-    // @@protoc_insertion_point(class_scope:AuthenticationService)
   }
 
   private static com.google.protobuf.Descriptors.Descriptor
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index 9e1952d..e79c1e7 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -28898,8 +28898,6 @@ public final class ClientProtos {
       }
     }
-
-    // @@protoc_insertion_point(class_scope:ClientService)
   }
 
   private static com.google.protobuf.Descriptors.Descriptor
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 83a2152..092e859 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -42998,8 +42998,6 @@ public final class MasterProtos {
       }
     }
-
-    // @@protoc_insertion_point(class_scope:MasterService)
   }
 
   private static com.google.protobuf.Descriptors.Descriptor
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MultiRowMutationProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MultiRowMutationProtos.java
index 3010ff9..66760b6 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MultiRowMutationProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MultiRowMutationProtos.java
@@ -2154,8 +2154,6 @@ public final class MultiRowMutationProtos {
       }
     }
-
-    // @@protoc_insertion_point(class_scope:MultiRowMutationService)
   }
 
   private static com.google.protobuf.Descriptors.Descriptor
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/NamespaceStateProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/NamespaceStateProtos.java
new file mode 100644
index 0000000..a449bdb
--- /dev/null
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/NamespaceStateProtos.java
@@ -0,0 +1,1577 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: NamespaceState.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class NamespaceStateProtos { + private NamespaceStateProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface NamespaceTableAndRegionInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + /** + * required string name = 1; + */ + boolean hasName(); + /** + * required string name = 1; + */ + java.lang.String getName(); + /** + * required string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // repeated .TableRegionCountPair tableRegionInfoMap = 2; + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + java.util.List + getTableRegionInfoMapList(); + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair getTableRegionInfoMap(int index); + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + int getTableRegionInfoMapCount(); + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + java.util.List + getTableRegionInfoMapOrBuilderList(); + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPairOrBuilder getTableRegionInfoMapOrBuilder( + int index); + } + /** + * Protobuf type {@code NamespaceTableAndRegionInfo} + */ + public static final class NamespaceTableAndRegionInfo extends + com.google.protobuf.GeneratedMessage + implements NamespaceTableAndRegionInfoOrBuilder { + // Use NamespaceTableAndRegionInfo.newBuilder() to construct. + private NamespaceTableAndRegionInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private NamespaceTableAndRegionInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final NamespaceTableAndRegionInfo defaultInstance; + public static NamespaceTableAndRegionInfo getDefaultInstance() { + return defaultInstance; + } + + public NamespaceTableAndRegionInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private NamespaceTableAndRegionInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tableRegionInfoMap_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + tableRegionInfoMap_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.PARSER, extensionRegistry)); + break; + } + } + } + } 
catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tableRegionInfoMap_ = java.util.Collections.unmodifiableList(tableRegionInfoMap_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.internal_static_NamespaceTableAndRegionInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.internal_static_NamespaceTableAndRegionInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo.class, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public NamespaceTableAndRegionInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new NamespaceTableAndRegionInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .TableRegionCountPair tableRegionInfoMap = 2; + public static final int TABLEREGIONINFOMAP_FIELD_NUMBER = 2; + private java.util.List tableRegionInfoMap_; + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public java.util.List getTableRegionInfoMapList() { + return tableRegionInfoMap_; + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public java.util.List + getTableRegionInfoMapOrBuilderList() { + return tableRegionInfoMap_; + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public int getTableRegionInfoMapCount() { + return tableRegionInfoMap_.size(); + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair 
getTableRegionInfoMap(int index) { + return tableRegionInfoMap_.get(index); + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPairOrBuilder getTableRegionInfoMapOrBuilder( + int index) { + return tableRegionInfoMap_.get(index); + } + + private void initFields() { + name_ = ""; + tableRegionInfoMap_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableRegionInfoMapCount(); i++) { + if (!getTableRegionInfoMap(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + for (int i = 0; i < tableRegionInfoMap_.size(); i++) { + output.writeMessage(2, tableRegionInfoMap_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + for (int i = 0; i < tableRegionInfoMap_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableRegionInfoMap_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo other = (org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && getTableRegionInfoMapList() + .equals(other.getTableRegionInfoMapList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if (getTableRegionInfoMapCount() > 0) { + hash = (37 * hash) + TABLEREGIONINFOMAP_FIELD_NUMBER; + hash = (53 * hash) + getTableRegionInfoMapList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code NamespaceTableAndRegionInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.internal_static_NamespaceTableAndRegionInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.internal_static_NamespaceTableAndRegionInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo.class, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableRegionInfoMapFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (tableRegionInfoMapBuilder_ == null) { + tableRegionInfoMap_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tableRegionInfoMapBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.internal_static_NamespaceTableAndRegionInfo_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo build() { + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo result = new org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (tableRegionInfoMapBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + tableRegionInfoMap_ = java.util.Collections.unmodifiableList(tableRegionInfoMap_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tableRegionInfoMap_ = tableRegionInfoMap_; + } else { + result.tableRegionInfoMap_ = tableRegionInfoMapBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder 
mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (tableRegionInfoMapBuilder_ == null) { + if (!other.tableRegionInfoMap_.isEmpty()) { + if (tableRegionInfoMap_.isEmpty()) { + tableRegionInfoMap_ = other.tableRegionInfoMap_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTableRegionInfoMapIsMutable(); + tableRegionInfoMap_.addAll(other.tableRegionInfoMap_); + } + onChanged(); + } + } else { + if (!other.tableRegionInfoMap_.isEmpty()) { + if (tableRegionInfoMapBuilder_.isEmpty()) { + tableRegionInfoMapBuilder_.dispose(); + tableRegionInfoMapBuilder_ = null; + tableRegionInfoMap_ = other.tableRegionInfoMap_; + bitField0_ = (bitField0_ & ~0x00000002); + tableRegionInfoMapBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableRegionInfoMapFieldBuilder() : null; + } else { + tableRegionInfoMapBuilder_.addAllMessages(other.tableRegionInfoMap_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + for (int i = 0; i < getTableRegionInfoMapCount(); i++) { + if (!getTableRegionInfoMap(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.NamespaceTableAndRegionInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string name = 1; + */ + public Builder setName( + java.lang.String 
value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + // repeated .TableRegionCountPair tableRegionInfoMap = 2; + private java.util.List tableRegionInfoMap_ = + java.util.Collections.emptyList(); + private void ensureTableRegionInfoMapIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + tableRegionInfoMap_ = new java.util.ArrayList(tableRegionInfoMap_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.Builder, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPairOrBuilder> tableRegionInfoMapBuilder_; + + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public java.util.List getTableRegionInfoMapList() { + if (tableRegionInfoMapBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableRegionInfoMap_); + } else { + return tableRegionInfoMapBuilder_.getMessageList(); + } + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public int getTableRegionInfoMapCount() { + if (tableRegionInfoMapBuilder_ == null) { + return tableRegionInfoMap_.size(); + } else { + return tableRegionInfoMapBuilder_.getCount(); + } + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair getTableRegionInfoMap(int index) { + if (tableRegionInfoMapBuilder_ == null) { + return tableRegionInfoMap_.get(index); + } else { + return tableRegionInfoMapBuilder_.getMessage(index); + } + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public Builder setTableRegionInfoMap( + int index, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair value) { + if (tableRegionInfoMapBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableRegionInfoMapIsMutable(); + tableRegionInfoMap_.set(index, value); + onChanged(); + } else { + tableRegionInfoMapBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public Builder setTableRegionInfoMap( + int index, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.Builder builderForValue) { + if (tableRegionInfoMapBuilder_ == null) { + ensureTableRegionInfoMapIsMutable(); + tableRegionInfoMap_.set(index, builderForValue.build()); + onChanged(); + } else { + tableRegionInfoMapBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public Builder addTableRegionInfoMap(org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair value) { + if (tableRegionInfoMapBuilder_ == null) { + if (value == null) { + throw new 
NullPointerException(); + } + ensureTableRegionInfoMapIsMutable(); + tableRegionInfoMap_.add(value); + onChanged(); + } else { + tableRegionInfoMapBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public Builder addTableRegionInfoMap( + int index, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair value) { + if (tableRegionInfoMapBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableRegionInfoMapIsMutable(); + tableRegionInfoMap_.add(index, value); + onChanged(); + } else { + tableRegionInfoMapBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public Builder addTableRegionInfoMap( + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.Builder builderForValue) { + if (tableRegionInfoMapBuilder_ == null) { + ensureTableRegionInfoMapIsMutable(); + tableRegionInfoMap_.add(builderForValue.build()); + onChanged(); + } else { + tableRegionInfoMapBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public Builder addTableRegionInfoMap( + int index, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.Builder builderForValue) { + if (tableRegionInfoMapBuilder_ == null) { + ensureTableRegionInfoMapIsMutable(); + tableRegionInfoMap_.add(index, builderForValue.build()); + onChanged(); + } else { + tableRegionInfoMapBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public Builder addAllTableRegionInfoMap( + java.lang.Iterable values) { + if (tableRegionInfoMapBuilder_ == null) { + ensureTableRegionInfoMapIsMutable(); + super.addAll(values, tableRegionInfoMap_); + onChanged(); + } else { + tableRegionInfoMapBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public Builder clearTableRegionInfoMap() { + if (tableRegionInfoMapBuilder_ == null) { + tableRegionInfoMap_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tableRegionInfoMapBuilder_.clear(); + } + return this; + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public Builder removeTableRegionInfoMap(int index) { + if (tableRegionInfoMapBuilder_ == null) { + ensureTableRegionInfoMapIsMutable(); + tableRegionInfoMap_.remove(index); + onChanged(); + } else { + tableRegionInfoMapBuilder_.remove(index); + } + return this; + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.Builder getTableRegionInfoMapBuilder( + int index) { + return getTableRegionInfoMapFieldBuilder().getBuilder(index); + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPairOrBuilder getTableRegionInfoMapOrBuilder( + int index) { + if (tableRegionInfoMapBuilder_ == null) { + return tableRegionInfoMap_.get(index); } else { + return tableRegionInfoMapBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public java.util.List + getTableRegionInfoMapOrBuilderList() { + if 
(tableRegionInfoMapBuilder_ != null) { + return tableRegionInfoMapBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableRegionInfoMap_); + } + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.Builder addTableRegionInfoMapBuilder() { + return getTableRegionInfoMapFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.getDefaultInstance()); + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.Builder addTableRegionInfoMapBuilder( + int index) { + return getTableRegionInfoMapFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.getDefaultInstance()); + } + /** + * repeated .TableRegionCountPair tableRegionInfoMap = 2; + */ + public java.util.List + getTableRegionInfoMapBuilderList() { + return getTableRegionInfoMapFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.Builder, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPairOrBuilder> + getTableRegionInfoMapFieldBuilder() { + if (tableRegionInfoMapBuilder_ == null) { + tableRegionInfoMapBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.Builder, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPairOrBuilder>( + tableRegionInfoMap_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + tableRegionInfoMap_ = null; + } + return tableRegionInfoMapBuilder_; + } + + // @@protoc_insertion_point(builder_scope:NamespaceTableAndRegionInfo) + } + + static { + defaultInstance = new NamespaceTableAndRegionInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:NamespaceTableAndRegionInfo) + } + + public interface TableRegionCountPairOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + /** + * required string name = 1; + */ + boolean hasName(); + /** + * required string name = 1; + */ + java.lang.String getName(); + /** + * required string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // required uint32 count = 2; + /** + * required uint32 count = 2; + */ + boolean hasCount(); + /** + * required uint32 count = 2; + */ + int getCount(); + } + /** + * Protobuf type {@code TableRegionCountPair} + */ + public static final class TableRegionCountPair extends + com.google.protobuf.GeneratedMessage + implements TableRegionCountPairOrBuilder { + // Use TableRegionCountPair.newBuilder() to construct. 
+ private TableRegionCountPair(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableRegionCountPair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableRegionCountPair defaultInstance; + public static TableRegionCountPair getDefaultInstance() { + return defaultInstance; + } + + public TableRegionCountPair getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableRegionCountPair( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + count_ = input.readUInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.internal_static_TableRegionCountPair_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.internal_static_TableRegionCountPair_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.class, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableRegionCountPair parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableRegionCountPair(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + 
(com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint32 count = 2; + public static final int COUNT_FIELD_NUMBER = 2; + private int count_; + /** + * required uint32 count = 2; + */ + public boolean hasCount() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 count = 2; + */ + public int getCount() { + return count_; + } + + private void initFields() { + name_ = ""; + count_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCount()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, count_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, count_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair other = (org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && (hasCount() == other.hasCount()); + if (hasCount()) { + result = result && (getCount() + == other.getCount()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if 
(hasCount()) { + hash = (37 * hash) + COUNT_FIELD_NUMBER; + hash = (53 * hash) + getCount(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code TableRegionCountPair} + */ + public static final class Builder extends + 
com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPairOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.internal_static_TableRegionCountPair_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.internal_static_TableRegionCountPair_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.class, org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + count_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.internal_static_TableRegionCountPair_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair build() { + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair result = new org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.count_ = count_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (other.hasCount()) { + setCount(other.getCount()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + if (!hasCount()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos.TableRegionCountPair) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder setNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + // required uint32 count = 2; + private int count_ ; + /** + * required uint32 count = 2; + */ + public boolean hasCount() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 count = 2; + */ + public int getCount() { + return count_; + } + /** + * required uint32 count = 2; + */ + public Builder setCount(int value) { + bitField0_ |= 0x00000002; + count_ = value; + onChanged(); + return this; + } + /** + * required uint32 count = 2; + */ + public Builder clearCount() { + bitField0_ = (bitField0_ & ~0x00000002); + count_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:TableRegionCountPair) + } + + static { + defaultInstance = new TableRegionCountPair(true); + defaultInstance.initFields(); + } + + // 
@@protoc_insertion_point(class_scope:TableRegionCountPair) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_NamespaceTableAndRegionInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_NamespaceTableAndRegionInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_TableRegionCountPair_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_TableRegionCountPair_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\024NamespaceState.proto\"^\n\033NamespaceTable" + + "AndRegionInfo\022\014\n\004name\030\001 \002(\t\0221\n\022tableRegi" + + "onInfoMap\030\002 \003(\0132\025.TableRegionCountPair\"3" + + "\n\024TableRegionCountPair\022\014\n\004name\030\001 \002(\t\022\r\n\005" + + "count\030\002 \002(\rBG\n*org.apache.hadoop.hbase.p" + + "rotobuf.generatedB\024NamespaceStateProtosH" + + "\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_NamespaceTableAndRegionInfo_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_NamespaceTableAndRegionInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_NamespaceTableAndRegionInfo_descriptor, + new java.lang.String[] { "Name", "TableRegionInfoMap", }); + internal_static_TableRegionCountPair_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_TableRegionCountPair_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TableRegionCountPair_descriptor, + new java.lang.String[] { "Name", "Count", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java index 261a6f2..b05a1dc 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java @@ -5443,8 +5443,6 @@ public final class RegionServerStatusProtos { } } - - // @@protoc_insertion_point(class_scope:RegionServerStatusService) } private static com.google.protobuf.Descriptors.Descriptor diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RowProcessorProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RowProcessorProtos.java index fd0b1af..a4375ed 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RowProcessorProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RowProcessorProtos.java @@ 
-1636,8 +1636,6 @@ public final class RowProcessorProtos { } } - - // @@protoc_insertion_point(class_scope:RowProcessorService) } private static com.google.protobuf.Descriptors.Descriptor diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java index d4e601a..03c3ed2 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java @@ -4809,8 +4809,6 @@ public final class SecureBulkLoadProtos { } } - - // @@protoc_insertion_point(class_scope:SecureBulkLoadService) } private static com.google.protobuf.Descriptors.Descriptor diff --git hbase-protocol/src/main/protobuf/NamespaceState.proto hbase-protocol/src/main/protobuf/NamespaceState.proto new file mode 100644 index 0000000..8984e6e --- /dev/null +++ hbase-protocol/src/main/protobuf/NamespaceState.proto @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file contains protocol buffers to represent the number of tables +// and regions belonging to a namespace. + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "NamespaceStateProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +message NamespaceTableAndRegionInfo { + required string name = 1; + repeated TableRegionCountPair tableRegionInfoMap = 2; +} + +message TableRegionCountPair { + required string name = 1; + required uint32 count = 2; +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptors.java hbase-server/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptors.java new file mode 100644 index 0000000..83bbfbe --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptors.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * The NamespaceDescriptors is a helper interface that contains methods + * for retrieving namespace descriptors. + */ +@InterfaceAudience.Private +public interface NamespaceDescriptors { + + /** + * Gets the namespace descriptor. + * + * @param name The name of the namespace descriptor to retrieve. + * @return the namespace descriptor + */ + public NamespaceDescriptor get(String name); + +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java index d012fea..92a9ba6 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java @@ -47,7 +47,7 @@ import java.util.concurrent.ConcurrentSkipListMap; * */ @InterfaceAudience.Private -public class ZKNamespaceManager extends ZooKeeperListener { +public class ZKNamespaceManager extends ZooKeeperListener implements NamespaceDescriptors { private static Log LOG = LogFactory.getLog(ZKNamespaceManager.class); private final String nsZNode; private volatile NavigableMap cache; diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceStateManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceStateManager.java new file mode 100644 index 0000000..61c5031 --- /dev/null +++ hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceStateManager.java @@ -0,0 +1,273 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.NavigableMap; +import java.util.concurrent.ConcurrentSkipListMap; + +import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.MetaScanner; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.NamespaceStateProtos; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +/** + * ZKNamespaceStateManager watches the current state of namespaces in + * terms of tables and regions. 
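+ * <p>
+ * A minimal usage sketch (the {@code ZooKeeperWatcher} instance {@code zkw} is
+ * assumed to exist already; this mirrors how the accompanying test drives it):
+ * <pre>
+ * ZKNamespaceStateManager stateManager = new ZKNamespaceStateManager(zkw);
+ * stateManager.start();
+ * NamespaceTableAndRegionInfo state = stateManager.getState("default");
+ * </pre>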
+ */ +@InterfaceAudience.Private +public class ZKNamespaceStateManager extends ZooKeeperListener { + + private static Log LOG = LogFactory.getLog(ZKNamespaceStateManager.class); + private String nsStateZNode; + private volatile NavigableMap nsStateCache; + + public ZKNamespaceStateManager(ZooKeeperWatcher zkw) throws IOException { + super(zkw); + nsStateZNode = zkw.namespaceStateZNode; + nsStateCache = new ConcurrentSkipListMap(); + } + + /** + * Starts the ZKNamespaceStateManager and creates the associated ZNode. + * + * @throws IOException Signals that an I/O exception has occurred. + */ + public void start() throws IOException { + watcher.registerListener(this); + try { + if (ZKUtil.watchAndCheckExists(watcher, nsStateZNode)) { + List existing = ZKUtil + .listChildrenAndWatchForNewChildren(watcher, + nsStateZNode); + if (existing != null) { + refreshNodes(existing); + } + } else { + ZKUtil.createWithParents(watcher, nsStateZNode); + } + } catch (KeeperException e) { + throw new IOException("Failed to initialize ZKNamespaceStateManager", e); + } + } + + /** + * Gets an instance of NamespaceTableAndRegionInfo associated + * with namespace. + * @param name The name of the namespace + * @return An instance of NamespaceTableAndRegionInfo. + */ + public NamespaceTableAndRegionInfo getState(String name) { + NamespaceTableAndRegionInfo info = nsStateCache.get(name); + return info; + } + + /** + * Delete state associated with namespace. + * + * @param namespaceState the namespace state + * @throws IOException Signals that an I/O exception has occurred. + */ + public void deleteState(String namespaceState) throws IOException { + String zNode = ZKUtil.joinZNode(nsStateZNode, namespaceState); + try { + ZKUtil.deleteNode(watcher, zNode); + } catch (KeeperException e) { + LOG.error("Failed deleting state for " + zNode, e); + throw new IOException("Failed deleting state for " + zNode, e); + } + } + + public void init() throws IOException { + try { + List namespaces = ZKUtil.listChildrenNoWatch(watcher, this.nsStateZNode); + if ((namespaces != null) && (namespaces.size() > 0)) { + buildNamespaceTableAndRegionInfoFromMeta(); + } + } catch (KeeperException e) { + throw new IOException("Failed to initialize ZKNamespaceStateManager", e); + } + } + + @Override + public void nodeCreated(String path) { + if (nsStateZNode.equals(path)) { + try { + List existing = + ZKUtil.listChildrenAndWatchForNewChildren(watcher, nsStateZNode); + if (existing != null) { + refreshNodes(existing); + } + } catch (KeeperException ke) { + String msg = "Error reading data from zookeeper"; + LOG.error(msg, ke); + watcher.abort(msg, ke); + } catch (IOException e) { + String msg = "Error parsing data from zookeeper"; + LOG.error(msg, e); + watcher.abort(msg, e); + } + } + } + + @Override + public void nodeDeleted(String path) { + if (nsStateZNode.equals(ZKUtil.getParent(path))) { + String nsName = ZKUtil.getNodeName(path); + nsStateCache.remove(nsName); + LOG.debug("Data deleted " + path); + } + } + + @Override + public void nodeDataChanged(String path) { + if (nsStateZNode.equals(ZKUtil.getParent(path))) { + try { + byte[] data = ZKUtil.getDataAndWatch(watcher, path); + NamespaceTableAndRegionInfo ns = ProtobufUtil + .toNamespaceState(NamespaceStateProtos.NamespaceTableAndRegionInfo + .parseFrom(data)); + nsStateCache.put(ns.getName(), ns); + } catch (KeeperException ke) { + String msg = "Error reading data from zookeeper for node " + + path; + LOG.error(msg, ke); + // only option is to abort + watcher.abort(msg, ke); + } catch 
(IOException ioe) {
+        String msg = "Error deserializing namespace quota info: " + path;
+        LOG.error(msg, ioe);
+        watcher.abort(msg, ioe);
+      }
+    }
+  }
+
+  @Override
+  public void nodeChildrenChanged(String path) {
+    if (nsStateZNode.equals(path)) {
+      try {
+        List<String> nodes = ZKUtil.listChildrenAndWatchForNewChildren(
+            watcher, nsStateZNode);
+        refreshNodes(nodes);
+      } catch (KeeperException ke) {
+        LOG.error("Error reading data from zookeeper for path " + path,
+            ke);
+        watcher.abort("Zookeeper error getting node children for path "
+            + path, ke);
+      } catch (IOException e) {
+        LOG.error("Error deserializing namespace state child from: " + path,
+            e);
+        watcher.abort("Error deserializing namespace state child from: "
+            + path, e);
+      }
+    }
+  }
+
+  private void refreshNodes(List<String> nodePaths)
+      throws IOException, KeeperException {
+    for (String path : nodePaths) {
+      if (path.isEmpty()) {
+        continue;
+      }
+      String absolutePath = ZKUtil.joinZNode(nsStateZNode, path);
+      byte[] nodeData = ZKUtil.getDataAndWatch(watcher, absolutePath);
+      if (ArrayUtils.isNotEmpty(nodeData)) {
+        NamespaceTableAndRegionInfo ns = ProtobufUtil
+            .toNamespaceState(NamespaceStateProtos.NamespaceTableAndRegionInfo.parseFrom(nodeData));
+        nsStateCache.put(ns.getName(), ns);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Updating namespace state cache from node path " + absolutePath
+              + " with data: " + ns.toString());
+        }
+      } else {
+        LOG.debug("Data found to be empty at: " + absolutePath);
+      }
+    }
+  }
+
+  /**
+   * Adds an instance of NamespaceTableAndRegionInfo associated with a
+   * namespace to ZooKeeper and updates the local cache.
+   * @param ns An instance of NamespaceTableAndRegionInfo
+   * @throws IOException Signals that an I/O exception has occurred.
+   */
+  public void putState(NamespaceTableAndRegionInfo ns)
+      throws IOException {
+    String zNode = ZKUtil.joinZNode(this.nsStateZNode,
+        ns.getName());
+    try {
+      if (ZKUtil.checkExists(watcher, zNode) == -1) {
+        ZKUtil.createWithParents(watcher, zNode, ProtobufUtil.toProtoNamespaceState(ns)
+            .toByteArray());
+      } else {
+        ZKUtil.setData(watcher, zNode, ProtobufUtil.toProtoNamespaceState(ns).toByteArray());
+      }
+    } catch (KeeperException e) {
+      throw new IOException("Failed writing state for namespace " + ns.getName(), e);
+    }
+    nsStateCache.put(ns.getName(), ns);
+  }
+
+  private void buildNamespaceTableAndRegionInfoFromMeta() throws IOException {
+    List<HRegionInfo> regions = MetaScanner.listAllRegions(watcher.getConfiguration(), true);
+    Map<String, Map<String, Integer>> namespaceState =
+        new HashMap<String, Map<String, Integer>>();
+    for (HRegionInfo region : regions) {
+      String table = region.getTable().getQualifierAsString();
+      String namespace = region.getTable().getNamespaceAsString();
+      Map<String, Integer> tableInfo;
+      if (namespaceState.containsKey(namespace)) {
+        tableInfo = namespaceState.get(namespace);
+        if (tableInfo.containsKey(table)) {
+          tableInfo.put(table, tableInfo.get(table) + 1);
+        } else {
+          tableInfo.put(table, 1);
+        }
+      } else {
+        tableInfo = new HashMap<String, Integer>();
+        tableInfo.put(table, 1);
+      }
+      namespaceState.put(namespace, tableInfo);
+    }
+    for (Entry<String, Map<String, Integer>> namespace : namespaceState.entrySet()) {
+      NamespaceTableAndRegionInfo.Builder b = NamespaceTableAndRegionInfo.create(namespace
+          .getKey());
+      b.setTableAndRegionInfo(namespace.getValue());
+      NamespaceTableAndRegionInfo info = b.build();
+      String zNode = ZKUtil.joinZNode(this.nsStateZNode, info.getName());
+      try {
+        ZKUtil.createWithParents(watcher, zNode, ProtobufUtil.toProtoNamespaceState(info)
+            .toByteArray());
+      } catch (KeeperException e) {
+        throw new IOException("Failed writing state for namespace " + info.getName(), e);
+      }
+      nsStateCache.put(namespace.getKey(), info);
+    }
+  }
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java
new file mode 100644
index 0000000..c55b312
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java
@@ -0,0 +1,553 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.namespace;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceTableAndRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ZKNamespaceStateManager;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+
+/**
+ * NamespaceAuditor is an observer class that audits the
+ * number of regions and tables present in a given namespace.
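+ * <p>
+ * Quotas are read from the namespace descriptor's configuration under the keys
+ * {@code hbase.namespace.quota.maxregions} and {@code hbase.namespace.quota.maxtables}.
+ * A minimal sketch of creating a quota-limited namespace (the namespace name and
+ * the {@code HBaseAdmin} instance {@code admin} are illustrative; this mirrors the
+ * accompanying test):
+ * <pre>
+ * NamespaceDescriptor desc = NamespaceDescriptor.create("ns1")
+ *     .addConfiguration(NamespaceAuditor.KEY_MAX_REGIONS, "5")
+ *     .addConfiguration(NamespaceAuditor.KEY_MAX_TABLES, "2").build();
+ * admin.createNamespace(desc);
+ * </pre>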
+ */ +@InterfaceAudience.Private +public class NamespaceAuditor extends BaseRegionObserver implements MasterObserver { + public static final String KEY_MAX_REGIONS = "hbase.namespace.quota.maxregions"; + public static final String KEY_MAX_TABLES = "hbase.namespace.quota.maxtables"; + boolean isRunningOnMaster = false; + private MasterServices masterServices; + private RegionServerServices rsServices; + private ZKNamespaceStateManager stateManager; + + @Override + public void start(CoprocessorEnvironment e) throws IOException { + if (e instanceof MasterCoprocessorEnvironment) { + // if running on HMaster + MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment) e; + isRunningOnMaster = true; + masterServices = mEnv.getMasterServices(); + stateManager = new ZKNamespaceStateManager(masterServices.getZooKeeper()); + } else if (e instanceof RegionCoprocessorEnvironment) { + // if running at region + RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment) e; + rsServices = regionEnv.getRegionServerServices(); + stateManager = new ZKNamespaceStateManager(rsServices.getZooKeeper()); + } + stateManager.start(); + } + + @Override + public void preCreateTable(ObserverContext ctx, + HTableDescriptor desc, HRegionInfo[] regions) throws IOException { + if (masterServices.isInitialized()) { + checkNamespaceTableCount(desc, regions); + } + } + + @Override + public void postCreateTable(ObserverContext ctx, + HTableDescriptor desc, HRegionInfo[] regions) throws IOException { + TableName tName = desc.getTableName(); + updateTable(tName.getNamespaceAsString(), tName.getQualifierAsString(), regions.length); + } + + @Override + public void preCreateTableHandler(ObserverContext ctx, + HTableDescriptor desc, HRegionInfo[] regions) throws IOException { + } + + @Override + public void postCreateTableHandler(ObserverContext ctx, + HTableDescriptor desc, HRegionInfo[] regions) throws IOException { + } + + @Override + public void preMove(ObserverContext ctx, HRegionInfo region, + ServerName srcServer, ServerName destServer) throws IOException { + } + + @Override + public void postMove(ObserverContext ctx, HRegionInfo region, + ServerName srcServer, ServerName destServer) throws IOException { + } + + @Override + public void preAssign(ObserverContext ctx, HRegionInfo regionInfo) + throws IOException { + } + + @Override + public void postAssign(ObserverContext ctx, HRegionInfo regionInfo) + throws IOException { + } + + @Override + public void preUnassign(ObserverContext ctx, + HRegionInfo regionInfo, boolean force) throws IOException { + } + + @Override + public void postUnassign(ObserverContext ctx, + HRegionInfo regionInfo, boolean force) throws IOException { + } + + @Override + public void preRegionOffline(ObserverContext ctx, + HRegionInfo regionInfo) throws IOException { + } + + @Override + public void postRegionOffline(ObserverContext ctx, + HRegionInfo regionInfo) throws IOException { + } + + @Override + public void preBalance(ObserverContext ctx) throws IOException { + } + + @Override + public void postBalance(ObserverContext ctx, List plans) + throws IOException { + } + + @Override + public boolean preBalanceSwitch(ObserverContext ctx, + boolean newValue) throws IOException { + return false; + } + + @Override + public void postBalanceSwitch(ObserverContext ctx, + boolean oldValue, boolean newValue) throws IOException { + } + + @Override + public void preShutdown(ObserverContext ctx) throws IOException { + } + + @Override + public void preStopMaster(ObserverContext ctx) throws 
IOException {
+  }
+
+  @Override
+  public void postStartMaster(ObserverContext ctx)
+      throws IOException {
+    this.stateManager.init();
+  }
+
+  @Override
+  public void preSnapshot(ObserverContext ctx,
+      SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+  }
+
+  @Override
+  public void postSnapshot(ObserverContext ctx,
+      SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+  }
+
+  @Override
+  public void preCloneSnapshot(ObserverContext ctx,
+      SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+  }
+
+  @Override
+  public void postCloneSnapshot(ObserverContext ctx,
+      SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+  }
+
+  @Override
+  public void preRestoreSnapshot(ObserverContext ctx,
+      SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+  }
+
+  @Override
+  public void postRestoreSnapshot(ObserverContext ctx,
+      SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException {
+  }
+
+  @Override
+  public void preDeleteSnapshot(ObserverContext ctx,
+      SnapshotDescription snapshot) throws IOException {
+  }
+
+  @Override
+  public void postDeleteSnapshot(ObserverContext ctx,
+      SnapshotDescription snapshot) throws IOException {
+  }
+
+  @Override
+  public void preCreateNamespace(
+      ObserverContext ctx, NamespaceDescriptor ns)
+      throws IOException {
+    validateTableAndRegionCount(ns);
+  }
+
+  @Override
+  public void postCreateNamespace(ObserverContext ctx,
+      NamespaceDescriptor ns) throws IOException {
+    NamespaceTableAndRegionInfo.Builder b = NamespaceTableAndRegionInfo
+        .create(ns.getName());
+    stateManager.putState(b.build());
+  }
+
+  @Override
+  public void preDeleteNamespace(ObserverContext ctx,
+      String namespace)
+      throws IOException {
+  }
+
+  @Override
+  public void postDeleteNamespace(ObserverContext ctx,
+      String namespace) throws IOException {
+    stateManager.deleteState(namespace);
+  }
+
+  @Override
+  public void preModifyNamespace(ObserverContext ctx,
+      NamespaceDescriptor ns) throws IOException {
+    validateTableAndRegionCount(ns);
+  }
+
+  @Override
+  public void postModifyNamespace(ObserverContext ctx,
+      NamespaceDescriptor ns) throws IOException {
+  }
+
+  private NamespaceTableAndRegionInfo getNamespaceTableAndRegionInfo(String namespace)
+      throws IOException {
+    return stateManager.getState(namespace);
+  }
+
+  private void validateTableAndRegionCount(NamespaceDescriptor desc) throws IOException {
+    if (getMaxRegions(desc) <= 0) {
+      throw new ConstraintException("The max region quota for " + desc.getName()
+          + " is less than or equal to zero.");
+    }
+    if (getMaxTables(desc) <= 0) {
+      throw new ConstraintException("The max tables quota for " + desc.getName()
+          + " is less than or equal to zero.");
+    }
+  }
+
+  private static long getMaxRegions(NamespaceDescriptor ns) throws IOException {
+    String value = ns.getConfigurationValue(KEY_MAX_REGIONS);
+    long maxRegions = 0;
+    if (StringUtils.isNotEmpty(value)) {
+      try {
+        maxRegions = Long.parseLong(value);
+      } catch (NumberFormatException exp) {
+        throw new ConstraintException("NumberFormatException while getting max regions.", exp);
+      }
+    } else {
+      // The property is not set, so assume it is the max long value.
+      maxRegions = Long.MAX_VALUE;
+    }
+    return maxRegions;
+  }
+
+  private static long getMaxTables(NamespaceDescriptor ns) throws IOException {
+    String value = ns.getConfigurationValue(KEY_MAX_TABLES);
+    long maxTables = 0;
+    if (StringUtils.isNotEmpty(value)) {
+      try {
+        maxTables = Long.parseLong(value);
+      } catch (NumberFormatException exp) {
+        throw new ConstraintException("NumberFormatException while getting max tables.", exp);
+      }
+    } else {
+      // The property is not set, so assume it is the max long value.
+      maxTables = Long.MAX_VALUE;
+    }
+    return maxTables;
+  }
+
+  private void checkNamespaceTableCount(HTableDescriptor desc,
+      HRegionInfo[] regions) throws IOException {
+    String namespace = desc.getTableName().getNamespaceAsString();
+    NamespaceDescriptor nspdesc = getNamespaceDescriptor(namespace);
+    if (nspdesc != null) {
+      NamespaceTableAndRegionInfo currentStatus;
+      try {
+        currentStatus = getNamespaceTableAndRegionInfo(nspdesc.getName());
+      } catch (IOException exp) {
+        throw new DoNotRetryIOException("Unable to obtain current table count from namespace "
+            + nspdesc, exp);
+      }
+      if (currentStatus.getTables().size() >= getMaxTables(nspdesc)) {
+        throw new DoNotRetryIOException("The table " + desc.getTableName().getNameAsString()
+            + " cannot be created as it would exceed the maximum number of tables allowed"
+            + " in the namespace.");
+      }
+
+      if ((currentStatus.getRegionCount() + regions.length) > getMaxRegions(nspdesc)) {
+        throw new DoNotRetryIOException("The table " + desc.getTableName().getNameAsString()
+            + " is not allowed to have " + regions.length
+            + " regions. The total number of regions permitted is only "
+            + getMaxRegions(nspdesc) + ", while current region count is "
+            + currentStatus.getRegionCount()
+            + ". This may be transient, please retry later if there are any"
+            + " ongoing split operations in the namespace.");
+      }
+    }
+  }
+
+  private void checkNamespaceRegionCount(TableName name, String regionName)
+      throws IOException {
+    NamespaceDescriptor nspdesc = getNamespaceDescriptor(name.getNamespaceAsString());
+    if (nspdesc != null) {
+      NamespaceTableAndRegionInfo currentStatus;
+      try {
+        currentStatus = getNamespaceTableAndRegionInfo(name.getNamespaceAsString());
+      } catch (IOException exp) {
+        throw new DoNotRetryIOException("Unable to obtain current region count from namespace "
+            + nspdesc, exp);
+      }
+      if (currentStatus.getRegionCount() >= getMaxRegions(nspdesc)) {
+        throw new DoNotRetryIOException("The region "
+            + regionName
+            + " cannot be created. The region count will exceed quota on the namespace. 
" + + "This may be transient, please retry later if there are any ongoing split" + + " operations in the namespace."); + } + } + } + + private void updateTable(String namespace, String table, int num) throws IOException { + NamespaceTableAndRegionInfo currentStatus = stateManager.getState(namespace); + NamespaceTableAndRegionInfo.Builder b = NamespaceTableAndRegionInfo.create(currentStatus); + int currentRegionCount = currentStatus.getTableRegionCount(table); + if (currentRegionCount != -1) { + b.putTableAndRegionInfo(table, currentRegionCount + num); + } else { + b.putTableAndRegionInfo(table, num); + } + stateManager.putState(b.build()); + } + + private void deleteTable(String namespace, String name) throws IOException { + NamespaceTableAndRegionInfo currentStatus = stateManager.getState(namespace); + NamespaceTableAndRegionInfo.Builder b = NamespaceTableAndRegionInfo.create(currentStatus); + b.removeTableAndRegionInfo(name); + stateManager.putState(b.build()); + } + + private NamespaceDescriptor getNamespaceDescriptor(String namespace) + throws IOException { + if (isRunningOnMaster) { + return masterServices.getNamespaceDescriptor(namespace); + } else { + return rsServices.getNamespaceDescriptors().get(namespace); + } + } + + @Override + public void preDeleteTable(ObserverContext ctx, TableName tableName) + throws IOException { + } + + @Override + public void postDeleteTable(ObserverContext ctx, + TableName tableName) + throws IOException { + deleteTable(tableName.getNamespaceAsString(), tableName.getQualifierAsString()); + } + + @Override + public void preDeleteTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void postDeleteTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void preModifyTable(ObserverContext ctx, + TableName tableName, HTableDescriptor htd) throws IOException { + } + + @Override + public void postModifyTable(ObserverContext ctx, + TableName tableName, HTableDescriptor htd) throws IOException { + + } + + @Override + public void preModifyTableHandler(ObserverContext ctx, + TableName tableName, HTableDescriptor htd) throws IOException { + } + + @Override + public void postModifyTableHandler(ObserverContext ctx, + TableName tableName, HTableDescriptor htd) throws IOException { + } + + @Override + public void preAddColumn(ObserverContext ctx, TableName tableName, + HColumnDescriptor column) throws IOException { + } + + @Override + public void postAddColumn(ObserverContext ctx, TableName tableName, + HColumnDescriptor column) throws IOException { + } + + @Override + public void preAddColumnHandler(ObserverContext ctx, + TableName tableName, HColumnDescriptor column) throws IOException { + } + + @Override + public void postAddColumnHandler(ObserverContext ctx, + TableName tableName, HColumnDescriptor column) throws IOException { + } + + @Override + public void preModifyColumn(ObserverContext ctx, + TableName tableName, HColumnDescriptor descriptor) throws IOException { + } + + @Override + public void postModifyColumn(ObserverContext ctx, + TableName tableName, HColumnDescriptor descriptor) throws IOException { + } + + @Override + public void preModifyColumnHandler(ObserverContext ctx, + TableName tableName, HColumnDescriptor descriptor) throws IOException { + } + + @Override + public void postModifyColumnHandler(ObserverContext ctx, + TableName tableName, HColumnDescriptor descriptor) throws IOException { + } + + @Override + public void 
preDeleteColumn(ObserverContext ctx, + TableName tableName, byte[] c) throws IOException { + } + + @Override + public void postDeleteColumn(ObserverContext ctx, + TableName tableName, byte[] c) throws IOException { + } + + @Override + public void preDeleteColumnHandler(ObserverContext ctx, + TableName tableName, byte[] c) throws IOException { + } + + @Override + public void postDeleteColumnHandler(ObserverContext ctx, + TableName tableName, byte[] c) throws IOException { + } + + @Override + public void preEnableTable(ObserverContext ctx, TableName tableName) + throws IOException { + } + + @Override + public void postEnableTable(ObserverContext ctx, + TableName tableName) + throws IOException { + } + + @Override + public void preEnableTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void postEnableTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void preDisableTable(ObserverContext ctx, + TableName tableName) + throws IOException { + } + + @Override + public void postDisableTable(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void preDisableTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void postDisableTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + } + + @Override + public void preGetTableDescriptors(ObserverContext ctx, + List tableNamesList, List descriptors) throws IOException { + } + + @Override + public void postGetTableDescriptors(ObserverContext ctx, + List descriptors) throws IOException { + } + + @Override + public void preMasterInitialization( + ObserverContext ctx) + throws IOException { + } + + @Override + public void postCompleteSplit( + ObserverContext ctx) + throws IOException { + RegionCoprocessorEnvironment regionEnv = (RegionCoprocessorEnvironment) ctx + .getEnvironment(); + TableName tName = regionEnv.getRegion().getTableDesc().getTableName(); + checkNamespaceRegionCount(tName, regionEnv.getRegion().getRegionNameAsString()); + updateTable(tName.getNamespaceAsString(), tName.getQualifierAsString(), 1); + } +} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 02d5566..9e26daa 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HealthCheckChore; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.NamespaceDescriptors; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.ServerName; @@ -79,6 +80,7 @@ import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.YouAreDeadException; +import org.apache.hadoop.hbase.ZKNamespaceManager; import org.apache.hadoop.hbase.ZNodeClearer; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaEditor; @@ -518,6 +520,9 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa private final 
ServerNonceManager nonceManager; private UserProvider userProvider; + + // Used by regions for auditing purposes. + private ZKNamespaceManager zkNamespaceManager; /** * Starts a HRegionServer at the default location @@ -763,6 +768,9 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa if(this.distributedLogReplay) { this.recoveringRegionWatcher = new RecoveringRegionWatcher(this.zooKeeper, this); } + + this.zkNamespaceManager = new ZKNamespaceManager(getZooKeeper()); + this.zkNamespaceManager.start(); } /** @@ -4572,4 +4580,9 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa respBuilder.setResponse(openInfoList.size()); return respBuilder.build(); } + + @Override + public NamespaceDescriptors getNamespaceDescriptors() throws IOException { + return (NamespaceDescriptors) zkNamespaceManager; + } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index 2be10cd..55ddebd 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.NamespaceDescriptors; +import org.apache.hadoop.hbase.ZKNamespaceManager; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.ipc.PriorityFunction; @@ -121,4 +123,12 @@ public interface RegionServerServices * @return The RegionServer's NonceManager */ public ServerNonceManager getNonceManager(); + + /** + * Gets the NamespaceDescriptors. + * + * @return An instance of NamespaceDescriptors. + * @throws IOException Signals that an I/O exception has occurred. 
+ */ + public NamespaceDescriptors getNamespaceDescriptors() throws IOException; } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index 50ad030..8795200 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -227,4 +227,9 @@ class MockRegionServerServices implements RegionServerServices { // TODO Auto-generated method stub return null; } + + @Override + public NamespaceDescriptors getNamespaceDescriptors() throws IOException { + return null; + } } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 45b9885..0b8ade2 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.NamespaceDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; @@ -560,4 +561,10 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { public ServerNonceManager getNonceManager() { return null; } + + @Override + public NamespaceDescriptors getNamespaceDescriptors() throws IOException { + // TODO Auto-generated method stub + return null; + } } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java new file mode 100644 index 0000000..f7cafb1 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java @@ -0,0 +1,309 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.namespace; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.NamespaceTableAndRegionInfo; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.ZKNamespaceStateManager; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.zookeeper.KeeperException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(MediumTests.class) +public class TestNamespaceAuditor { + private static final Log LOG = LogFactory.getLog(TestNamespaceAuditor.class); + private static HBaseTestingUtility UTIL; + protected static HBaseAdmin admin; + private static ZKNamespaceStateManager stateManager; + private String prefix = "TestNamespaceAuditor"; + + @BeforeClass + public static void beforeClass() throws Exception { + Configuration conf = HBaseConfiguration.create(); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + NamespaceAuditor.class.getName()); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + NamespaceAuditor.class.getName()); + UTIL = new HBaseTestingUtility(conf); + UTIL.startMiniCluster(); + admin = new HBaseAdmin(UTIL.getConfiguration()); + stateManager = new ZKNamespaceStateManager(UTIL.getZooKeeperWatcher()); + stateManager.start(); + LOG.info("Done initializing cluster"); + } + + @AfterClass + public static void tearDown() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @After + public void cleanup() throws IOException, KeeperException { + for (HTableDescriptor table : admin.listTables()) { + admin.disableTable(table.getName()); + admin.deleteTable(table.getName()); + } + for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) { + if (ns.getName().startsWith(prefix)) { + admin.deleteNamespace(ns.getName()); + } + } + } + + @Test + public void testTableOperations() throws Exception { + String nsp = prefix + "_np2"; + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp) + .addConfiguration(NamespaceAuditor.KEY_MAX_REGIONS, "5") + .addConfiguration(NamespaceAuditor.KEY_MAX_TABLES, "2").build(); + admin.createNamespace(nspDesc); + assertTrue(admin.getNamespaceDescriptor(nsp) != null); + assertEquals(admin.listNamespaceDescriptors().length, 3); + NamespaceTableAndRegionInfo stateInfo; + HTableDescriptor tableDescOne = new HTableDescriptor(TableName.valueOf(nsp + + TableName.NAMESPACE_DELIM + "table1")); + HTableDescriptor tableDescTwo = new HTableDescriptor(TableName.valueOf(nsp + + TableName.NAMESPACE_DELIM + "table2")); + HTableDescriptor tableDescThree = new 
HTableDescriptor(TableName.valueOf(nsp + + TableName.NAMESPACE_DELIM + "table3")); + admin.createTable(tableDescOne); + boolean constraintViolated = false; + try { + //This should fail as the region quota will be violated. + admin.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5); + } catch (Exception exp) { + assertTrue(exp instanceof IOException); + constraintViolated = true; + } finally { + assertTrue(constraintViolated); + } + //This should pass. + admin.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4); + stateInfo = stateManager.getState(nsp); + assertEquals(2, stateInfo.getTables().size()); + assertEquals(5, stateInfo.getRegionCount()); + constraintViolated = false; + try { + admin.createTable(tableDescThree); + } catch (Exception exp) { + assertTrue(exp instanceof IOException); + constraintViolated = true; + stateInfo = stateManager.getState(nsp); + assertEquals(2, stateInfo.getTables().size()); + assertEquals(5, stateInfo.getRegionCount()); + } finally { + assertTrue(constraintViolated); + } + admin.disableTable(tableDescOne.getName()); + admin.deleteTable(tableDescOne.getName()); + admin.disableTable(tableDescTwo.getName()); + admin.deleteTable(tableDescTwo.getName()); + admin.deleteNamespace(nsp); + stateInfo = stateManager.getState(nsp); + assertTrue(stateInfo == null); + } + + @Test + public void testRegionOperations() throws Exception { + String nsp1 = prefix + "_regiontest"; + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1) + .addConfiguration(NamespaceAuditor.KEY_MAX_REGIONS, "6") + .addConfiguration(NamespaceAuditor.KEY_MAX_TABLES, "2").build(); + admin.createNamespace(nspDesc); + boolean constraintViolated = false; + HTableDescriptor tableDescOne = new HTableDescriptor(TableName.valueOf(nsp1 + + TableName.NAMESPACE_DELIM + "table1")); + tableDescOne.addFamily(new HColumnDescriptor(Bytes.toBytes("info"))); + NamespaceTableAndRegionInfo stateInfo; + try { + admin.createTable(tableDescOne, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 7); + } catch (Exception exp) { + assertTrue(exp instanceof DoNotRetryIOException); + LOG.info(exp); + constraintViolated = true; + } finally { + assertTrue(constraintViolated); + } + assertFalse(admin.tableExists(tableDescOne.getName())); + + // This call will pass. + admin.createTable(tableDescOne, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 5); + stateInfo = stateManager.getState(nsp1); + assertEquals(1, stateInfo.getTables().size()); + assertEquals(5, stateInfo.getRegionCount()); + HTable table = new HTable(UTIL.getConfiguration(), tableDescOne.getName()); + assertEquals(5, table.getRegionLocations().size()); + UTIL.loadTable(table, Bytes.toBytes("info")); + + admin.split(tableDescOne.getName()); + // Sleep for the number of regions to settle. + Thread.sleep(10000); + int count = table.getRegionLocations().size(); + assertTrue(count > 5); + // This will not pass. + admin.split(tableDescOne.getName()); + //Make sure no regions have been added. 
+ assertTrue(count == table.getRegionLocations().size()); + table.close(); + admin.disableTable(tableDescOne.getName()); + admin.deleteTable(tableDescOne.getName()); + admin.deleteNamespace(nsp1); + stateInfo = stateManager.getState(nsp1); + assertTrue(stateInfo == null); + } + + @Test + public void testValidQuotas() throws Exception { + boolean exceptionCaught = false; + FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); + Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(prefix + "vq1") + .addConfiguration(NamespaceAuditor.KEY_MAX_REGIONS, "hihdufh") + .addConfiguration(NamespaceAuditor.KEY_MAX_TABLES, "2").build(); + try { + admin.createNamespace(nspDesc); + } catch (Exception exp) { + LOG.warn(exp); + exceptionCaught = true; + } finally { + assertTrue(exceptionCaught); + assertFalse(fs.exists(FSUtils.getNamespaceDir(rootDir, nspDesc.getName()))); + } + nspDesc = NamespaceDescriptor.create(prefix + "vq2") + .addConfiguration(NamespaceAuditor.KEY_MAX_REGIONS, "-456") + .addConfiguration(NamespaceAuditor.KEY_MAX_TABLES, "2").build(); + try { + admin.createNamespace(nspDesc); + } catch (Exception exp) { + LOG.warn(exp); + exceptionCaught = true; + } finally { + assertTrue(exceptionCaught); + assertFalse(fs.exists(FSUtils.getNamespaceDir(rootDir, nspDesc.getName()))); + } + nspDesc = NamespaceDescriptor.create(prefix + "vq3") + .addConfiguration(NamespaceAuditor.KEY_MAX_REGIONS, "10") + .addConfiguration(NamespaceAuditor.KEY_MAX_TABLES, "sciigd").build(); + try { + admin.createNamespace(nspDesc); + } catch (Exception exp) { + LOG.warn(exp); + exceptionCaught = true; + } finally { + assertTrue(exceptionCaught); + assertFalse(fs.exists(FSUtils.getNamespaceDir(rootDir, nspDesc.getName()))); + } + nspDesc = NamespaceDescriptor.create(prefix + "vq4") + .addConfiguration(NamespaceAuditor.KEY_MAX_REGIONS, "10") + .addConfiguration(NamespaceAuditor.KEY_MAX_TABLES, "-1500").build(); + try { + admin.createNamespace(nspDesc); + } catch (Exception exp) { + LOG.warn(exp); + exceptionCaught = true; + } finally { + assertTrue(exceptionCaught); + assertFalse(fs.exists(FSUtils.getNamespaceDir(rootDir, nspDesc.getName()))); + } + assertTrue(admin.listNamespaceDescriptors().length == 2); + } + + @Test + public void testDeleteTable() throws Exception { + String namespace = prefix + "_dummy"; + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(namespace) + .addConfiguration(NamespaceAuditor.KEY_MAX_REGIONS, "100") + .addConfiguration(NamespaceAuditor.KEY_MAX_TABLES, "3").build(); + admin.createNamespace(nspDesc); + assertTrue(admin.getNamespaceDescriptor(namespace) != null); + NamespaceTableAndRegionInfo stateInfo = stateManager.getState(namespace); + assertTrue(stateInfo != null); + assertEquals(0, stateInfo.getTables().size()); + assertEquals(0, stateInfo.getRegionCount()); + + HTableDescriptor tableDescOne = new HTableDescriptor(TableName.valueOf(namespace + + TableName.NAMESPACE_DELIM + "table1")); + HTableDescriptor tableDescTwo = new HTableDescriptor(TableName.valueOf(namespace + + TableName.NAMESPACE_DELIM + "table2")); + admin.createTable(tableDescOne); + admin.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5); + stateInfo = stateManager.getState(namespace); + assertTrue(stateInfo != null); + assertEquals(2, stateInfo.getTables().size()); + assertEquals(6, stateInfo.getRegionCount()); + admin.disableTable(tableDescOne.getName()); + 
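// Dropping table1 (a single-region table) should remove exactly one table
// and one region from the cached namespace state (6 regions -> 5).
+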
admin.deleteTable(tableDescOne.getName()); + stateInfo = stateManager.getState(namespace); + assertTrue(stateInfo != null); + assertEquals(1, stateInfo.getTables().size()); + assertEquals(5, stateInfo.getRegionCount()); + + admin.disableTable(tableDescTwo.getName()); + admin.deleteTable(tableDescTwo.getName()); + stateInfo = stateManager.getState(namespace); + assertTrue(stateInfo != null); + assertEquals(0, stateInfo.getTables().size()); + assertEquals(0, stateInfo.getRegionCount()); + admin.deleteNamespace(namespace); + stateInfo = stateManager.getState(namespace); + assertTrue(stateInfo == null); + } + + @Test + public void testNamespaceCreateAndDelete() throws IOException, InterruptedException { + String namespace = prefix + "_testNamespaceCreateAndDelete"; + NamespaceDescriptor nspDesc = NamespaceDescriptor.create(namespace) + .addConfiguration(NamespaceAuditor.KEY_MAX_REGIONS, "100") + .addConfiguration(NamespaceAuditor.KEY_MAX_TABLES, "3").build(); + admin.createNamespace(nspDesc); + Thread.sleep(5000); + NamespaceTableAndRegionInfo stateInfo = stateManager.getState(namespace); + assertTrue(stateInfo != null); + assertEquals(0, stateInfo.getTables().size()); + assertEquals(0, stateInfo.getRegionCount()); + admin.deleteNamespace(namespace); + stateInfo = stateManager.getState(namespace); + assertTrue(stateInfo == null); + } +}
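
For reference, a minimal sketch of the wire format defined in NamespaceState.proto, which is the payload ZKNamespaceStateManager reads and writes on the per-namespace znodes (the namespace name "ns1", table name "table1", and count are illustrative):

    // Build one table/region-count entry and wrap it in a namespace state message.
    NamespaceStateProtos.TableRegionCountPair pair = NamespaceStateProtos.TableRegionCountPair
        .newBuilder().setName("table1").setCount(3).build();
    NamespaceStateProtos.NamespaceTableAndRegionInfo state =
        NamespaceStateProtos.NamespaceTableAndRegionInfo.newBuilder()
            .setName("ns1").addTableRegionInfoMap(pair).build();
    // Serialize for storage in the znode, then parse it back.
    byte[] bytes = state.toByteArray();
    NamespaceStateProtos.NamespaceTableAndRegionInfo parsed =
        NamespaceStateProtos.NamespaceTableAndRegionInfo.parseFrom(bytes);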